Problem running StyleGAN2 in Colab #54
Comments
Try: %pip install ninja. I remember encountering similar issues with the original repository.
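To confirm the install actually took effect, here is a minimal check (a sketch, assuming a standard Colab runtime; is_ninja_available() is the same probe the traceback below ends in):

```python
# Minimal sanity check, assuming ninja was just installed in this runtime:
# PyTorch's JIT extension loader must be able to find the ninja binary.
from torch.utils.cpp_extension import is_ninja_available

print(is_ninja_available())  # True means the bias_act build can proceed
```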
I think it's the same issue as mine in #53: with the plain notebook it won't work in Colab.
Maybe it helps: a work in progress, https://gist.github.com/manzke/32425fcaa88e33492c211b9bdd8adf6c :) which I'm using to train in Colab right now with conda.
StyleGAN2 uses custom kernels, so it is a bit of a pain to make it work in Colab.
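If the kernels still refuse to build, the traceback shows that bias_act() also accepts impl='ref'; a rough workaround sketch (an assumption about usage, and notably slower, since it skips the fused CUDA kernel):

```python
# Sketch of the reference fallback visible in the traceback (bias_act.py):
# impl='ref' uses plain PyTorch ops, so no ninja/nvcc toolchain is needed.
import torch
from torch_utils.ops import bias_act  # projected_gan's vendored op

x = torch.randn(4, 512)
b = torch.zeros(512)
y = bias_act.bias_act(x, b, act='lrelu', impl='ref')  # bypasses _init()
```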
Thanks, this seems to have worked! Really appreciate your help.
Perfect, then I'll close this issue.
Original issue:
When trying to run StyleGAN2 using cfg='stylegan2', this error occurs:
in train(**kwargs)
76
77 # Launch.
---> 78 launch_training(c=c, desc=desc, outdir=opts.outdir)
in launch_training(c, desc, outdir, rank)
43 sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None
44 training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
---> 45 training_loop.training_loop(rank=rank, **c)
/content/projected_gan/training/training_loop.py in training_loop(run_dir, training_set_kwargs, data_loader_kwargs, G_kwargs, D_kwargs, G_opt_kwargs, D_opt_kwargs, loss_kwargs, metrics, random_seed, num_gpus, rank, batch_size, batch_gpu, ema_kimg, ema_rampup, G_reg_interval, D_reg_interval, total_kimg, kimg_per_tick, image_snapshot_ticks, network_snapshot_ticks, resume_pkl, resume_kimg, cudnn_benchmark, abort_fn, progress_fn, restart_every)
188 z = torch.empty([batch_gpu, G.z_dim], device=device)
189 c = torch.empty([batch_gpu, G.c_dim], device=device)
--> 190 img = misc.print_module_summary(G, [z, c])
191 misc.print_module_summary(D, [img, c])
192
/content/projected_gan/torch_utils/misc.py in print_module_summary(module, inputs, max_nesting, skip_redundant)
214
215 # Run module.
--> 216 outputs = module(*inputs)
217 for hook in hooks:
218 hook.remove()
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1118 input = bw_hook.setup_input_hook(input)
1119
-> 1120 result = forward_call(*input, **kwargs)
1121 if _global_forward_hooks or self._forward_hooks:
1122 for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):
/content/projected_gan/pg_modules/networks_stylegan2.py in forward(self, z, c, truncation_psi, truncation_cutoff, update_emas, **synthesis_kwargs)
533
534 def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
--> 535 ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
536 img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
537 return img
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1118 input = bw_hook.setup_input_hook(input)
1119
-> 1120 result = forward_call(*input, **kwargs)
1121 if _global_forward_hooks or self._forward_hooks:
1122 for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):
/content/projected_gan/pg_modules/networks_stylegan2.py in forward(self, z, c, truncation_psi, truncation_cutoff, update_emas)
236 for idx in range(self.num_layers):
237 layer = getattr(self, f'fc{idx}')
--> 238 x = layer(x)
239
240 # Update moving average of W.
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1118 input = bw_hook.setup_input_hook(input)
1119
-> 1120 result = forward_call(*input, **kwargs)
1121 if _global_forward_hooks or self._forward_hooks:
1122 for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):
/content/projected_gan/pg_modules/networks_stylegan2.py in forward(self, x)
116 else:
117 x = x.matmul(w.t())
--> 118 x = bias_act.bias_act(x, b, act=self.activation)
119 return x
120
/content/projected_gan/torch_utils/ops/bias_act.py in bias_act(x, b, dim, act, alpha, gain, clamp, impl)
82 assert isinstance(x, torch.Tensor)
83 assert impl in ['ref', 'cuda']
---> 84 if impl == 'cuda' and x.device.type == 'cuda' and _init():
85 return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
86 return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
/content/projected_gan/torch_utils/ops/bias_act.py in _init()
44 headers=['bias_act.h'],
45 source_dir=os.path.dirname(__file__),
---> 46 extra_cuda_cflags=['--use_fast_math'],
47 )
48 return True
/content/projected_gan/torch_utils/custom_ops.py in get_plugin(module_name, sources, headers, source_dir, **build_kwargs)
135 cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
136 torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
--> 137 verbose=verbose_build, sources=cached_sources, **build_kwargs)
138 else:
139 torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
/usr/local/lib/python3.7/dist-packages/torch/utils/cpp_extension.py in load(name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory, verbose, with_cuda, is_python_module, is_standalone, keep_intermediates)
1134 is_python_module,
1135 is_standalone,
-> 1136 keep_intermediates=keep_intermediates)
1137
1138
/usr/local/lib/python3.7/dist-packages/torch/utils/cpp_extension.py in _jit_compile(name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory, verbose, with_cuda, is_python_module, is_standalone, keep_intermediates)
1345 verbose=verbose,
1346 with_cuda=with_cuda,
-> 1347 is_standalone=is_standalone)
1348 finally:
1349 baton.release()
/usr/local/lib/python3.7/dist-packages/torch/utils/cpp_extension.py in _write_ninja_file_and_build_library(name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory, verbose, with_cuda, is_standalone)
1416 with_cuda: Optional[bool],
1417 is_standalone: bool = False) -> None:
-> 1418 verify_ninja_availability()
1419 if IS_WINDOWS:
1420 compiler = os.environ.get('CXX', 'cl')
/usr/local/lib/python3.7/dist-packages/torch/utils/cpp_extension.py in verify_ninja_availability()
1472 '''
1473 if not is_ninja_available():
-> 1474 raise RuntimeError("Ninja is required to load C++ extensions")
1475
1476
RuntimeError: Ninja is required to load C++ extensions
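For context, the check that raises here only probes for a ninja binary on PATH; a sketch of roughly what torch.utils.cpp_extension.is_ninja_available() guards (not the exact implementation):

```python
# Rough sketch of PyTorch's ninja probe: if `ninja --version` cannot run,
# verify_ninja_availability() raises the RuntimeError seen above, which is
# why installing ninja with pip resolves the issue.
import subprocess

def ninja_on_path() -> bool:  # hypothetical helper mirroring is_ninja_available()
    try:
        subprocess.check_output(['ninja', '--version'])
        return True
    except (OSError, subprocess.CalledProcessError):
        return False

if not ninja_on_path():
    raise RuntimeError('Ninja is required to load C++ extensions')
```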