@@ -4,6 +4,8 @@
 # change the variables in webui-user.sh instead #
 #################################################
 
+export COMMANDLINE_ARGS="--xformers --no-half --precision=full --listen"
+
 # If run from macOS, load defaults from webui-macos-env.sh
 if [[ "$OSTYPE" == "darwin"* ]]; then
     if [[ -f webui-macos-env.sh ]]
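The header comment in this hunk asks for overrides to live in webui-user.sh rather than in this script, so the same flag set could equally be applied there without patching the file above. A minimal sketch of that alternative, with the flag values copied verbatim from the hunk:

```bash
# webui-user.sh -- equivalent override, keeping the launcher script unmodified
# (sketch only; flag set taken from the commit above)
export COMMANDLINE_ARGS="--xformers --no-half --precision=full --listen"
```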
@@ -113,13 +115,13 @@ case "$gpu_info" in
         printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
         printf "\n%s\n" "${delimiter}"
     ;;
-    *)
+    *)
     ;;
 esac
 if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
 then
     export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2"
-fi
+fi
 
 for preq in "${GIT}" "${python_cmd}"
 do
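Because the guard above only assigns TORCH_COMMAND when the variable is empty (`[[ -z "${TORCH_COMMAND}" ]]`), the variable can be pre-set before this block runs to pick a different wheel build. A sketch under that assumption; treat the index URL as a placeholder to swap for whichever PyTorch build you actually need:

```bash
# webui-user.sh -- pre-seeding TORCH_COMMAND so the ROCm default above is skipped
# (sketch; replace the --extra-index-url with the wheel index you want)
export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2"
```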
@@ -181,6 +183,6 @@ then
 else
     printf "\n%s\n" "${delimiter}"
     printf "Launching launch.py..."
-    printf "\n%s\n" "${delimiter}"
+    printf "\n%s\n" "${delimiter}"
     exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
 fi
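The `exec` line above forwards the launcher's own arguments to launch.py via `"$@"`, so one-off flags can also be supplied at invocation time instead of being baked into COMMANDLINE_ARGS. A hypothetical invocation:

```bash
# Extra arguments given to the launcher are passed straight through to launch.py
./webui.sh --listen
```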
@TikiTDO Is there a reason you need to use full precision rather than half-width FP16 when inferring in parallel? I don't think there's any torch limitation.
Or are you simply taking advantage of the extra VRAM capacity to load the model in full precision for slightly higher output quality?
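For comparison, the half-precision setup the question refers to would simply drop the two full-precision flags and let the webui keep the model in FP16. A sketch only, not a statement of the author's intent:

```bash
# Hypothetical FP16 variant of the same COMMANDLINE_ARGS: omitting
# --no-half and --precision=full lets the model be cast to half precision
export COMMANDLINE_ARGS="--xformers --listen"
```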