Skip to content

Commit

Permalink
OpenVINO fix resolution change
Browse files Browse the repository at this point in the history
  • Loading branch information
Disty0 committed Aug 25, 2023
1 parent 0512e29 commit 8ef4aa7
Show file tree
Hide file tree
Showing 3 changed files with 43 additions and 14 deletions.
24 changes: 12 additions & 12 deletions modules/intel/openvino/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,10 @@

class ModelState:
    """Holds the compilation state of the OpenVINO torch.compile backend.

    The pipeline compares height/width/batch_size against the current job to
    detect when the model must be recompiled (e.g. after a resolution change).
    """

    def __init__(self):
        self.recompile = 1          # NOTE(review): presumably a recompile-needed flag — confirm against callers
        self.device = "GPU"         # OpenVINO target device string
        self.height = 512           # height the model was last compiled for
        self.width = 512            # width the model was last compiled for
        self.batch_size = 1         # batch size the model was last compiled for
        self.mode = 0               # assumes an internal mode selector — TODO confirm semantics
        self.partition_id = 0       # graph-partition counter, appended to the cache hash
        self.model_hash = ""        # sha256 hash of the compiled subgraph's code

model_state = ModelState()
self.first_pass = True

@register_backend
@fake_tensor_unsupported
Expand All @@ -31,8 +25,6 @@ def openvino_fx(subgraph, example_inputs):
if os.getenv("OPENVINO_TORCH_MODEL_CACHING") != "0":
os.environ.setdefault('OPENVINO_TORCH_MODEL_CACHING', "1")
model_hash_str = sha256(subgraph.code.encode('utf-8')).hexdigest()
model_hash_str_file = model_hash_str + str(model_state.partition_id)
model_state.partition_id = model_state.partition_id + 1
executor_parameters = {"model_hash_str": model_hash_str}

example_inputs.reverse()
Expand All @@ -48,9 +40,17 @@ def openvino_fx(subgraph, example_inputs):
else:
os.environ.setdefault('OPENVINO_TORCH_BACKEND_DEVICE', device)

file_name = get_cached_file_name(*example_inputs, model_hash_str=model_hash_str_file, device=device, cache_root=cache_root)

if file_name is not None and os.path.isfile(file_name + ".xml") and os.path.isfile(file_name + ".bin"):
#Cache saving keeps increasing the partition id
#This loop checks whether caches with a non-zero partition id exist
#Takes about 0.002 seconds when nothing is found
use_cached_file = False
for i in range(100):
file_name = get_cached_file_name(*example_inputs, model_hash_str=str(model_hash_str + str(i)), device=device, cache_root=cache_root)
if file_name is not None and os.path.isfile(file_name + ".xml") and os.path.isfile(file_name + ".bin"):
use_cached_file = True
break

if use_cached_file:
om = core.read_model(file_name + ".xml")

dtype_mapping = {
Expand Down
28 changes: 28 additions & 0 deletions modules/processing_diffusers.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,31 @@ def set_pipeline_args(model, prompts: list, negative_prompts: list, prompts_2: t
shared.log.debug(f'Diffuser pipeline: {pipeline.__class__.__name__} task={sd_models.get_diffusers_task(model)} set={clean}')
return args

def recompile_model(hires=False):
    """Trigger an OpenVINO model recompile when the output size or batch size changed.

    Only active when torch compile is enabled with the ``openvino_fx`` backend;
    other backends are a no-op placeholder. Recompiling is done by unloading and
    reloading the model weights (and the refiner, when enabled), which resets the
    compiled model state. Compares against the hires target size when ``hires``
    is True, otherwise the base generation size.

    Relies on enclosing scope: ``p`` (processing params), ``shared``,
    ``sd_models`` and ``is_refiner_enabled``.
    """
    if not shared.opts.cuda_compile or shared.opts.cuda_compile_backend == 'none':
        return  # compile disabled: nothing to do
    if shared.opts.cuda_compile_backend != "openvino_fx":
        return  # placeholder: can be implemented for normal compile or TensorRT
    compile_height = p.hr_upscale_to_y if hires else p.height
    compile_width = p.hr_upscale_to_x if hires else p.width
    state = getattr(shared.sd_model, "compiled_model_state", None)
    # Recompile when no state exists yet, or when a pass has already run
    # (first_pass cleared) and the compiled dimensions/batch no longer match.
    needs_recompile = state is None or (
        not state.first_pass
        and (state.height != compile_height
             or state.width != compile_width
             or state.batch_size != p.batch_size)
    )
    if needs_recompile:
        shared.log.info("OpenVINO: Resolution change detected")
        shared.log.info("OpenVINO: Recompiling base model")
        sd_models.unload_model_weights(op='model')
        sd_models.reload_model_weights(op='model')
        if is_refiner_enabled:
            shared.log.info("OpenVINO: Recompiling refiner")
            sd_models.unload_model_weights(op='refiner')
            sd_models.reload_model_weights(op='refiner')
    # reload_model_weights recreates compiled_model_state, so re-fetch before updating.
    state = shared.sd_model.compiled_model_state
    state.height = compile_height
    state.width = compile_width
    state.batch_size = p.batch_size
    state.first_pass = False

is_karras_compatible = shared.sd_model.__class__.__init__.__annotations__.get("scheduler", None) == diffusers.schedulers.scheduling_utils.KarrasDiffusionSchedulers
use_sampler = p.sampler_name if not p.is_hr_pass else p.latent_sampler
if (not hasattr(shared.sd_model.scheduler, 'name')) or (shared.sd_model.scheduler.name != use_sampler) and (use_sampler != 'Default') and is_karras_compatible:
Expand Down Expand Up @@ -229,6 +254,8 @@ def set_pipeline_args(model, prompts: list, negative_prompts: list, prompts_2: t
unload_diffusers_lora()
return results

recompile_model()

if shared.opts.diffusers_move_base and not shared.sd_model.has_accelerate:
shared.sd_model.to(devices.device)

Expand Down Expand Up @@ -262,6 +289,7 @@ def set_pipeline_args(model, prompts: list, negative_prompts: list, prompts_2: t
# optional hires pass
if p.is_hr_pass:
p.init_hr()
recompile_model(hires=True)
if p.width != p.hr_upscale_to_x or p.height != p.hr_upscale_to_y:
if shared.opts.save and not p.do_not_save_samples and shared.opts.save_images_before_highres_fix and hasattr(shared.sd_model, 'vae'):
save_intermediate(latents=output.images, suffix="-before-hires")
Expand Down
5 changes: 3 additions & 2 deletions modules/sd_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -807,9 +807,10 @@ def load_diffuser(checkpoint_info=None, already_loaded_state_dict=None, timer=No
import torch._dynamo # pylint: disable=unused-import,redefined-outer-name
if shared.opts.cuda_compile_backend == "openvino_fx":
torch._dynamo.reset()
from modules.intel.openvino import openvino_fx, openvino_clear_caches, model_state # pylint: disable=unused-import
from modules.intel.openvino import openvino_fx, openvino_clear_caches, ModelState # pylint: disable=unused-import
openvino_clear_caches()
model_state.partition_id = 0
sd_model.compiled_model_state = ModelState()
sd_model.compiled_model_state.first_pass = True if not shared.opts.cuda_compile_precompile else False
log_level = logging.WARNING if shared.opts.cuda_compile_verbose else logging.CRITICAL # pylint: disable=protected-access
if hasattr(torch, '_logging'):
torch._logging.set_logs(dynamo=log_level, aot=log_level, inductor=log_level) # pylint: disable=protected-access
Expand Down

0 comments on commit 8ef4aa7

Please sign in to comment.