Skip to content

Commit

Permalink
Select more proper model weight file according to commands run just b…
Browse files Browse the repository at this point in the history
…efore (#2696)

* consider more complex case when prepare eval and optimize

* update readme

* align with pre-commit

* add comment
  • Loading branch information
eunwoosh authored Dec 6, 2023
1 parent 5333049 commit 73a7442
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 14 deletions.
8 changes: 4 additions & 4 deletions tools/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ The primary goal is to reduce the manual effort required in running experiments
### Automated Experiment Execution

- Given multiple variables, it automatically generates all combinations and runs the experiments.
- Proper model files are selected automatically when the "otx eval" command is executed, based on the preceding command.
- Proper model files are selected automatically when the "otx eval" or "otx optimize" command is executed, based on the preceding command.

### Fault Tolerance

Expand Down Expand Up @@ -59,7 +59,7 @@ Sample Experiment Recipe YAML File:

Arguments for recipe

- output*path (optional) : Output path where all experiment outputs are saved. Default is "./experiment*{executed_time}"
- output_path (optional) : Output path where all experiment outputs are saved. Default is "./experiment\_{executed_time}"
- constant (optional) :
It's similar to a constant or variable in programming languages.
You can use it to replace duplicated strings by using ${constant_name} in variables or commands.
Expand All @@ -80,9 +80,9 @@ If there are failed cases, variables and error logs are both printed and saved a

Note that all commands within each case are executed within the same workspace,
obviating the need to set a template path from the second command.
When the "otx eval" command is executed, the model file (model weight or exported model, etc.)
When the "otx eval" or "otx optimize" command is executed, the model file (model weight or exported model, etc.)
is automatically selected based on the preceding command.
The output file of "otx eval" is then stored at "workspace*path/outputs/XXXX*{train, export, optimize, etc.}/"
The output file of "otx eval" is then stored at "workspace_path/outputs/XXXX\_{train, export, optimize, etc.}/"
under the name "performance.json".

### Feature 2 : organize experiment result from single workspace
Expand Down
36 changes: 26 additions & 10 deletions tools/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -667,7 +667,10 @@ class OtxCommandRunner:
repeat_idx (int): repeat index.
"""

# Model file produced by each command type, listed in lookup priority order:
# when a following command (e.g. "otx eval") needs a model file, the first
# name found under the preceding command's output directory is used.
# NOTE(review): "optimize" lists both a torch checkpoint (QAT result) and an
# OpenVINO IR (PTQ result) — presumably only one exists per run; verify.
OUTPUT_FILE_NAME: Dict[str, List[str]] = {
    "export": ["openvino.bin"],
    "optimize": ["weights.pth", "openvino.bin"],
}

def __init__(self, command_ins: Command, repeat_idx: int):
self._command_ins = command_ins
Expand All @@ -676,7 +679,7 @@ def __init__(self, command_ins: Command, repeat_idx: int):
self._workspace = Path("_".join(self._command_var.values()).replace("/", "_") + f"_repeat_{repeat_idx}")
self._command_var["repeat"] = str(repeat_idx)
self._fail_logs: List[CommandFailInfo] = []
self._previous_cmd_entry: Optional[str] = None
self._previous_cmd_entry: Optional[List[str]] = []

@property
def fail_logs(self) -> List[CommandFailInfo]:
Expand All @@ -696,26 +699,37 @@ def run_command_list(self, dryrun: bool = False):
else:
print(" ".join(command))

self._previous_cmd_entry = command[1]
self._previous_cmd_entry.append(command[1])

if not dryrun:
organize_exp_result(self._workspace, self._command_var)

def _prepare_run_command(self, command: List[str]) -> bool:
    """Prepare a single otx command before it is executed.

    Injects the workspace argument into every command and, depending on the
    command entry ("train" / "eval" / "optimize"), wires the command to the
    model file produced by the most recent preceding non-eval command.

    Args:
        command (List[str]): tokenized otx command; ``command[1]`` is the
            command entry (e.g. "train", "eval", "optimize").

    Returns:
        bool: False when a model file required from a previous command cannot
            be found (the command should not run), True otherwise.
    """
    self.set_arguments_to_cmd(command, "--workspace", str(self._workspace))
    cmd_entry = command[1]

    # Walk the executed-command history backwards, skipping "eval" entries:
    # "eval" produces no model file, so the weight source is the latest
    # non-eval command. Stays None when no such command has run yet.
    previous_cmd = None
    for previous_cmd in reversed(self._previous_cmd_entry):
        if previous_cmd != "eval":
            break

    if cmd_entry == "train":
        # Tie the seed to the repeat index so each repeat is reproducible.
        self.set_arguments_to_cmd(command, "--seed", str(self._repeat_idx))
    elif cmd_entry == "eval":
        if previous_cmd in ("export", "optimize"):
            # Evaluate the artifact the preceding command produced.
            file_path = self._find_model_path(previous_cmd)
            if file_path is None:
                return False
            self.set_arguments_to_cmd(command, "--load-weights", str(file_path))
            # Store eval output next to the model it evaluated
            # (two levels up from the model file).
            output_path = str(file_path.parents[1])
        else:
            output_path = str(self._workspace / "outputs" / "latest_trained_model")
        self.set_arguments_to_cmd(command, "--output", output_path)
    elif cmd_entry == "optimize":
        if previous_cmd == "export":  # execute PTQ. If not, execute QAT
            file_path = self._find_model_path(previous_cmd)
            if file_path is None:
                return False
            self.set_arguments_to_cmd(command, "--load-weights", str(file_path))

    return True

Expand All @@ -731,11 +745,13 @@ def _find_model_path(self, cmd_entry: str):
if output_dir is None:
print(f"There is no {cmd_entry} output directory.")
return None
file_path = list(output_dir.rglob(self.OUTPUT_FILE_NAME[cmd_entry]))
if not file_path:
print(f"{self.OUTPUT_FILE_NAME[cmd_entry]} can't be found.")
return None
return file_path[0]
for file_name in self.OUTPUT_FILE_NAME[cmd_entry]:
file_path = list(output_dir.rglob(file_name))
if file_path:
return file_path[0]

print(f"{', '.join(self.OUTPUT_FILE_NAME[cmd_entry])} can't be found.")
return None

@staticmethod
def set_arguments_to_cmd(command: List[str], key: str, value: Optional[str] = None, before_params: bool = True):
Expand Down

0 comments on commit 73a7442

Please sign in to comment.