
Commit

Fix technique (#18)
* fix case issue #14

* fix issue causing failure due to url length limit #17
fqrious authored Jan 8, 2025
1 parent 08acf9a commit 8f0f711
Showing 2 changed files with 16 additions and 6 deletions.
1 change: 1 addition & 0 deletions src/parser.py
@@ -89,6 +89,7 @@ def process_tags_and_labels(cls, data: dict):
         if value := data.get(key):
             references.append(dict(source_name='sigma-rule', external_id=key, description=value))
         for tag in data.get('tags', []):
+            tag = tag.lower()
             if match := re.match(r'detection\.(.*)', tag):
                 references.append(dict(source_name='sigma-rule', external_id='detection', description=match.group(1)))
             elif match := re.match(r'(cve\..*)', tag):
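
The one-line fix matters because the tag patterns here are case-sensitive: a rule tagged 'Detection.Emerging-Threats' would previously fall through every branch. A minimal sketch of the before/after behaviour (the tag value is illustrative, not from the repository):

    import re

    tag = "Detection.Emerging-Threats"  # hypothetical mixed-case tag from a rule

    # Before the fix: the case-sensitive pattern misses mixed-case tags
    print(re.match(r'detection\.(.*)', tag))   # -> None

    # After the fix: normalise to lowercase first, then match
    m = re.match(r'detection\.(.*)', tag.lower())
    print(m.group(1))                          # -> 'emerging-threats'
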
21 changes: 15 additions & 6 deletions src/retriever.py
@@ -23,12 +23,15 @@ def get_attack_objects(self, matrix, attack_id):
         return self._retrieve_objects(endpoint)
 
     def get_objects_by_external_ids(self, ids, type, key='objects', query_filter='id'):
-        objects = self._retrieve_objects(urljoin(self.api_root, f"v1/{type}/objects/?{query_filter}={','.join(ids)}"), key)
         objects_map : dict[str, list[dict]] = {}
-        for obj in objects:
-            object_id = obj['external_references'][0]['external_id']
-            arr = objects_map.setdefault(object_id, [])
-            arr.append(obj)
+        ids = list(set(ids))
+
+        for chunked_ids in chunked(ids, 100):
+            objects = self._retrieve_objects(urljoin(self.api_root, f"v1/{type}/objects/?{query_filter}={','.join(chunked_ids)}"), key)
+            for obj in objects:
+                object_id = obj['external_references'][0]['external_id']
+                arr = objects_map.setdefault(object_id, [])
+                arr.append(obj)
         return objects_map
 
     def get_attack_tactics(self, matrix):
@@ -61,4 +64,10 @@ def _retrieve_objects(self, endpoint, key='objects'):
             if d['page_results_count'] < d['page_size']:
                 break
         return data
-
+
+
+def chunked(iterable, n):
+    if not iterable:
+        return []
+    for i in range(0, len(iterable), n):
+        yield iterable[i : i + n]
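
Deduplicating the IDs and batching them 100 per request works around servers and proxies that reject over-long request URIs (often with HTTP 414): a single query string carrying hundreds of comma-joined IDs can exceed those limits. A minimal sketch of how the helper composes with the batched requests (the api_root value and ID format are illustrative placeholders, and the empty-iterable guard is omitted):

    from urllib.parse import urljoin

    def chunked(iterable, n):
        # yield successive slices of at most n items
        for i in range(0, len(iterable), n):
            yield iterable[i : i + n]

    api_root = "https://example.com/api/"       # placeholder, not the real service
    ids = [f"T{1000 + i}" for i in range(250)]  # hypothetical ATT&CK-style IDs

    # 250 unique IDs -> 3 requests of at most 100 IDs each, instead of one
    # oversized URL that the server may refuse outright
    for batch in chunked(sorted(set(ids)), 100):
        url = urljoin(api_root, f"v1/attack-enterprise/objects/?id={','.join(batch)}")
        print(len(batch), len(url))             # each URL stays well under typical limits
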
