diff --git a/docs/contribute/release_process.rst b/docs/contribute/release_process.rst
index e2bf6455b5af..685b56cf1310 100644
--- a/docs/contribute/release_process.rst
+++ b/docs/contribute/release_process.rst
@@ -51,7 +51,13 @@ Prepare the Release Notes
 
 Release note contains new features, improvement, bug fixes, known issues and deprecation, etc. TVM provides `monthly dev report `_ collects developing progress each month. It could be helpful to who writes the release notes.
 
-It is recommended to open a Github issue to collect feedbacks for the release note draft before cutting the release branch.
+It is recommended to open a Github issue to collect feedback on the release note draft before cutting the release branch. See the scripts in ``tests/scripts/release`` for some starting points.
+
+
+Prepare the Release Candidate
+-----------------------------
+
+There may be some code changes necessary on the release branch before the release. Ensure all version numbers are up to date.
 
 Prepare the GPG Key
@@ -72,7 +78,7 @@ The last step is to update the KEYS file with your code signing key https://www.
     cd svn-tvm
     # edit KEYS file
     svn ci --username $ASF_USERNAME --password "$ASF_PASSWORD" -m "Update KEYS"
-    # update downloads.apache.org
+    # update downloads.apache.org (note that only PMC members can update the dist/release directory)
     svn rm --username $ASF_USERNAME --password "$ASF_PASSWORD" https://dist.apache.org/repos/dist/release/tvm/KEYS -m "Update KEYS"
     svn cp --username $ASF_USERNAME --password "$ASF_PASSWORD" https://dist.apache.org/repos/dist/dev/tvm/KEYS https://dist.apache.org/repos/dist/release/tvm/ -m "Update KEYS"
@@ -86,6 +92,7 @@ To cut a release candidate, one needs to first cut a branch using selected versi
     git clone https://github.com/apache/tvm.git
     cd tvm/
+    # Replace v0.6.0 with the relevant version
     git branch v0.6.0
     git push --set-upstream origin v0.6.0
@@ -111,8 +118,9 @@ Create source code artifacts,
 
 .. code-block:: bash
 
-    git clone git@github.com:apache/tvm.git apache-tvm-src-v0.6.0.rc0
-    cd apache-tvm-src-v0.6.0.rc0
+    # Replace v0.6.0 with the relevant version
+    git clone git@github.com:apache/tvm.git apache-tvm-src-v0.6.0
+    cd apache-tvm-src-v0.6.0
     git checkout v0.6
     git submodule update --init --recursive
     git checkout v0.6.0.rc0
@@ -120,7 +128,7 @@ Create source code artifacts,
     find . -name ".git*" -print0 | xargs -0 rm -rf
     cd ..
     brew install gnu-tar
-    gtar -czvf apache-tvm-src-v0.6.0.rc0.tar.gz apache-tvm-src-v0.6.0.rc0
+    gtar -czvf apache-tvm-src-v0.6.0.rc0.tar.gz apache-tvm-src-v0.6.0
 
 Use your GPG key to sign the created artifact. First make sure your GPG is set to use the correct private key,
@@ -191,13 +199,35 @@ Remember to create a new release TAG (v0.6.0 in this case) on Github and remove
 
 .. code-block:: bash
 
-    git push --delete origin v0.6.0.rc2
+    git push --delete origin v0.6.0.rc2
 
 
 Update the TVM Website
 ----------------------
 
-The website repository is located at `https://github.com/apache/tvm-site <https://github.com/apache/tvm-site>`_. Modify the download page to include the release artifacts as well as the GPG signature and SHA hash.
+The website repository is located at `https://github.com/apache/tvm-site <https://github.com/apache/tvm-site>`_. Modify the download page to include the release artifacts as well as the GPG signature and SHA hash. Since TVM's docs are continually updated, upload a fixed version of the release docs. If CI has deleted the docs from the release by the time you go to update the website, you can restart the CI build for the release branch on Jenkins.
+See the example code below for a starting point.
+
+.. code-block:: bash
+
+    git clone https://github.com/apache/tvm-site.git
+    pushd tvm-site
+    git checkout asf-site
+    pushd docs
+
+    # make release docs directory
+    mkdir v0.9.0
+    pushd v0.9.0
+
+    # download the release docs from CI
+    # find this URL by inspecting the CI logs for the most recent build of the release branch
+    curl -LO https://tvm-jenkins-artifacts-prod.s3.us-west-2.amazonaws.com/tvm/v0.9.0/1/docs/docs.tgz
+    tar xf docs.tgz
+    rm docs.tgz
+
+    # add the docs and push
+    git add .
+    git commit -m "Add v0.9.0 docs"
+    git push
 
 
 Post the Announcement
diff --git a/tests/scripts/release/.gitignore b/tests/scripts/release/.gitignore
new file mode 100644
index 000000000000..3f183296deea
--- /dev/null
+++ b/tests/scripts/release/.gitignore
@@ -0,0 +1,5 @@
+*.md
+!README.md
+*.csv
+*.pkl
+
diff --git a/tests/scripts/release/README.md b/tests/scripts/release/README.md
new file mode 100644
index 000000000000..82e2e5040ba6
--- /dev/null
+++ b/tests/scripts/release/README.md
@@ -0,0 +1,43 @@
+<!--- Licensed to the Apache Software Foundation (ASF) under one -->
+<!--- or more contributor license agreements. See the NOTICE file -->
+<!--- distributed with this work for additional information -->
+<!--- regarding copyright ownership. The ASF licenses this file -->
+<!--- to you under the Apache License, Version 2.0 (the -->
+<!--- "License"); you may not use this file except in compliance -->
+<!--- with the License. You may obtain a copy of the License at -->
+<!--- -->
+<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
+<!--- -->
+<!--- Unless required by applicable law or agreed to in writing, -->
+<!--- software distributed under the License is distributed on an -->
+<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
+<!--- KIND, either express or implied. See the License for the -->
+<!--- specific language governing permissions and limitations -->
+<!--- under the License. -->
+
+These scripts can be helpful when creating release notes.
+
+```bash
+# example: create a csv file of all PRs between the v0.8 and v0.9.0 releases
+# the result will be in 2 CSV files based on the --threshold arg (small PRs vs large PRs)
+export GITHUB_TOKEN=
+python release/gather_prs.py --from-commit $(git rev-parse v0.9.0) --to-commit $(git merge-base origin/main v0.8.0)
+```
+
+You can then import this CSV into a collaborative spreadsheet editor to distribute the work of categorizing PRs for the notes. Once done, you can download the resulting CSV and convert it to readable release notes.
+
+```bash
+# example: use a csv of categorized PRs to create a markdown file
+python make_notes.py --notes-csv categorized_prs.csv > out.md
+```
+
+You can also create a list of RFCs.
+
+```bash
+git clone https://github.com/apache/tvm-rfcs.git
+
+# example: list RFCs since a specific commit in the tvm-rfcs repo
+python list_rfcs.py --since-commit <commit> --rfcs-repo ./tvm-rfcs > rfc.md
+```
+
+Finally, combine `rfc.md` and `out.md` along with some prose to create the final release notes.
\ No newline at end of file
diff --git a/tests/scripts/release/gather_prs.py b/tests/scripts/release/gather_prs.py
new file mode 100644
index 000000000000..0720a87d042b
--- /dev/null
+++ b/tests/scripts/release/gather_prs.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
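+
+# Walk the apache/tvm commit history between two commits via the GitHub GraphQL API, cache the
+# raw results in out.pkl, and write the associated PRs to two CSV files (out-large.csv and
+# out-small.csv) split by the --threshold argument. See tests/scripts/release/README.md for an
+# example invocation.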
+
+import argparse
+import os
+import pickle
+from pathlib import Path
+import csv
+import sys
+from typing import Callable, Dict, List, Any
+
+REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
+sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
+
+from git_utils import git, GitHubRepo
+from github_tag_teams import tags_from_title
+
+GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
+
+
+PRS_QUERY = """
+query ($owner: String!, $name: String!, $after: String, $pageSize: Int!) {
+  repository(owner: $owner, name: $name) {
+    defaultBranchRef {
+      name
+      target {
+        ... on Commit {
+          oid
+          history(after: $after, first: $pageSize) {
+            pageInfo {
+              hasNextPage
+              endCursor
+            }
+            nodes {
+              oid
+              committedDate
+              associatedPullRequests(first: 1) {
+                nodes {
+                  number
+                  additions
+                  changedFiles
+                  deletions
+                  author {
+                    login
+                  }
+                  title
+                  body
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+"""
+
+
+def append_and_save(items, file):
+    if not file.exists():
+        data = []
+    else:
+        with open(file, "rb") as f:
+            data = pickle.load(f)
+
+    data += items
+    with open(file, "wb") as f:
+        pickle.dump(data, f)
+
+
+def fetch_pr_data(args, cache):
+    github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)
+
+    if args.from_commit is None or args.to_commit is None:
+        print("--from-commit and --to-commit must be specified if --skip-query is not used")
+        exit(1)
+
+    i = 0
+    page_size = 80
+    cursor = f"{args.from_commit} {i}"
+
+    while True:
+        r = github.graphql(
+            query=PRS_QUERY,
+            variables={
+                "owner": user,
+                "name": repo,
+                "after": cursor,
+                "pageSize": page_size,
+            },
+        )
+        data = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]
+        if not data["pageInfo"]["hasNextPage"]:
+            break
+        cursor = data["pageInfo"]["endCursor"]
+        results = data["nodes"]
+
+        to_add = []
+        stop = False
+        for r in results:
+            if r["oid"] == args.to_commit:
+                print(f"Found {r['oid']}, stopping")
+                stop = True
+                break
+            else:
+                to_add.append(r)
+
+        oids = [r["oid"] for r in to_add]
+        print(oids)
+        append_and_save(to_add, cache)
+        if stop:
+            break
+        print(i)
+        i += page_size
+
+
+def write_csv(
+    filename: str, data: List[Dict[str, Any]], filter: Callable[[Dict[str, Any]], bool]
+) -> None:
+    with open(filename, "w", newline="") as csvfile:
+        writer = csv.writer(csvfile, quotechar='"')
+        writer.writerow(
+            (
+                "category",
+                "description",
+                "date",
+                "number",
+                "author",
+                "tags",
+                "title",
+                "additions",
+                "deletions",
+                "changed files",
+            )
+        )
+        for item in data:
+            pr = item["associatedPullRequests"]["nodes"][0]
+            if not filter(pr):
+                continue
+            tags = tags_from_title(pr["title"])
+            actual_tags = []
+            for t in tags:
+                items = [x.strip() for x in t.split(",")]
+                actual_tags += items
+            tags = actual_tags
+            tags = [t.lower() for t in tags]
+            category = ""
+            if len(tags) == 1:
+                category = tags[0]
+            writer.writerow(
+                (
+                    category,
+                    "",
+                    item["committedDate"],
+                    f'https://github.com/apache/tvm/pull/{pr["number"]}',
+                    pr["author"]["login"],
+                    ", ".join(tags),
+                    pr["title"],
+                    pr["additions"],
+                    pr["deletions"],
+                    pr["changedFiles"],
+                )
+            )
+
+
+if __name__ == "__main__":
+    help = "List out commits with attached PRs since a certain commit"
+    parser = argparse.ArgumentParser(description=help)
+    parser.add_argument("--from-commit", help="commit to start checking PRs from")
+    parser.add_argument("--to-commit", help="commit to stop checking PRs from")
+    parser.add_argument(
+        "--threshold", default=150, help="sum of additions + deletions to consider large"
+    )
+    parser.add_argument(
+        "--skip-query",
+        action="store_true",
+        help="don't query GitHub and instead use cache file",
+    )
+    args = parser.parse_args()
+    user = "apache"
+    repo = "tvm"
+    threshold = int(args.threshold)
+
+    cache = Path("out.pkl")
+    if not args.skip_query:
+        fetch_pr_data(args, cache)
+
+    with open(cache, "rb") as f:
+        data = pickle.load(f)
+
+    print(f"Found {len(data)} PRs")
+
+    write_csv(
+        filename="out-large.csv",
+        data=data,
+        filter=lambda pr: pr["additions"] + pr["deletions"] > threshold,
+    )
+    write_csv(
+        filename="out-small.csv",
+        data=data,
+        filter=lambda pr: pr["additions"] + pr["deletions"] <= threshold,
+    )
diff --git a/tests/scripts/release/list_rfcs.py b/tests/scripts/release/list_rfcs.py
new file mode 100644
index 000000000000..5f62fb0ef7a7
--- /dev/null
+++ b/tests/scripts/release/list_rfcs.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import subprocess
+import sys
+
+LINK_BASE = "https://github.com/apache/tvm-rfcs/blob/main/"
+COMMIT_BASE = "https://github.com/apache/tvm-rfcs/commit/"
+
+
+def sprint(*args):
+    print(*args, file=sys.stderr)
+
+
+if __name__ == "__main__":
+    help = "List out RFCs since a commit"
+    parser = argparse.ArgumentParser(description=help)
+    parser.add_argument("--since-commit", required=True, help="last commit to include")
+    parser.add_argument("--rfcs-repo", required=True, help="path to checkout of apache/tvm-rfcs")
+    args = parser.parse_args()
+    user = "apache"
+    repo = "tvm"
+    rfc_repo = args.rfcs_repo
+    subprocess.run("git fetch origin main", cwd=rfc_repo, shell=True)
+    subprocess.run("git checkout main", cwd=rfc_repo, shell=True)
+    subprocess.run("git reset --hard origin/main", cwd=rfc_repo, shell=True)
+    r = subprocess.run(
+        f"git log {args.since_commit}..HEAD --format='%H %s'",
+        cwd=rfc_repo,
+        shell=True,
+        stdout=subprocess.PIPE,
+        encoding="utf-8",
+    )
+    commits = r.stdout.strip().split("\n")
+    for commit in commits:
+        parts = commit.split()
+        commit = parts[0]
+        subject = " ".join(parts[1:])
+
+        r2 = subprocess.run(
+            f"git diff-tree --no-commit-id --name-only -r {commit}",
+            cwd=rfc_repo,
+            shell=True,
+            stdout=subprocess.PIPE,
+            encoding="utf-8",
+        )
+        files = r2.stdout.strip().split("\n")
+        rfc_file = None
+        for file in files:
+            if file.startswith("rfcs/") and file.endswith(".md"):
+                if rfc_file is not None:
+                    sprint(f"error on {commit} {subject}")
+                rfc_file = file
+
+        if rfc_file is None:
+            sprint(f"error on {commit} {subject}")
+            continue
+
+        print(f" * [{subject}]({LINK_BASE + rfc_file}) ([`{commit[:7]}`]({COMMIT_BASE + commit}))")
diff --git a/tests/scripts/release/make_notes.py b/tests/scripts/release/make_notes.py
new file mode 100644
index 000000000000..95cb15197275
--- /dev/null
+++ b/tests/scripts/release/make_notes.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import pickle
+from pathlib import Path
+import csv
+import sys
+from collections import defaultdict
+from typing import Callable, Dict, List, Any
+
+REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
+sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
+
+
+def strip_header(title: str, header: str) -> str:
+    pos = title.lower().find(header.lower())
+    if pos == -1:
+        return title
+
+    return title[0:pos] + title[pos + len(header) :].strip()
+
+
+def sprint(*args):
+    print(*args, file=sys.stderr)
+
+
+if __name__ == "__main__":
+    help = "List out commits with attached PRs since a certain commit"
+    parser = argparse.ArgumentParser(description=help)
+    parser.add_argument("--notes-csv", required=True, help="csv file of categorized PRs in order")
+    args = parser.parse_args()
+    user = "apache"
+    repo = "tvm"
+
+    cache = Path("out.pkl")
+    if not cache.exists():
+        sprint("run gather_prs.py first to generate out.pkl")
+        exit(1)
+
+    with open(cache, "rb") as f:
+        data = pickle.load(f)
+
+    sprint(data[1])
+    reverse = {}
+    for item in data:
+        prs = item["associatedPullRequests"]["nodes"]
+        if len(prs) != 1:
+            continue
+
+        pr = prs[0]
+        reverse[pr["number"]] = pr
+
+    def pr_title(number, heading):
+        title = reverse[int(number)]["title"]
+        title = strip_header(title, heading)
+        return title
+
+    headings = defaultdict(lambda: defaultdict(list))
+    output = ""
+
+    sprint("Opening CSV")
+    with open(args.notes_csv) as f:
+        # Skip header stuff
+        f.readline()
+        f.readline()
+        f.readline()
+
+        input_file = csv.DictReader(f)
+
+        i = 0
+        for row in input_file:
+            category = row["category"].strip()
+            subject = row["subject"].strip()
+            pr_number = row["url"].split("/")[-1]
+            if category == "" or subject == "":
+                sprint(f"Skipping {pr_number}")
+                continue
+
+            headings[category][subject].append(pr_number)
+            i += 1
+            # if i > 30:
+            #     break
+
+    def sorter(x):
+        if x == "Misc":
+            return 10
+        return 0
+
+    keys = list(headings.keys())
+    keys = list(sorted(keys))
+    keys = list(sorted(keys, key=sorter))
+    for key in keys:
+        value = headings[key]
+        if key == "DO NOT INCLUDE":
+            continue
+        value = dict(value)
+        output += f"### {key}\n"
+
+        misc = []
+        misc += value.get("n/a", [])
+        misc += value.get("Misc", [])
+        for pr_number in misc:
+            output += f" * #{pr_number} - {pr_title(pr_number, '[' + key + ']')}\n"
+
+        for subheading, pr_numbers in value.items():
+            if subheading == "DO NOT INCLUDE":
+                continue
+            if subheading == "n/a" or subheading == "Misc":
+                continue
+            else:
+                output += f" * {subheading} - " + ", ".join([f"#{n}" for n in pr_numbers]) + "\n"
+        # print(value)
+
+        output += "\n"
+
+    print(output)