From 761372f50224285e400a94e4d0dea1e88f512b77 Mon Sep 17 00:00:00 2001
From: Blake <89327689+dienachtderwelt@users.noreply.github.com>
Date: Thu, 5 Jan 2023 19:33:16 +0800
Subject: [PATCH 1/3] [Docs] fix docstring (#1578)

---
 mmedit/datasets/transforms/aug_pixel.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mmedit/datasets/transforms/aug_pixel.py b/mmedit/datasets/transforms/aug_pixel.py
index c4e637cb2b..72147d8293 100644
--- a/mmedit/datasets/transforms/aug_pixel.py
+++ b/mmedit/datasets/transforms/aug_pixel.py
@@ -20,8 +20,8 @@ class BinarizeImage(BaseTransform):
     Args:
         keys (Sequence[str]): The images to be binarized.
         binary_thr (float): Threshold for binarization.
-        amin (int): Lower limits of pixel value.
-        amx (int): Upper limits of pixel value.
+        a_min (int): Lower limits of pixel value.
+        a_max (int): Upper limits of pixel value.
         dtype (np.dtype): Set the data type of the output. Default: np.uint8
     """

@@ -86,8 +86,8 @@ class Clip(BaseTransform):

     Args:
         keys (list[str]): The keys whose values are clipped.
-        amin (int): Lower limits of pixel value.
-        amx (int): Upper limits of pixel value.
+        a_min (int): Lower limits of pixel value.
+        a_max (int): Upper limits of pixel value.
     """

     def __init__(self, keys, a_min=0, a_max=255):

From 80c33b20cbfa3ba59c8127eb477ec6e5d4c5adc1 Mon Sep 17 00:00:00 2001
From: rangoliu
Date: Mon, 9 Jan 2023 10:31:22 +0800
Subject: [PATCH 2/3] [Fix] fix ci follow mmengine (#1589)

fix ci follow mmengine
---
 .circleci/test.yml                     | 2 +-
 .github/workflows/merge_stage_test.yml | 2 +-
 setup.py                               | 1 -
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/.circleci/test.yml b/.circleci/test.yml
index 2cd5a7a5f8..721d76b9a0 100644
--- a/.circleci/test.yml
+++ b/.circleci/test.yml
@@ -144,7 +144,7 @@ workflows:
           name: minimum_version_cpu
           torch: 1.6.0
           torchvision: 0.7.0
-          python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images
+          python: 3.7.4
           requires:
             - lint
       - build_cpu:

diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml
index ff94784262..0e395cdde5 100644
--- a/.github/workflows/merge_stage_test.yml
+++ b/.github/workflows/merge_stage_test.yml
@@ -26,7 +26,7 @@ jobs:
     runs-on: ubuntu-18.04
     strategy:
       matrix:
-        python-version: [3.6, 3.8, 3.9]
+        python-version: [3.8, 3.9]
         torch: [1.8.1]
         include:
           - torch: 1.8.1

diff --git a/setup.py b/setup.py
index 73297ee901..dc8924b9b5 100644
--- a/setup.py
+++ b/setup.py
@@ -252,7 +252,6 @@ def add_mim_extention():
         'License :: OSI Approved :: Apache Software License',
         'Operating System :: OS Independent',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',

From 6de38d01b294db502af456905127b7de95827515 Mon Sep 17 00:00:00 2001
From: rangoliu
Date: Mon, 9 Jan 2023 11:29:36 +0800
Subject: [PATCH 3/3] [Enhancement] add more information in sd readme (#1582)

add more information in sd readme
---
 .gitignore                         |  1 +
 configs/stable_diffusion/README.md | 29 +++++++++++++++++++++++++----
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/.gitignore b/.gitignore
index eafffbfb23..2a5bba964c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,6 +124,7 @@ mmedit/.mim
 demo/*.png
 *.csv
 out/*.png
+resources/

 # Pytorch
 *.pth

diff --git a/configs/stable_diffusion/README.md b/configs/stable_diffusion/README.md
index 5f11460a2e..73ee3304b6 100644
--- a/configs/stable_diffusion/README.md
+++ b/configs/stable_diffusion/README.md
@@ -10,13 +10,34 @@
-Stable Diffusion is a latent diffusion model conditioned on the text embeddings of a CLIP text encoder, which allows you to create images from text inputs.
+Stable Diffusion is a latent diffusion model conditioned on the text embeddings of a CLIP text encoder, which allows you to create images from text inputs. This model builds upon the CVPR'22 work [High-Resolution Image Synthesis with Latent Diffusion Models](https://ommer-lab.com/research/latent-diffusion-models/). The official code was released at [stable-diffusion](https://github.com/CompVis/stable-diffusion) and also implemented at [diffusers](https://github.com/huggingface/diffusers). We support this algorithm here to facilitate the community to learn together and compare it with other text2image methods.
[The remainder of this hunk replaces the previous centered demo-image markup with an HTML table of three generated samples; only the captions are recoverable in this excerpt: "A mecha robot in a favela in expressionist style", "A Chinese palace is beside a beautiful lake", and "A panda is having dinner in KFC".]
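
The README paragraph added above points readers to the diffusers implementation for creating images from text. As a rough, illustrative sketch (not part of this patch), the snippet below shows how such a text-to-image call might look with diffusers; the checkpoint name `runwayml/stable-diffusion-v1-5`, the prompt, and the output filename are assumptions, not values taken from this repository.

```python
# Illustrative sketch only: assumes the `diffusers` package is installed and the
# `runwayml/stable-diffusion-v1-5` checkpoint is available locally or via the Hub.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed checkpoint name
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")  # half-precision inference expects a CUDA GPU

prompt = "A mecha robot in a favela in expressionist style"
image = pipe(prompt).images[0]  # the pipeline returns a list of PIL images
image.save("mecha_robot.png")
```

Prompts such as the three captions above should produce images in the same spirit as the samples, though the exact outputs depend on the random seed and scheduler settings.
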
## Pretrained models