diff --git a/README.md b/README.md
index 44b71343..a28e0aa4 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # 🔥 Learning Interpretability Tool (LIT)
 
-
+
 
 The Learning Interpretability Tool (🔥LIT, formerly known as the Language
 Interpretability Tool) is a visual, interactive ML model-understanding tool that
@@ -21,8 +21,8 @@ LIT is built to answer questions such as:
 LIT supports a variety of debugging workflows through a browser-based UI.
 Features include:
 
-* **Local explanations** via salience maps, attention, and rich visualization
-  of model predictions.
+* **Local explanations** via salience maps and rich visualization of model
+  predictions.
 * **Aggregate analysis** including custom metrics, slicing and binning, and
   visualization of embedding spaces.
 * **Counterfactual generation** via manual edits or generator plug-ins to
@@ -90,8 +90,8 @@ git clone https://github.com/PAIR-code/lit.git && cd lit
 ```
 
-Note: be sure you are running Python 3.10. If you have a different version on
-your system, use the `conda` instructions below to set up a Python 3.10
+Note: be sure you are running Python 3.9+. If you have a different version on
+your system, use the `conda` instructions below to set up a Python 3.9
 environment.
 
 Set up a Python environment with `venv`:
 
@@ -106,7 +106,7 @@ Or set up a Python environment using `conda`:
 ```sh
 conda create --name lit-nlp
 conda activate lit-nlp
-conda install python=3.10
+conda install python=3.9
 conda install pip
 ```
 
@@ -142,13 +142,7 @@ To explore classification and regression models on tasks from the popular
 [GLUE benchmark](https://gluebenchmark.com/):
 
 ```sh
-python -m lit_nlp.examples.glue_demo --port=5432 --quickstart
-```
-
-Or, using `docker`:
-
-```sh
-docker run --rm -e DEMO_NAME=glue_demo -p 5432:5432 -t lit-nlp --quickstart
+python -m lit_nlp.examples.glue.demo --port=5432 --quickstart
 ```
 
 Navigate to http://localhost:5432 to access the LIT UI.
@@ -160,19 +154,6 @@ but you can switch to
 [STS-B](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) or
 [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/) using the toolbar or the
 gear icon in the upper right.
-
-### Quick-start: language modeling
-
-To explore predictions from a pre-trained language model (BERT or GPT-2), run:
-
-```sh
-python -m lit_nlp.examples.lm_demo --models=bert-base-uncased --port=5432
-```
-
-Or, using `docker`:
-
-```sh
-docker run --rm -e DEMO_NAME=lm_demo -p 5432:5432 -t lit-nlp --models=bert-base-uncased
 ```
 
 And navigate to http://localhost:5432 for the UI.
@@ -192,7 +173,7 @@ See [lit_nlp/examples](./lit_nlp/examples). Most are run similarly to the
 quickstart example above:
 
 ```sh
-python -m lit_nlp.examples.<example_name> --port=5432 [optional --args]
+python -m lit_nlp.examples.<example_name>.demo --port=5432 [optional --args]
 ```
 
 ## User Guide
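For context on the module renames in the next two files: gunicorn resolves a `wsgi_app` string of the form `package.module:factory()` by importing the module and calling the named factory. The sketch below is a simplified illustration of that lookup, not gunicorn's actual loader; `resolve_wsgi_app` is a hypothetical helper name.

```python
# Simplified sketch of how a "module:factory()" wsgi_app string is resolved.
# Not gunicorn's real loader; for illustration only.
import importlib


def resolve_wsgi_app(spec: str):
    """Imports `pkg.mod` from a "pkg.mod:factory()" spec and calls the factory."""
    module_path, _, expr = spec.partition(":")
    module = importlib.import_module(module_path)
    # Strip a trailing "()" to get the attribute name, then call it if present.
    name = expr.removesuffix("()")
    target = getattr(module, name)
    return target() if expr.endswith("()") else target


# With the change below, DEMO_NAME=glue would resolve via:
#   resolve_wsgi_app("lit_nlp.examples.glue.demo:get_wsgi_app()")
```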
diff --git a/lit_nlp/examples/gunicorn_config.py b/lit_nlp/examples/gunicorn_config.py
index 7817674a..e4e635ea 100644
--- a/lit_nlp/examples/gunicorn_config.py
+++ b/lit_nlp/examples/gunicorn_config.py
@@ -12,15 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Config for gunicorn for cloud-hosted demos."""
+"""gunicorn configuration for cloud-hosted demos."""
 
 import os
 
-_DEMO_NAME = os.getenv('DEMO_NAME', 'glue_demo')
+_DEMO_NAME = os.getenv('DEMO_NAME', 'glue')
 _DEMO_PORT = os.getenv('DEMO_PORT', '5432')
 
 bind = f'0.0.0.0:{_DEMO_PORT}'
 timeout = 3600
 threads = 8
 worker_class = 'gthread'
-wsgi_app = f'lit_nlp.examples.{_DEMO_NAME}:get_wsgi_app()'
+wsgi_app = f'lit_nlp.examples.{_DEMO_NAME}.demo:get_wsgi_app()'
diff --git a/lit_nlp/examples/tydi/demo.py b/lit_nlp/examples/tydi/demo.py
index 307ad8a3..5619e3d0 100644
--- a/lit_nlp/examples/tydi/demo.py
+++ b/lit_nlp/examples/tydi/demo.py
@@ -12,6 +12,7 @@
 from absl import app
 from absl import flags
+from absl import logging
 from lit_nlp import dev_server
 from lit_nlp import server_flags
 from lit_nlp.components import word_replacer
@@ -40,7 +41,9 @@ def get_wsgi_app() -> Optional[dev_server.LitServerType]:
   # Parse flags without calling app.run(main), to avoid conflict with
   # gunicorn command line flags.
   unused = flags.FLAGS(sys.argv, known_only=True)
-  return main(unused)
+  if unused:
+    logging.info("tydi_demo:get_wsgi_app() called with unused args: %s", unused)
+  return main([])
 
 
 def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
diff --git a/website/sphinx_src/docker.md b/website/sphinx_src/docker.md
index c6b501d4..b58279c6 100644
--- a/website/sphinx_src/docker.md
+++ b/website/sphinx_src/docker.md
@@ -49,13 +49,11 @@ below.
 ```shell
 # DEMO_NAME is used to complete the Python module path
 #
-#   "lit_nlp.examples.$DEMO_NAME"
+#   "lit_nlp.examples.$DEMO_NAME.demo:get_wsgi_app()"
 #
 # Therefore, valid values for DEMO_NAME are Python module paths in the
-# lit_nlp/examples directory, such as
-#
-# * direct children -- glue_demo, lm_demo, image_demo, t5_demo, etc.
-docker run --rm -p 5432:5432 -e DEMO_NAME=lm_demo lit-nlp
+# lit_nlp/examples directory, such as glue, penguin, tydi, etc.
+docker run --rm -p 5432:5432 -e DEMO_NAME=penguin lit-nlp
 
 # Use the DEMO_PORT environment variable to change the port that LIT uses in
 # the container. Be sure to also change the -p option to map the container's
@@ -66,8 +64,8 @@ docker run --rm -p 2345:2345 -e DEMO_PORT=2345 lit-nlp
 # containers on your machine using the combination of the DEMO_NAME and
 # DEMO_PORT arguments, and docker run with the -d flag to run the container in
 # the background.
-docker run -d -p 5432:5432 -e DEMO_NAME=t5_demo lit-nlp
-docker run -d -p 2345:2345 -e DEMO_NAME=lm_demo -e DEMO_PORT=2345 lit-nlp
+docker run -d -p 5432:5432 -e DEMO_NAME=penguin lit-nlp
+docker run -d -p 2345:2345 -e DEMO_NAME=tydi -e DEMO_PORT=2345 lit-nlp
 ```
 
 ## Integrating Custom LIT Instances with the Default Docker Image
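Taken together, these changes assume every example exposes a `demo` module with a `get_wsgi_app()` factory. Below is a minimal sketch of a demo module following that convention: `my_demo` is hypothetical, the flag defaults mirror the pattern in the existing demos, and no real models or datasets are wired in.

```python
"""Hypothetical demo module: lit_nlp/examples/my_demo/demo.py (sketch only)."""

import sys
from collections.abc import Sequence
from typing import Optional

from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags

FLAGS = flags.FLAGS


def get_wsgi_app() -> Optional[dev_server.LitServerType]:
  """Factory gunicorn reaches via 'lit_nlp.examples.my_demo.demo:get_wsgi_app()'."""
  # Mirror the existing demos: serve as an external WSGI app.
  FLAGS.set_default("server_type", "external")
  FLAGS.set_default("demo_mode", True)
  # Parse flags without calling app.run(main), to avoid conflict with
  # gunicorn command line flags.
  unused = flags.FLAGS(sys.argv, known_only=True)
  if unused:
    logging.info("my_demo:get_wsgi_app() called with unused args: %s", unused)
  return main([])


def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
  del argv  # Unused.
  # Placeholders: a real demo registers lit_model.Model and
  # lit_dataset.Dataset instances here.
  models = {}
  datasets = {}
  lit_server = dev_server.Server(models, datasets, **server_flags.get_flags())
  return lit_server.serve()


if __name__ == "__main__":
  app.run(main)
```

Under gunicorn, `DEMO_NAME=my_demo` would point `wsgi_app` at this factory; run directly, `python -m lit_nlp.examples.my_demo.demo` starts the development server.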