diff --git a/notebooks/elasticsearch-writer.ipynb b/notebooks/elasticsearch-writer.ipynb
index 4dfab0d17..43063d42d 100644
--- a/notebooks/elasticsearch-writer.ipynb
+++ b/notebooks/elasticsearch-writer.ipynb
@@ -9,6 +9,16 @@
     "You can also run the Aryn Partitioner locally by setting `use_partitioning_service` to `False`. Though you can use CPU to run the Aryn Partitioner, it is recommended to use an NVIDIA GPU for good performance."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If any of the packages below are missing, you can install them using the following command:\n",
+    "```bash\n",
+    "!pip install -q PACKAGE_NAME\n",
+    "```"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -237,7 +247,7 @@
     "response = client.search(\n",
     "    index = index_name,\n",
     "    knn={\n",
-    "      \"field\": \"embeddings\",\n",
+    "      \"field\": \"embedding\",\n",
     "      \"query_vector\": embedder.embed_query(\"How do I prevent accidents?\"),\n",
     "      \"k\": 10,\n",
     "      \"num_candidates\": 10\n",
@@ -327,7 +337,7 @@
     "from langchain.chains import RetrievalQAWithSourcesChain \n",
     "import os\n",
     "\n",
-    "vector_store = ElasticsearchStore(index_name=index_name, es_connection=client, vector_query_field=\"embeddings\", \n",
+    "vector_store = ElasticsearchStore(index_name=index_name, es_connection=client, vector_query_field=\"embedding\", \n",
     "                                  query_field=\"text_representation\", embedding=embedder)\n",
     "\n",
     "llm = ChatOpenAI( \n",
@@ -342,11 +352,18 @@
     ") \n",
     "qa.invoke({\"question\": \"How many accidents happened?\"})"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": ".venv",
    "language": "python",
    "name": "python3"
   },
@@ -360,7 +377,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.9"
+   "version": "3.12.4"
   }
  },
  "nbformat": 4,