Skip to content

Commit

Permalink
Merge pull request #6 from Elias2660/formatting-changes
Browse files Browse the repository at this point in the history
Automated formatting changes
  • Loading branch information
Elias2660 authored Jul 7, 2024
2 parents 2a1d816 + 59311da commit 8f1ce75
Show file tree
Hide file tree
Showing 4 changed files with 53 additions and 35 deletions.
52 changes: 32 additions & 20 deletions cogs/ChatBotPrompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,29 +46,34 @@ async def on_ready(self):

# Stub slash command: the body is elided ("..." placeholder) in this view —
# presumably replies with usage help for the prompting commands; TODO confirm
# against the full file.
@app_commands.command()
async def prompting_help(self, interaction: discord.Interaction): ...

@app_commands.command(
    name="list_models",
    description="List all the models that can be used by prompting",
)
async def list_models(self, interaction: discord.Interaction):
    """Reply with the names of the OpenAI models supported for prompting.

    Args:
        interaction: The Discord interaction that invoked the command.
    """
    # BUG FIX: the pasted diff stacked two @app_commands.command decorators
    # and listed every model twice; keep one decorator and one entry each.
    # A set deduplicates but iterates in arbitrary order, so sort the names
    # to make the reply deterministic for users.
    models = {
        "gpt-4o",
        "gpt-4o-2024-05-13",
        "gpt-4-turbo-2024-04-09",
        "gpt-4-turbo-preview",
        "gpt-4-0125-preview",
        "gpt-4-1106-preview",
        "gpt-4",
        "gpt-4-0613",
        "gpt-4-0314",
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-0125",
        "gpt-3.5-turbo-1106",
        "gpt-3.5-turbo-instruct",
    }
    await interaction.response.send_message("\n".join(sorted(models)))


@app_commands.command(name="max_tokens", description="lists the max number of context tokens a model can have")

@app_commands.command(
name="max_tokens",
description="lists the max number of context tokens a model can have",
)
async def max_tokens(self, interaction: discord.Interaction):
openAI_max_context = {
"gpt-4o": 128000,
Expand All @@ -86,7 +91,14 @@ async def max_tokens(self, interaction: discord.Interaction):
"gpt-3.5-turbo-1106": 16385,
"gpt-3.5-turbo-instruct": 4096,
}
await interaction.response.send_message("\n".join([f"{key}:" + " {:,}".format(int(value)) for key, value in openAI_max_context.items()]))
await interaction.response.send_message(
"\n".join(
[
f"{key}:" + " {:,}".format(int(value))
for key, value in openAI_max_context.items()
]
)
)


async def setup(client):
Expand Down
15 changes: 9 additions & 6 deletions testing/getStuff.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,31 @@
import asyncio
from playwright.async_api import async_playwright


async def scrape_p_text(url):
    """Load *url* in headless Chromium and return the text of every <p> tag.

    Args:
        url: Address of the page to scrape.

    Returns:
        A list with one entry per <p> element; an entry may be None when
        the element has no text content.
    """
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        try:
            page = await browser.new_page()
            await page.goto(url)

            # Get all text from <p> tags. Loop variable renamed from "p" to
            # "tag" so it no longer shadows the playwright context manager.
            p_tags = await page.query_selector_all("p")
            p_texts = [await tag.text_content() for tag in p_tags]
        finally:
            # Release the browser even if navigation or scraping raises.
            await browser.close()

        return p_texts


async def main():
    """Scrape the sample article and return its paragraph text as one string."""
    url = "https://www.bbc.com/news/articles/cq5xjzqree2o"  # Replace with the URL you want to scrape
    p_texts = await scrape_p_text(url)
    print("Text in <p> tags:")
    text = " ".join(p_texts)
    # BUG FIX: str.replace returns a new string and the original discarded it
    # (a complete no-op). Collapse doubled spaces and keep the result.
    # NOTE(review): the pasted arguments were whitespace-mangled; the evident
    # intent is collapsing double spaces — confirm against the real file.
    text = text.replace("  ", " ")
    return text

# Script entry point. BUG FIX: the pasted diff left two guards, the first
# with an empty body (a SyntaxError); keep a single guard.
if __name__ == "__main__":
    print(asyncio.run(main()))
5 changes: 4 additions & 1 deletion utils/gptFunctions.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,9 @@ async def createQOTW(websites: str, model: str = "gpt-4o") -> str:
return f"Invalid model. Please run the prompting help command to find the right model to use."
try:
client = OpenAI(api_key=API_KEY)
website_list = [website.strip().replace("\"", "") for website in websites.split(",")]
website_list = [
website.strip().replace('"', "") for website in websites.split(",")
]
data = "\n".join(
[
f"Website Source: {website} \n {await scrape_p_text(website)}"
Expand Down Expand Up @@ -167,6 +169,7 @@ async def main():
)
return response


# Script entry point: run the async main() and show its result.
if __name__ == "__main__":
    print(asyncio.run(main()))
16 changes: 8 additions & 8 deletions utils/scrapeWebsites.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,23 @@
from playwright.async_api import async_playwright


async def scrape_p_text(url: str) -> str:
    """
    Scrapes all the text from the <p> tags on a webpage.

    This usually only works for the websites BBC, the Verge, or AP News
    (which are the only websites I have tested this on).

    Args:
        url: Address of the page to scrape.

    Returns:
        The text of every <p> element, joined with single spaces.
    """
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        try:
            page = await browser.new_page()
            await page.goto(url)

            # Get all text from <p> tags. Loop variable renamed from "p" to
            # "tag" so it no longer shadows the playwright context manager.
            p_tags = await page.query_selector_all("p")
            p_texts = [await tag.text_content() for tag in p_tags]
        finally:
            # Release the browser even if navigation or scraping raises.
            await browser.close()

    # text_content() may return None for empty elements; drop those so the
    # join cannot raise TypeError.
    text = " ".join(t for t in p_texts if t)
    # BUG FIX: str.replace returns a new string and the original discarded it
    # (a complete no-op). Collapse doubled spaces and keep the result.
    text = text.replace("  ", " ")
    return text

0 comments on commit 8f1ce75

Please sign in to comment.