
Merge pull request #6746 from oobabooga/dev
Merge dev branch
oobabooga authored Feb 15, 2025
2 parents 461d1fd + cf9676c commit 7c883ef
Showing 13 changed files with 29 additions and 96 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -4,6 +4,8 @@ A Gradio web UI for Large Language Models.
 
 Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) of text generation.
 
+[Try the Deep Reason extension](https://oobabooga.gumroad.com/l/deep_reason)
+
 |![Image1](https://github.com/oobabooga/screenshots/raw/main/AFTER-INSTRUCT.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/AFTER-CHAT.png) |
 |:---:|:---:|
 |![Image1](https://github.com/oobabooga/screenshots/raw/main/AFTER-DEFAULT.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/AFTER-PARAMETERS.png) |
38 changes: 0 additions & 38 deletions convert-to-safetensors.py

This file was deleted.

2 changes: 1 addition & 1 deletion css/html_instruct_style.css
@@ -46,7 +46,7 @@
 }
 
 .chat .user-message {
-    background: #f4f4f4;
+    background: #f5f5f5;
     padding: 1.5rem 1rem;
     padding-bottom: 2rem;
     border-radius: 0;
2 changes: 1 addition & 1 deletion css/main.css
@@ -2,7 +2,7 @@
     --darker-gray: #202123;
     --dark-gray: #343541;
     --light-gray: #444654;
-    --light-theme-gray: #f4f4f4;
+    --light-theme-gray: #f5f5f5;
     --border-color-dark: #525252;
     --header-width: 112px;
     --selected-item-color-dark: #32333e;
7 changes: 5 additions & 2 deletions docs/10 - WSL.md
@@ -135,9 +135,12 @@ When you git clone a repository, put it inside WSL and not outside. To understan
 
 ### Bonus: Port Forwarding
 
-By default, you won't be able to access the webui from another device on your local network. You will need to setup the appropriate port forwarding using the following command (using PowerShell or Terminal with administrator privileges).
+By default, you won't be able to access the webui from another device on your local network. You will need to set up the appropriate port forwarding using the following steps:
 
+1. First, get the IP address of the WSL by typing `wsl hostname -I`. This will output the IP address, for example `172.20.134.111`.
+2. Then, use the following command (using PowerShell or Terminal with administrator privileges) to set up port forwarding, replacing `172.20.134.111` with the IP address you obtained in step 1:
+
 ```
-netsh interface portproxy add v4tov4 listenaddress=0.0.0.0 listenport=7860 connectaddress=localhost connectport=7860
+netsh interface portproxy add v4tov4 listenaddress=0.0.0.0 listenport=7860 connectaddress=172.20.134.111 connectport=7860
 ```
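For reference, a forwarding rule added this way can later be inspected or removed with the same `netsh` subsystem (a usage sketch reusing the port from the example above):

```
netsh interface portproxy show v4tov4
netsh interface portproxy delete v4tov4 listenaddress=0.0.0.0 listenport=7860
```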

9 changes: 6 additions & 3 deletions extensions/sd_api_pictures/script.py
@@ -11,7 +11,7 @@
 from PIL import Image
 
 from modules import shared
-from modules.models import reload_model, unload_model
+from modules.models import load_model, unload_model
 from modules.ui import create_refresh_button
 
 torch._C._jit_set_profiling_mode(False)
@@ -38,14 +38,16 @@
     'cfg_scale': 7,
     'textgen_prefix': 'Please provide a detailed and vivid description of [subject]',
     'sd_checkpoint': ' ',
-    'checkpoint_list': [" "]
+    'checkpoint_list': [" "],
+    'last_model': ""
 }
 
 
 def give_VRAM_priority(actor):
     global shared, params
 
     if actor == 'SD':
+        params["last_model"] = shared.model_name
         unload_model()
         print("Requesting Auto1111 to re-load last checkpoint used...")
         response = requests.post(url=f'{params["address"]}/sdapi/v1/reload-checkpoint', json='')
@@ -55,7 +57,8 @@ def give_VRAM_priority(actor):
         print("Requesting Auto1111 to vacate VRAM...")
         response = requests.post(url=f'{params["address"]}/sdapi/v1/unload-checkpoint', json='')
         response.raise_for_status()
-        reload_model()
+        if params["last_model"]:
+            shared.model, shared.tokenizer = load_model(params["last_model"])
 
     elif actor == 'set':
         print("VRAM mangement activated -- requesting Auto1111 to vacate VRAM...")
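Read together, the two hunks replace the extension's dependence on a `reload_model()` helper with an explicit remember-and-restore pattern: the name of the currently loaded text-generation model is recorded before VRAM is handed to Stable Diffusion, and the model is reloaded by name afterwards. A schematic sketch of that flow (the function names are the ones imported above; the surrounding steps are illustrative):

```python
# Give VRAM to Stable Diffusion, then restore the text-generation model.
params["last_model"] = shared.model_name   # remember what is loaded now
unload_model()                             # free VRAM for Auto1111
# ... image generation via the A1111 API happens here ...
if params["last_model"]:                   # restore only if a model was loaded
    shared.model, shared.tokenizer = load_model(params["last_model"])
```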
10 changes: 9 additions & 1 deletion modules/chat.py
@@ -412,8 +412,16 @@ def generate_chat_reply(text, state, regenerate=False, _continue=False, loading_
             yield history
         return
 
+    show_after = html.escape(state["show_after"]) if state["show_after"] else None
     for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message, for_ui=for_ui):
-        yield history
+        if show_after:
+            after = history["visible"][-1][1].partition(show_after)[2] or "*Is thinking...*"
+            yield {
+                'internal': history['internal'],
+                'visible': history['visible'][:-1] + [[history['visible'][-1][0], after]]
+            }
+        else:
+            yield history
 
 
 def character_is_loaded(state, raise_exception=False):
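The hiding logic relies on `str.partition`: everything up to and including the first occurrence of the marker is dropped from the visible reply, and a placeholder is shown while the marker has not been generated yet. A minimal sketch of just that expression (illustrative strings; `show_after` is the HTML-escaped setting, as in the hunk above):

```python
show_after = "&lt;/think&gt;"  # i.e. html.escape("</think>")
streamed = "first the reasoning...&lt;/think&gt;The answer is 42."

# partition(...)[2] is "" until the marker appears, so the placeholder is used.
after = streamed.partition(show_after)[2] or "*Is thinking...*"
print(after)  # -> The answer is 42.
```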
49 changes: 0 additions & 49 deletions modules/html_generator.py
@@ -106,52 +106,6 @@ def replace_blockquote(m):
     return m.group().replace('\n', '\n> ').replace('\\begin{blockquote}', '').replace('\\end{blockquote}', '')
 
 
-def add_long_list_class(html):
-    '''
-    Adds a long-list class to <ul> or <ol> containing long <li> items.
-    These will receive a smaller margin/padding in the CSS.
-    '''
-
-    # Helper function to check if a tag is within <pre> or <code>
-    def is_within_block(start_idx, end_idx, block_matches):
-        return any(start < start_idx < end or start < end_idx < end for start, end in block_matches)
-
-    # Find all <pre>...</pre> and <code>...</code> blocks
-    pre_blocks = [(m.start(), m.end()) for m in re.finditer(r'<pre.*?>.*?</pre>', html, re.DOTALL)]
-    code_blocks = [(m.start(), m.end()) for m in re.finditer(r'<code.*?>.*?</code>', html, re.DOTALL)]
-    all_blocks = pre_blocks + code_blocks
-
-    # Pattern to find <ul>...</ul> and <ol>...</ol> blocks and their contents
-    list_pattern = re.compile(r'(<[uo]l.*?>)(.*?)(</[uo]l>)', re.DOTALL)
-    li_pattern = re.compile(r'<li.*?>(.*?)</li>', re.DOTALL)
-
-    def process_list(match):
-        start_idx, end_idx = match.span()
-        if is_within_block(start_idx, end_idx, all_blocks):
-            return match.group(0)  # Leave the block unchanged if within <pre> or <code>
-
-        opening_tag = match.group(1)
-        list_content = match.group(2)
-        closing_tag = match.group(3)
-
-        # Find all list items within this list
-        li_matches = li_pattern.finditer(list_content)
-        has_long_item = any(len(li_match.group(1).strip()) > 224 for li_match in li_matches)
-
-        if has_long_item:
-            # Add class="long-list" to the opening tag if it doesn't already have a class
-            if 'class=' not in opening_tag:
-                opening_tag = opening_tag[:-1] + ' class="long-list">'
-            else:
-                # If there's already a class, append long-list to it
-                opening_tag = re.sub(r'class="([^"]*)"', r'class="\1 long-list"', opening_tag)
-
-        return opening_tag + list_content + closing_tag
-
-    # Process HTML and replace list blocks
-    return list_pattern.sub(process_list, html)
-
-
 @functools.lru_cache(maxsize=None)
 def convert_to_markdown(string):
     if not string:
@@ -251,9 +205,6 @@ def convert_to_markdown(string):
     # Unescape backslashes
     html_output = html_output.replace('\\\\', '\\')
 
-    # Add "long-list" class to <ul> or <ol> containing a long <li> item
-    html_output = add_long_list_class(html_output)
-
     return html_output
 
 
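Before its removal, `add_long_list_class` rewrote list markup whose items exceeded 224 characters. An illustrative input/output pair, based on the deleted code above (hypothetical strings, not from the repository):

```python
html_in = "<ul><li>" + "x" * 300 + "</li></ul>"
# add_long_list_class(html_in) would have returned:
# <ul class="long-list"><li>xxx...</li></ul>
```

With the function and its only call site gone, newly rendered output never carries the class, so any `.long-list` CSS rules simply stop matching.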
2 changes: 1 addition & 1 deletion modules/models.py
@@ -33,7 +33,7 @@
 local_rank = None
 if shared.args.deepspeed:
     import deepspeed
-    from transformers.deepspeed import (
+    from transformers.integrations.deepspeed import (
         HfDeepSpeedConfig,
         is_deepspeed_zero3_enabled
     )
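Newer transformers releases moved the DeepSpeed helpers into the `integrations` package and deprecated the old top-level `transformers.deepspeed` module; the commit adopts the new path. For code that must run against both old and new transformers versions, a hedged compatibility sketch (an assumption about project needs, not what this commit does):

```python
try:
    # Newer transformers: DeepSpeed lives under transformers.integrations
    from transformers.integrations.deepspeed import (
        HfDeepSpeedConfig,
        is_deepspeed_zero3_enabled
    )
except ImportError:
    # Older transformers releases exposed it at the top level
    from transformers.deepspeed import (
        HfDeepSpeedConfig,
        is_deepspeed_zero3_enabled
    )
```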
1 change: 1 addition & 0 deletions modules/shared.py
@@ -57,6 +57,7 @@
     'seed': -1,
     'custom_stopping_strings': '',
     'custom_token_bans': '',
+    'show_after': '',
     'negative_prompt': '',
     'autoload_model': False,
     'dark_theme': True,
1 change: 1 addition & 0 deletions modules/ui.py
@@ -215,6 +215,7 @@ def list_interface_input_elements():
         'sampler_priority',
         'custom_stopping_strings',
         'custom_token_bans',
+        'show_after',
         'negative_prompt',
         'dry_sequence_breakers',
         'grammar_string',
1 change: 1 addition & 0 deletions modules/ui_parameters.py
@@ -92,6 +92,7 @@ def create_ui(default_preset):
                 shared.gradio['sampler_priority'] = gr.Textbox(value=generate_params['sampler_priority'], lines=12, label='Sampler priority', info='Parameter names separated by new lines or commas.', elem_classes=['add_scrollbar'])
                 shared.gradio['custom_stopping_strings'] = gr.Textbox(lines=2, value=shared.settings["custom_stopping_strings"] or None, label='Custom stopping strings', info='Written between "" and separated by commas.', placeholder='"\\n", "\\nYou:"')
                 shared.gradio['custom_token_bans'] = gr.Textbox(value=shared.settings['custom_token_bans'] or None, label='Token bans', info='Token IDs to ban, separated by commas. The IDs can be found in the Default or Notebook tab.')
+                shared.gradio['show_after'] = gr.Textbox(value=shared.settings['show_after'] or None, label='Show after', info='Hide the reply before this text.', placeholder="</think>")
                 shared.gradio['negative_prompt'] = gr.Textbox(value=shared.settings['negative_prompt'], label='Negative prompt', info='For CFG. Only used when guidance_scale is different than 1.', lines=3, elem_classes=['add_scrollbar'])
                 shared.gradio['dry_sequence_breakers'] = gr.Textbox(value=generate_params['dry_sequence_breakers'], label='dry_sequence_breakers', info='Tokens across which sequence matching is not continued. Specified as a comma-separated list of quoted strings.')
                 with gr.Row() as shared.gradio['grammar_file_row']:
1 change: 1 addition & 0 deletions settings-template.yaml
@@ -29,6 +29,7 @@ truncation_length: 2048
 seed: -1
 custom_stopping_strings: ''
 custom_token_bans: ''
+show_after: ''
 negative_prompt: ''
 autoload_model: false
 dark_theme: true
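Across modules/shared.py, modules/ui.py, modules/ui_parameters.py, and this template, the new `show_after` setting is wired through the defaults, the saved-UI-state list, the Parameters tab, and the settings file. To hide a reasoning model's chain of thought in the chat display, a user could set it in their settings.yaml (an illustrative value matching the UI placeholder; any marker string the model emits works):

```yaml
show_after: "</think>"
```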
