Commit

Adding support for OAIWUI_PROMPT_PRESETS_ONLY
mmartial committed Oct 9, 2024
1 parent 77b1ca8 commit 73bec6f
Showing 6 changed files with 155 additions and 61 deletions.
7 changes: 7 additions & 0 deletions .env.example
@@ -32,3 +32,10 @@ OAIWUI_DALLE_MODELS=dall-e-3 dall-e-2

# Default username (leave commented to be prompted -- default: multi-user mode)
#OAIWUI_USERNAME=

# Prompt presets directory
#OAIWUI_PROMPT_PRESETS_DIR=prompt_presets.example

# Prompt presets only: disables the selection of model, tokens and temperature. Only the preset prompts selection is available.
# Requires a valid OAIWUI_PROMPT_PRESETS_DIR and a JSON file containing the preset settings.
#OAIWUI_PROMPT_PRESETS_ONLY=prompt_presets_settings-example.json
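
For illustration, a minimal setup could pair the two variables like this (the file names below are the examples referenced in this commit; the model name and values are placeholders, not part of the change):

OAIWUI_PROMPT_PRESETS_DIR=prompt_presets.example
OAIWUI_PROMPT_PRESETS_ONLY=prompt_presets_settings-example.json

The settings JSON must carry the three keys that the loader added in this commit validates ('model', 'tokens', 'temperature'); a hypothetical prompt_presets_settings-example.json could read:

{
  "model": "gpt-4o-mini",
  "tokens": 1000,
  "temperature": 0.5
}
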
11 changes: 4 additions & 7 deletions OpenAI_GPT.py
@@ -231,7 +231,6 @@ def get_chat_history(self, run_file):
if len(dict_array) == 0:
return "No messages in chat history"
for msg in dict_array:
print (msg)
if 'oaiwui_skip' in msg:
continue
if 'content' not in msg:
@@ -278,14 +277,12 @@ def chatgpt_it(self, model_engine, prompt, max_tokens, temperature, clear_chat,
to_add = { 'role': role, 'content': [ {'type': 'text', 'text': prompt} ] }
messages.append(to_add)

clean_messages = []
clean_messages = copy.deepcopy(messages)
msg_count = 0
for msg in messages:
msg_copy = copy.deepcopy(msg)
for msg in clean_messages:
msg_count += 1
if 'oaiwui_skip' in msg_copy:
del msg_copy['oaiwui_skip']
clean_messages.append(msg_copy)
if 'oaiwui_skip' in msg:
del msg['oaiwui_skip']

# Call the GPT API
err, response = simpler_gpt_call(self.apikey, clean_messages, model_engine, max_tokens, temperature, **kwargs)
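
The refactor above swaps the per-message copy loop for a single deepcopy of the whole history, then strips the internal 'oaiwui_skip' marker in place before the API call. A minimal, self-contained sketch of the new behavior (the sample messages are made up for illustration):

import copy

messages = [
    {'role': 'user', 'content': [{'type': 'text', 'text': 'hello'}], 'oaiwui_skip': True},
    {'role': 'assistant', 'content': [{'type': 'text', 'text': 'hi there'}]},
]

clean_messages = copy.deepcopy(messages)
for msg in clean_messages:
    if 'oaiwui_skip' in msg:
        del msg['oaiwui_skip']  # the internal marker never reaches the API

# the stored history keeps its marker; only the copy sent to the API is cleaned
assert 'oaiwui_skip' in messages[0]
assert 'oaiwui_skip' not in clean_messages[0]
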
132 changes: 89 additions & 43 deletions OpenAI_GPT_WUI.py
@@ -22,7 +22,7 @@

##########
class OAI_GPT_WUI:
def __init__(self, oai_gpt: OAI_GPT, enable_vision: bool = True, prompt_presets_dir: str = None) -> None:
def __init__(self, oai_gpt: OAI_GPT, enable_vision: bool = True, prompt_presets_dir: str = None, prompt_presets_file: str = None) -> None:
self.last_gpt_query = "last_gpt_query"

self.oai_gpt = oai_gpt
@@ -42,6 +42,8 @@ def __init__(self, oai_gpt: OAI_GPT, enable_vision: bool = True, prompt_presets_
self.prompt_presets_dir = prompt_presets_dir
self.prompt_presets = {}

self.prompt_presets_file = prompt_presets_file
self.prompt_presets_settings = {}

def resize_rectangle(self, original_width, original_height, max_width, max_height):
aspect_ratio = original_width / original_height
@@ -127,21 +129,51 @@ def file_uploader(self, details_selection):

#####

def load_prompt_presets(self, prompt_presets_dir=None):
if prompt_presets_dir is None:
self.prompt_presets = {}
def load_prompt_presets(self):
if self.prompt_presets_dir is None:
return ""

prompt_presets = {}
for file in os.listdir(prompt_presets_dir):
for file in os.listdir(self.prompt_presets_dir):
if file.endswith(".json"):
err = cf.check_file_r(os.path.join(prompt_presets_dir, file))
err = cf.check_file_r(os.path.join(self.prompt_presets_dir, file))
if cf.isNotBlank(err):
return err
with open(os.path.join(prompt_presets_dir, file), "r") as f:
with open(os.path.join(self.prompt_presets_dir, file), "r") as f:
prompt_presets[file.split(".json")[0]] = json.load(f)

self.prompt_presets = prompt_presets

if self.prompt_presets_file is not None:
err = cf.check_file_r(self.prompt_presets_file)
if cf.isNotBlank(err):
return err
with open(self.prompt_presets_file, "r") as f:
self.prompt_presets_settings = json.load(f)
if 'model' not in self.prompt_presets_settings:
return f"Could not find 'model' in {self.prompt_presets_file}"
model = self.prompt_presets_settings['model']
if model not in self.models:
return f"Could not find requested 'model' ({model}) in available models: {list(self.models.keys())} (from {self.prompt_presets_file})"
if 'tokens' not in self.prompt_presets_settings:
return f"Could not find 'tokens' in {self.prompt_presets_file}"
tmp = self.prompt_presets_settings['tokens']
if tmp is None:
return f"Invalid 'tokens' ({tmp}) in {self.prompt_presets_file}"
if tmp <= 0:
return f"Invalid 'tokens' ({tmp}) in {self.prompt_presets_file}"
if tmp > self.models[model]['max_token']:
return f"Requested 'tokens' ({tmp}) is greater than model's 'max_token' ({self.models[model]['max_token']}) in {self.prompt_presets_file}"
if 'temperature' not in self.prompt_presets_settings:
return f"Could not find 'temperature' in {self.prompt_presets_file}"
tmp = self.prompt_presets_settings['temperature']
if tmp is None:
return f"Invalid 'temperature' ({tmp}) in {self.prompt_presets_file}"
if tmp < 0:
return f"Invalid 'temperature' ({tmp}) in {self.prompt_presets_file}"
if tmp > 1:
return f"Invalid 'temperature' ({tmp}) in {self.prompt_presets_file}"

return ""


@@ -151,65 +183,79 @@ def set_ui(self):
vision_capable = False
vision_mode = False
disable_preset_prompts = False
clear_chat = False

if 'gpt_last_prompt' in st.session_state:
if st.session_state['gpt_last_prompt'] != "":
disable_preset_prompts = True

with st.sidebar:
st.text("Check the various ? for help", help=f"[Run Details]\n\nRunID: {cfw.get_runid()}\n\nSave location: {self.save_location}\n\nUTC time: {cf.get_timeUTC()}\n")

if st.button("Clear Chat History"):
clear_chat = True
st.session_state['gpt_last_prompt'] = ''
if self.last_gpt_query in st.session_state:
del st.session_state[self.last_gpt_query]
disable_preset_prompts = False
st.session_state['clear_chat'] = True

if vision_mode is False and self.prompt_presets_dir is not None:
if self.prompt_presets == {}:
err = self.load_prompt_presets(self.prompt_presets_dir)
err = self.load_prompt_presets()
if cf.isNotBlank(err):
st.error(err)
cf.error_exit(err)
prompt_preset = st.selectbox("Prompt preset", options=list(self.prompt_presets.keys()), index=None, key="prompt_preset", help="Load a prompt preset. Can only be used with new chats.", disabled=disable_preset_prompts)

model = st.selectbox("model", options=list(self.models.keys()), index=0, key="model", help=self.model_help)
if model in self.models_status:
st.info(f"{model}: {self.models_status[model]}")
if self.model_capability[model] == "vision":
vision_capable = True
m_token = self.models[model]['max_token']

# vision mode bypass
if self.enable_vision is False:
vision_mode = False
vision_capable = False

if vision_capable:
vision_mode = st.toggle(label="Vision", value=False, help="Enable the upload of an image. Vision's limitation and cost can be found at https://platform.openai.com/docs/guides/vision/limitations.\n\nDisables the role and presets selectors. Image(s) are resized when over the max of the \'details\' selected. Please be aware that each 512px x 512px title is expected to cost 170 tokens. Using this mode disables roles, presets and chat (the next prompt will not have knowledge of past thread of conversation)")
if self.prompt_presets_settings == {}:
# Only available if not in "preset only" mode
model = st.selectbox("model", options=list(self.models.keys()), index=0, key="model", help=self.model_help)
if model in self.models_status:
st.info(f"{model}: {self.models_status[model]}")
if self.model_capability[model] == "vision":
vision_capable = True
m_token = self.models[model]['max_token']

# vision mode bypass
if self.enable_vision is False:
vision_mode = False
vision_capable = False

if vision_capable:
vision_mode = st.toggle(label="Vision", value=False, help="Enable the upload of an image. Vision's limitation and cost can be found at https://platform.openai.com/docs/guides/vision/limitations.\n\nDisables the role and presets selectors. Image(s) are resized when over the max of the \'details\' selected. Please be aware that each 512px x 512px title is expected to cost 170 tokens. Using this mode disables roles, presets and chat (the next prompt will not have knowledge of past thread of conversation)")

if vision_mode:
vision_details = st.selectbox("Vision Details", options=["auto", "low", "high"], index=0, key="vision_details", help="The model will use the auto setting which will look at the image input size and decide if it should use the low or high setting.\n\n- low: the model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 85 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.\n\n- high will first allows the model to first see the low res image (using 85 tokens) and then creates detailed crops using 170 tokens for each 512px x 512px tile.\n\n\n\nImage inputs are metered and charged in tokens, just as text inputs are. The token cost of a given image is determined by two factors: its size, and the detail option on each image_url block. All images with detail: low cost 85 tokens each. detail: high images are first scaled to fit within a 2048 x 2048 square, maintaining their aspect ratio. Then, they are scaled such that the shortest side of the image is 768px long. Finally, a count of how many 512px squares the image consists of is performed. Each of those squares costs 170 tokens. Another 85 tokens are always added to the final total. More details at https://platform.openai.com/docs/guides/vision/calculating-costs")

role = list(self.gpt_roles.keys())[0]
if vision_mode is False:
role = st.selectbox("Role", options=self.gpt_roles, index=0, key="input_role", help = "Role of the input text\n\n" + self.gpt_roles_help)

if vision_mode is True:
clear_chat = True

if vision_mode:
vision_details = st.selectbox("Vision Details", options=["auto", "low", "high"], index=0, key="vision_details", help="The model will use the auto setting which will look at the image input size and decide if it should use the low or high setting.\n\n- low: the model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 85 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.\n\n- high will first allows the model to first see the low res image (using 85 tokens) and then creates detailed crops using 170 tokens for each 512px x 512px tile.\n\n\n\nImage inputs are metered and charged in tokens, just as text inputs are. The token cost of a given image is determined by two factors: its size, and the detail option on each image_url block. All images with detail: low cost 85 tokens each. detail: high images are first scaled to fit within a 2048 x 2048 square, maintaining their aspect ratio. Then, they are scaled such that the shortest side of the image is 768px long. Finally, a count of how many 512px squares the image consists of is performed. Each of those squares costs 170 tokens. Another 85 tokens are always added to the final total. More details at https://platform.openai.com/docs/guides/vision/calculating-costs")
max_tokens = st.slider('max_tokens', 0, m_token, 1000, 100, "%i", "max_tokens", "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model\'s context length.")
temperature = st.slider('temperature', 0.0, 1.0, 0.5, 0.01, "%0.2f", "temperature", "The temperature of the model. Higher temperature results in more surprising text.")

role = list(self.gpt_roles.keys())[0]
if vision_mode is False:
role = st.selectbox("Role", options=self.gpt_roles, index=0, key="input_role", help = "Role of the input text\n\n" + self.gpt_roles_help)
if vision_mode is False:
presets = st.selectbox("GPT Task", options=list(self.gpt_presets.keys()), index=0, key="presets", help=self.gpt_presets_help)
else:
presets = list(self.gpt_presets.keys())[0]

clear_chat = False
if vision_mode is True:
clear_chat = True

max_tokens = st.slider('max_tokens', 0, m_token, 1000, 100, "%i", "max_tokens", "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model\'s context length.")
temperature = st.slider('temperature', 0.0, 1.0, 0.5, 0.01, "%0.2f", "temperature", "The temperature of the model. Higher temperature results in more surprising text.")

if vision_mode is False:
presets = st.selectbox("GPT Task", options=list(self.gpt_presets.keys()), index=0, key="presets", help=self.gpt_presets_help)
else:
else: # "preset only" mode
model = self.prompt_presets_settings['model']
max_tokens = self.prompt_presets_settings['tokens']
temperature = self.prompt_presets_settings['temperature']
presets = list(self.gpt_presets.keys())[0]
role = list(self.gpt_roles.keys())[0]


gpt_show_tooltip = st.toggle(label="Show Tips", value=False, help="Show some tips on how to use the tool", key="gpt_show_tooltip")
gpt_show_history = st.toggle(label='Show Prompt History', value=False, help="Show a list of prompts that you have used in the past (most recent first). Loading a selected prompt does not load the parameters used for the generation.", key="gpt_show_history")
if gpt_show_history:
gpt_allow_history_deletion = st.toggle('Allow Prompt History Deletion', value=False, help="This will allow you to delete a prompt from the history. This will delete the prompt and all its associated files. This cannot be undone.", key="gpt_allow_history_deletion")

if st.button("Clear Chat History"):
clear_chat = True
st.session_state['gpt_last_prompt'] = ''
if self.last_gpt_query in st.session_state:
del st.session_state[self.last_gpt_query]
st.session_state['clear_chat'] = True

# Main window
if gpt_show_tooltip:
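
In the rewritten sidebar, the added prompt_presets_settings dictionary acts as a switch: when it is empty, the model, max_tokens and temperature widgets behave as before; when it was loaded from OAIWUI_PROMPT_PRESETS_ONLY, those values are pinned, the widgets are not drawn, and the role and GPT task fall back to their defaults. A standalone sketch of that selection logic (not the project's code; the sample values are placeholders):

def pick_generation_params(preset_settings: dict, ui_choices: dict) -> tuple:
    """Return (model, max_tokens, temperature) for the next GPT call."""
    if preset_settings:  # "preset only" mode: the settings file wins
        return (preset_settings['model'],
                preset_settings['tokens'],
                preset_settings['temperature'])
    # interactive mode: values come from the sidebar widgets
    return (ui_choices['model'], ui_choices['max_tokens'], ui_choices['temperature'])

# Example: preset-only mode takes precedence whenever settings were loaded.
print(pick_generation_params(
    {'model': 'gpt-4o-mini', 'tokens': 1000, 'temperature': 0.5},    # placeholder settings
    {'model': 'gpt-4o', 'max_tokens': 2000, 'temperature': 0.7}))    # placeholder widget values
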
37 changes: 26 additions & 11 deletions OpenAI_WebUI.py
@@ -153,24 +153,39 @@ def main():

prompt_presets_dir = None
if 'OAIWUI_PROMPT_PRESETS_DIR' in os.environ:
prompt_presets_dir = os.environ.get('OAIWUI_PROMPT_PRESETS_DIR')
if cf.isBlank(prompt_presets_dir):
tmp = os.environ.get('OAIWUI_PROMPT_PRESETS_DIR')
if cf.isBlank(tmp):
st.warning(f"OAIWUI_PROMPT_PRESETS_DIR provided but empty, will not use prompt presets")

else:
err = cf.check_dir(prompt_presets_dir, "OAIWUI_PROMPT_PRESETS_DIR directory")
err = cf.check_dir(tmp, "OAIWUI_PROMPT_PRESETS_DIR directory")
if cf.isNotBlank(err):
st.warning(f"While checking OAIWUI_PROMPT_PRESETS_DIR: {err}")
prompt_presets_dir = None
else:
has_json = False
for file in os.listdir(prompt_presets_dir):
for file in os.listdir(tmp):
if file.endswith(".json"):
has_json = True
break
if not has_json:
st.warning(f"OAIWUI_PROMPT_PRESETS_DIR provided but empty, will not use prompt presets")
prompt_presets_dir = None

st.warning(f"OAIWUI_PROMPT_PRESETS_DIR provided but appears to not contain prompts, will not use prompt presets")
else: # all the conditions are met
prompt_presets_dir = tmp

prompt_presets_file = None
if 'OAIWUI_PROMPT_PRESETS_ONLY' in os.environ:
tmp = os.environ.get('OAIWUI_PROMPT_PRESETS_ONLY')
if cf.isBlank(tmp):
st.warning(f"OAIWUI_PROMPT_PRESETS_ONLY provided but empty, will not use prompt presets")
else:
err = cf.check_file_r(tmp)
if cf.isNotBlank(err):
st.warning(f"While checking OAIWUI_PROMPT_PRESETS_ONLY: {err}")
else:
if prompt_presets_dir is None:
st.warning(f"OAIWUI_PROMPT_PRESETS_ONLY provided but no OAIWUI_PROMPT_PRESETS_DIR, will not use prompt presets")
else: # all the conditions are met
prompt_presets_file = tmp

# Store the initial value of widgets in session state
if "visibility" not in st.session_state:
@@ -203,7 +218,7 @@ def main():
long_save_location = os.path.join(save_location, iti_version)
cf.make_wdir_error(os.path.join(long_save_location))

set_ui(long_save_location, username, apikey, gpt_models, av_gpt_models, gpt_vision, dalle_models, av_dalle_models, prompt_presets_dir)
set_ui(long_save_location, username, apikey, gpt_models, av_gpt_models, gpt_vision, dalle_models, av_dalle_models, prompt_presets_dir, prompt_presets_file)

#####

@@ -217,11 +232,11 @@ def process_error_warning(err, warn):

#####

def set_ui(long_save_location, username, apikey, gpt_models, av_gpt_models, gpt_vision, dalle_models, av_dalle_models, prompt_presets_dir: str = None):
def set_ui(long_save_location, username, apikey, gpt_models, av_gpt_models, gpt_vision, dalle_models, av_dalle_models, prompt_presets_dir: str = None, prompt_presets_file: str = None):
oai_gpt = OAI_GPT(apikey, long_save_location, username)
err, warn = oai_gpt.set_parameters(gpt_models, av_gpt_models)
process_error_warning(err, warn)
oai_gpt_st = OAI_GPT_WUI(oai_gpt, gpt_vision, prompt_presets_dir)
oai_gpt_st = OAI_GPT_WUI(oai_gpt, gpt_vision, prompt_presets_dir, prompt_presets_file)
oai_dalle = None
oai_dalle_st = None
if 'OAIWUI_GPT_ONLY' in os.environ: