Update app.py
app.py CHANGED
@@ -54,8 +54,6 @@ st.set_page_config(
 
 # My Inference API Copy
 API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
-# Meta's Original - Chat HF Free Version:
-#API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
 API_KEY = os.getenv('API_KEY')
 MODEL1="meta-llama/Llama-2-7b-chat-hf"
 MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
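For reference, the retained API_URL / API_KEY pair is a private Hugging Face Inference Endpoint serving the Llama-2 chat model. A minimal sketch of querying such an endpoint, assuming it accepts the standard text-generation payload (the `query` helper name is illustrative, not from app.py):

```python
import os
import requests

API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'
API_KEY = os.getenv('API_KEY')

def query(payload: dict) -> dict:
    # Standard Inference Endpoint call: Bearer-token auth, JSON in, JSON out.
    headers = {"Authorization": f"Bearer {API_KEY}"}
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    return response.json()

# e.g. query({"inputs": "What is retrieval-augmented generation?"})
```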
@@ -111,10 +109,6 @@ def SpeechSynthesis(result):
 
 
 
-
-
-
-
 # GPT4o documentation
 # 1. Cookbook: https://cookbook.openai.com/examples/gpt4o/introduction_to_gpt4o
 # 2. Configure your Project and Orgs to limit/allow Models: https://platform.openai.com/settings/organization/general
@@ -685,42 +679,6 @@ def generate_html(local_files):
 
 
 
-
-
-#from gradio_client import Client
-#client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-#result = client.predict(
-#    message="Hello!!",
-#    llm_results_use=5,
-#    database_choice="Semantic Search",
-#    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-#    api_name="/update_with_rag_md"
-#)
-#print(result)
-#Accepts 4 parameters:
-#message str Required
-##The input value that is provided in the "Search" Textbox component.
-#llm_results_use float Default: 5
-#The input value that is provided in the "Top n results as context" Slider component.
-#database_choice Literal['Semantic Search', 'Arxiv Search - Latest - (EXPERIMENTAL)'] Default: "Semantic Search"
-#The input value that is provided in the "Search Source" Dropdown component.
-#llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-#The input value that is provided in the "LLM Model" Dropdown component.
-#Returns tuple of 2 elements
-#[0] str
-#The output value that appears in the "value_14" Markdown component.
-#[1] str
-#The output value that appears in the "value_13" Textbox component.
-
-
-
-
-
-
-
-
-
-
 #@st.cache_resource
 def search_arxiv(query):
     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
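The comment block deleted above was the only documentation of the Gradio endpoint that search_arxiv still calls below. For reference, a standalone sketch of that call with the deleted parameter and return-value notes folded into comments (assumes a gradio_client version that accepts keyword arguments):

```python
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
result = client.predict(
    message="Hello!!",                  # str, required: the "Search" textbox value
    llm_results_use=5,                  # float, default 5: top-n results used as context
    database_choice="Semantic Search",  # or 'Arxiv Search - Latest - (EXPERIMENTAL)'
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",  # see deleted notes for choices
    api_name="/update_with_rag_md",
)
references_md, references_txt = result  # tuple of two strings (value_14, value_13)
print(references_md)
```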
@@ -732,14 +690,7 @@ def search_arxiv(query):
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         api_name="/update_with_rag_md"
     )
-
-    #response1 = client.predict(
-    #    query,
-    #    20,
-    #    "Semantic Search - up to 10 Mar 2024",
-    #    "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    #    api_name="/update_with_rag_md"
-    #)
+
     Question = '### 🔎 ' + query + '\r\n' # Format for markdown display with links
     References = response1[0]
     References2 = response1[1]
@@ -778,9 +729,6 @@ def search_arxiv(query):
     st.write(f"Start time: {start_time}")
     st.write(f"Finish time: {end_time}")
     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
-
-    #filename = generate_filename(query, "md")
-    #create_file(filename, query, results, should_save)
 
     return results
 
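The two commented calls removed here used the app's own save helpers; restoring that behavior inside search_arxiv would be a sketch like this (generate_filename and create_file are defined elsewhere in app.py, per the deleted comments and the @@ -1558 hunk below):

```python
# Sketch: persist the query/results pair with the module's own helpers.
filename = generate_filename(query, "md")            # derive a .md filename from the query
create_file(filename, query, results, should_save)   # write prompt + results to markdown
```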
@@ -1215,16 +1163,11 @@ def FileSidebar():
     if next_action=='search':
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
         user_prompt = file_contents
-        #try:
-        #search_glossary(file_contents)
         filesearch = PromptPrefix2 + file_content_area
         st.markdown(filesearch)
         if st.button(key='rerun', label='🔍Re-Code' ):
-            #search_glossary(filesearch)
             search_arxiv(filesearch)
 
-        #except:
-        #st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
 # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
 
 # Randomly select a title
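The deleted #try:/#except: scaffolding implied an error guard around the re-run search; if restored, it would look roughly like this sketch (message text taken from the deleted comment):

```python
try:
    search_arxiv(filesearch)
except Exception:
    # Fallback message from the deleted comment.
    st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
```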
@@ -1558,22 +1501,10 @@ def create_file(filename, prompt, response, should_save=True):
     if ext in ['.txt', '.htm', '.md']:
 
 
-
-        # ****** line 344 is read utf-8 encoding was needed when running locally to save utf-8 encoding and not fail on write
-
-        #with open(f"{base_filename}.md", 'w') as file:
-        #with open(f"{base_filename}.md", 'w', encoding="ascii", errors="surrogateescape") as file:
         with open(f"{base_filename}.md", 'w', encoding='utf-8') as file:
-            #try:
-            #content = (prompt.strip() + '\r\n' + decode(response, ))
             file.write(response)
-            #except:
-            #    st.write('.')
-        # ****** utf-8 encoding was needed when running locally to save utf-8 encoding and not fail on write
-
-
-
 
+        # Code Interpreter
         #has_python_code = re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response)
         #has_python_code = bool(re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response))
         #if has_python_code:
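The deleted comments recorded why encoding='utf-8' is passed explicitly: without it, open() falls back to the platform's locale encoding, which on a local Windows run can fail on the emoji this app writes. A small illustration with a hypothetical filename:

```python
content = '### 🔎 example query\r\n'

# Without an explicit encoding, open() uses the locale default (often cp1252 on
# Windows), which cannot encode 🔎 and raises UnicodeEncodeError on write:
# with open("output.md", 'w') as file:
#     file.write(content)

# The retained form is safe on any platform:
with open("output.md", 'w', encoding='utf-8') as file:
    file.write(content)
```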
@@ -1961,29 +1892,8 @@ if filename is not None: # whisper1
         st.session_state.messages.append({"role": "assistant", "content": result})
     except:
         st.write(' ')
-    #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
     filename = None
 
-#MODEL = "gpt-4o-2024-05-13"
-#openai.api_key = os.getenv('OPENAI_API_KEY')
-#openai.organization = os.getenv('OPENAI_ORG_ID')
-#client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
-#st.session_state.messages.append({"role": "user", "content": transcript})
-
-#with st.chat_message("user"):
-#    st.markdown(transcript)
-
-#with st.chat_message("assistant"):
-#    completion = client.chat.completions.create(
-#        model=MODEL,
-#        messages = st.session_state.messages,
-#        stream=True
-#    )
-
-#    response = process_text2(text_input=prompt)
-
-#st.session_state.messages.append({"role": "assistant", "content": response})
-
 
 # Scholary ArXiV Search ------------------------- !!
 session_state = {}
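The commented-out block removed here sketched streaming a GPT-4o reply into the chat. Uncommented and lightly completed, it would look roughly like the following; transcript comes from the Whisper step above, and st.write_stream (Streamlit 1.31+) is an assumption on my part for consuming the stream the original block left unhandled:

```python
import os
import streamlit as st
from openai import OpenAI

MODEL = "gpt-4o-2024-05-13"
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))

st.session_state.messages.append({"role": "user", "content": transcript})
with st.chat_message("user"):
    st.markdown(transcript)

with st.chat_message("assistant"):
    # Stream the completion token by token into the assistant chat bubble.
    completion = client.chat.completions.create(
        model=MODEL,
        messages=st.session_state.messages,
        stream=True,
    )
    response = st.write_stream(completion)  # assumption: Streamlit >= 1.31

st.session_state.messages.append({"role": "assistant", "content": response})
```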
@@ -2088,15 +1998,7 @@ def main():
     if audio_input is not None:
         # To read file as bytes:
         bytes_data = uploaded_file.getvalue()
-
-
-        # To convert to a string based IO:
-        #stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
-        #st.write(stringio)
-
-        # To read file as string:
-        #string_data = stringio.read()
-        #st.write(string_data)
+
 
         process_audio(audio_input, text_input)
 
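The comments deleted here were Streamlit's standard recipes for reading an uploaded file; as working code they would read (uploaded_file is the UploadedFile object from the surrounding handler):

```python
from io import StringIO

bytes_data = uploaded_file.getvalue()                          # raw bytes
stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))  # string-based IO
string_data = stringio.read()                                  # whole file as one str
```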