Spaces:
Runtime error
import streamlit.components.v1 as components
from streamlit_player import st_player
from transformers import pipeline
import streamlit as st
import random

st.header("stream your emotions")
st.write("insert Hugging Face details here")

def tester(text):
    # Classify the input text into one of six emotions and embed a matching playlist.
    classifier = pipeline("sentiment-analysis", model='bhadresh-savani/distilbert-base-uncased-emotion')
    results = classifier(text)

    generator = st.button("Generate Song!")
    if generator:
        st.subheader(results[0]['label'])
        if results[0]['label'] == "joy":  # songs for joy emotion
            with open('joyplaylist.txt') as f:
                contents = f.read()
            components.html(contents, width=560, height=325)
        elif results[0]['label'] == "fear":  # songs for fear emotion
            with open('fearplaylist.txt') as f:
                contents = f.read()
            components.html(contents, width=560, height=325)
        elif results[0]['label'] == "anger":  # songs for anger emotion
            with open('angryplaylist.txt') as f:
                contents = f.read()
            components.html(contents, width=560, height=325)
        elif results[0]['label'] == "sadness":  # songs for sadness emotion
            with open('sadplaylist.txt') as f:
                contents = f.read()
            components.html(contents, width=560, height=325)
        elif results[0]['label'] == "surprise":
            st.write("Surprised, aren't you?")
        elif results[0]['label'] == "love":  # songs for love emotion
            with open('loveplaylist.txt') as f:
                contents = f.read()
            components.html(contents, width=560, height=325)
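
# Note (assumption): each *playlist.txt file is expected to contain a ready-made HTML
# embed snippet, since its raw contents are passed straight to components.html above.
# A hypothetical joyplaylist.txt could hold something like:
#   <iframe width="560" height="315" src="https://www.youtube.com/embed/VIDEO_ID"
#           title="YouTube video player" frameborder="0" allowfullscreen></iframe>
# (VIDEO_ID is a placeholder.)
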
emo = st.text_input("Enter a text/phrase/sentence. A corresponding song will be recommended based on its emotion.")

| st.sidebar.subheader("Model Description") | |
| st.sidebar.write("This application uses the DistilBERT model, a distilled version of BERT. The BERT framework uses a bidirectional transformer that allows it to learn the context of a word based on the left and right of the word. According to a paper by V. Sanh, et al., DistilBERT can \"reduce the size of a BERT model by 40%, while retaining 97% of its language understanding capabilities, and being 60% faster.\" This is why the DistilBERT model was used. For more information about the paper, please check out this [link](https://share.streamlit.io/mesmith027/streamlit_webapps/main/MC_pi/streamlit_app.py).") | |
| st.sidebar.write("The specific DistilBERT model used for this is Bhadresh Savani's [distilbert-base-uncased-emotion] (https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion). It is fine-tuned on the Emotion Dataset from Twitter, which can be found [here](https://huggingface.co/datasets/viewer/?dataset=emotion).") | |
| st.sidebar.subheader("Disclaimer/Limitations") | |
| st.sidebar.write("The model only outputs sadness, joy, love, anger, fear, and surprise. With that said, it does not completely encompass the emotions that a human being feels. Despite that, this application only predicts your emotion from your input text based on the six aforementioned emotions.") | |

tester(emo)
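
For reference, the text-classification pipeline returns a list with one dict per input text, each holding a 'label' and a 'score'; the results[0]['label'] lookups above rely on that shape. Below is a minimal sketch (assuming the same model ID, run outside Streamlit) for inspecting the output:

from transformers import pipeline

# Same emotion model as the app above.
classifier = pipeline("sentiment-analysis",
                      model="bhadresh-savani/distilbert-base-uncased-emotion")

results = classifier("I finally got the job!")
# Expected shape: a list with one dict per input, e.g. [{'label': 'joy', 'score': ...}];
# the label is one of: sadness, joy, love, anger, fear, surprise.
print(results[0]['label'], results[0]['score'])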