nagarmayank committed
Commit f45803c · 1 Parent(s): 037e9c2

gemma3:1b model

Files changed (2)
  1. app.py +1 -1
  2. start.sh +4 -4
app.py CHANGED
@@ -119,7 +119,7 @@ def greetings():
 @app.post("/write_message")
 def write_message(data: dict):
     message = data['message']
-    model = ChatOllama(model="gemma3:12b", temperature=1)
+    model = ChatOllama(model="gemma3:1b", temperature=1)
     transaction_bot = Agent(model, system=prompt)
     transaction_bot.graph.invoke({"messages": [message]})
     return {"message": "Transaction completed successfully"}
start.sh CHANGED
@@ -13,9 +13,9 @@ export CUDA_VISIBLE_DEVICES=-1
 ollama serve &
 
 # Pull the model if not already present
-echo "gemma3:12b will be download"
-if ! ollama list | grep -q "gemma3:12b"; then
-    ollama pull gemma3:12b
+echo "gemma3:1b will be download"
+if ! ollama list | grep -q "gemma3:1b"; then
+    ollama pull gemma3:1b
 fi
 
 # Wait for Ollama to start up
@@ -30,6 +30,6 @@ while ! curl -s http://localhost:11434/api/tags >/dev/null; do
 fi
 done
 
-echo "Ollama is Ready - gemma3:12b is Loaded"
+echo "Ollama is Ready - gemma3:1b is Loaded"
 
 python app.py
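
A small follow-up sketch for confirming the smaller model is actually present once Ollama is up, reusing the /api/tags endpoint that start.sh already polls for readiness. The script itself only greps `ollama list`; the JSON handling below assumes Ollama's standard /api/tags response with a "models" list of "name" entries.

# Sketch: check that gemma3:1b shows up in Ollama's local model list.
import requests

tags = requests.get("http://localhost:11434/api/tags", timeout=10).json()
names = [m["name"] for m in tags.get("models", [])]
if any(n.startswith("gemma3:1b") for n in names):
    print("gemma3:1b is pulled and ready")
else:
    print("gemma3:1b not found - run 'ollama pull gemma3:1b'")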