Skip to content

Commit

Permalink
Assistant startup fix
Browse files Browse the repository at this point in the history
  • Loading branch information
artitw committed Sep 22, 2024
1 parent 3f0774d commit fb043a7
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 7 deletions.
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

setuptools.setup(
name="text2text",
version="1.4.9",
version="1.5.0",
author="artitw",
author_email="[email protected]",
description="Text2Text: Crosslingual NLP/G toolkit",
Expand Down
14 changes: 8 additions & 6 deletions text2text/assistant.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import os
import ollama
import psutil
import time
import subprocess

from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage

Expand All @@ -21,15 +22,16 @@ def __init__(self, **kwargs):
return_code = os.system("curl -fsSL https://ollama.com/install.sh | sh")
if return_code != 0:
print("Cannot install ollama.")
return_code = os.system("sudo systemctl enable ollama")
self.load_model()
self.client = ollama.Client(host=self.model_url)

def load_model(self):
    """(Re)start the local ollama server and pull the configured model.

    Stops any previously running ollama service, then launches
    ``ollama serve`` together with ``ollama pull <model_name>`` in a
    detached background shell, and waits a few seconds so the server
    has time to come up before callers hit its API.

    Fire-and-forget: the spawned process is not waited on or checked
    here; failures surface later when the client tries to connect.
    """
    # Stop any already-running service so the fresh `ollama serve`
    # below can bind its port. Best-effort: a nonzero exit simply
    # means no service was running.
    os.system("sudo service ollama stop")
    # NOTE(review): shell=True with an interpolated model name is a
    # command-injection risk if self.model_name is ever user-supplied
    # -- confirm it is validated upstream before changing this.
    sub = subprocess.Popen(
        f"ollama serve & ollama pull {self.model_name}",
        shell=True,
        stdout=subprocess.PIPE
    )
    # Grace period for the server to start accepting connections.
    time.sleep(5.0)

def chat_completion(self, messages=[{"role": "user", "content": "hello"}], stream=False, schema=None, **kwargs):
if is_port_in_use(self.port):
Expand Down

0 comments on commit fb043a7

Please sign in to comment.