forked from yaroslavyaroslav/OpenAI-sublime-text
-
Notifications
You must be signed in to change notification settings - Fork 0
/
cacher.py
129 lines (107 loc) · 4.91 KB
/
cacher.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import sublime
import os
from . import jl_utility as jl
import json
from json.decoder import JSONDecodeError
from typing import List, Dict, Iterator, Any, Optional
class Cacher():
    """Persists chat history, the currently selected assistant/model and
    token-usage counters for the plugin inside Sublime Text's cache directory.

    All files live under ``<sublime cache>/OpenAI completion`` and are
    optionally prefixed with ``<name>_`` so several named caches can coexist.
    """

    def __init__(self, name: str = '') -> None:
        """Create the plugin cache directory (if missing) and resolve the
        three backing-file paths.

        name: optional prefix distinguishing independent cache sets.
        """
        cache_dir = sublime.cache_path()
        plugin_cache_dir = os.path.join(cache_dir, 'OpenAI completion')
        if not os.path.exists(plugin_cache_dir):
            os.makedirs(plugin_cache_dir)

        # Compute the "<name>_" prefix once instead of repeating the
        # conditional for every file path.
        prefix = "{}_".format(name) if len(name) > 0 else ""
        self.history_file = os.path.join(plugin_cache_dir, prefix + "chat_history.jl")
        self.current_model_file = os.path.join(plugin_cache_dir, prefix + "current_assistant.json")
        self.tokens_count_file = os.path.join(plugin_cache_dir, prefix + "tokens_count.json")

    def check_and_create(self, path: str):
        """Ensure an (empty) file exists at ``path``."""
        if not os.path.isfile(path):
            open(path, 'w').close()

    def _default_token_counts(self) -> Dict[str, int]:
        """Return a fresh all-zero token counter mapping."""
        return {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0
        }

    def append_tokens_count(self, data: Dict[str, int]):
        """Add per-request token counts to the running totals on disk.

        Known keys are summed with the stored totals; unknown keys are
        stored as-is.
        """
        try:
            with open(self.tokens_count_file, 'r') as file:
                existing_data: Dict[str, int] = json.load(file)
        except (FileNotFoundError, JSONDecodeError):
            # Missing, empty or corrupt file: start from zeroed counters.
            existing_data = self._default_token_counts()

        for key, value in data.items():
            existing_data[key] = existing_data.get(key, 0) + value

        with open(self.tokens_count_file, 'w') as file:
            json.dump(existing_data, file)

    def reset_tokens_count(self):
        """Truncate the token counter file (counters restart from zero)."""
        with open(self.tokens_count_file, 'w'):
            pass

    def read_tokens_count(self) -> Optional[Dict[str, int]]:
        """Return the accumulated token counts, or zeroed defaults when the
        file is empty (e.g. freshly created)."""
        self.check_and_create(self.tokens_count_file)
        with open(self.tokens_count_file, 'r') as file:
            try:
                data: Optional[Dict[str, int]] = json.load(file)
            except JSONDecodeError:
                # A just-created file holds no JSON yet.
                data = self._default_token_counts()
        # Fix: the original had an unreachable duplicate `return data` here.
        return data

    def save_model(self, data: Dict[str, Any]):
        """Persist the currently selected assistant/model settings."""
        with open(self.current_model_file, 'w') as file:
            json.dump(data, file)

    def read_model(self) -> Optional[Dict[str, Any]]:
        """Return the saved assistant settings, or None when the file is
        empty (a legal state)."""
        self.check_and_create(self.current_model_file)
        with open(self.current_model_file, 'r') as file:
            try:
                data: Optional[Dict[str, Any]] = json.load(file)
            except JSONDecodeError:
                # TODO: Handle this state, but keep in mind that it's
                # completely legal for the file to be empty for some
                # (yet unspecified) state.
                print('Empty file I believe')  # typo fixed: "belive"
                return None
        return data

    def read_all(self) -> List[Dict[str, str]]:
        """Return every message stored in the chat history file."""
        self.check_and_create(self.history_file)
        return list(jl.reader(self.history_file))

    def read_last(self, number: int) -> List[Dict[str, str]]:
        """Return up to ``number`` most recent messages from the history.

        Lines that fail to parse are reported and skipped.
        """
        self.check_and_create(self.history_file)
        # Fix: lines[-0:] slices the WHOLE list, so the original returned
        # every entry when asked for zero; guard non-positive counts.
        if number <= 0:
            return []
        with open(self.history_file, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        json_objects: List[Dict[str, str]] = []
        for line in lines[-number:]:
            try:
                json_objects.append(json.loads(line))
            except JSONDecodeError:
                # FIXME: raise an error here that should be handled
                # somewhere on top of the file
                print('Error decoding JSON from line:', line)
        return json_objects

    def append_to_cache(self, cache_lines: List[Dict[str, str]]):
        """Append messages to the chat history JSON Lines file."""
        writer = jl.writer(self.history_file)
        next(writer)  # prime the generator-based writer
        for line in cache_lines:
            writer.send(line)

    def drop_first(self, number: int = 4):
        """Remove the oldest ``number`` entries from the chat history."""
        self.check_and_create(self.history_file)
        with open(self.history_file, 'r') as file:
            lines = file.readlines()
        # Write back everything except the first `number` lines.
        with open(self.history_file, 'w') as file:
            file.writelines(lines[number:])

    def drop_all(self):
        """Truncate the chat history file entirely."""
        with open(self.history_file, 'w'):
            pass