From 0bb7569d4640095adf29bf6cf7bb701f36931f8c Mon Sep 17 00:00:00 2001
From: zxhlyh
Date: Thu, 12 Sep 2024 11:28:14 +0800
Subject: [PATCH 01/25] fix: markdown paragraph margin (#8289)

---
 web/app/components/base/markdown.tsx |  4 ++--
 web/app/styles/markdown.scss         | 12 +++---------
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/web/app/components/base/markdown.tsx b/web/app/components/base/markdown.tsx
index 11bcd84e1825e..d4e7dac4ae5d9 100644
--- a/web/app/components/base/markdown.tsx
+++ b/web/app/components/base/markdown.tsx
@@ -198,11 +198,11 @@ const Paragraph = (paragraph: any) => {
     return (
       <>
-        {paragraph.children.slice(1)}
+        {paragraph.children.slice(1)}
       </>
     )
   }
-  return {paragraph.children}
+  return {paragraph.children}
 }
 
 const Img = ({ src }: any) => {
diff --git a/web/app/styles/markdown.scss b/web/app/styles/markdown.scss
index b0a1f60cd2175..214d8d2782605 100644
--- a/web/app/styles/markdown.scss
+++ b/web/app/styles/markdown.scss
@@ -321,18 +321,12 @@
 .markdown-body h4,
 .markdown-body h5,
 .markdown-body h6 {
-  margin-top: 24px;
-  margin-bottom: 16px;
+  padding-top: 12px;
+  margin-bottom: 12px;
   font-weight: var(--base-text-weight-semibold, 600);
   line-height: 1.25;
 }
-
-.markdown-body p {
-  margin-top: 0;
-  margin-bottom: 10px;
-}
-
 .markdown-body blockquote {
   margin: 0;
   padding: 0 8px;
@@ -449,7 +443,7 @@
 .markdown-body pre,
 .markdown-body details {
   margin-top: 0;
-  margin-bottom: 16px;
+  margin-bottom: 12px;
 }
 
 .markdown-body blockquote> :first-child {

From 0f1487325531bf4d8b38225218a91f06624dcc35 Mon Sep 17 00:00:00 2001
From: Bowen Liang
Date: Thu, 12 Sep 2024 12:55:45 +0800
Subject: [PATCH 02/25] chore: cleanup ruff flake8-simplify linter rules (#8286)

Co-authored-by: -LAN-
---
 .../based_generate_task_pipeline.py           |  2 +-
 .../baichuan/llm/baichuan_turbo.py            |  4 +-
 .../model_providers/google/llm/llm.py         |  4 +-
 .../model_providers/oci/llm/llm.py            |  4 +-
 .../model_providers/tongyi/llm/llm.py         |  4 +-
 .../model_providers/vertex_ai/llm/llm.py      |  4 +-
 .../model_providers/xinference/llm/llm.py     |  6 +--
 .../model_providers/zhipuai/llm/llm.py        | 11 ++---
 api/core/moderation/keywords/keywords.py      | 13 ++---
 api/core/ops/ops_trace_manager.py             |  2 +-
 .../rag/datasource/vdb/relyt/relyt_vector.py  | 48 +++++++++----------
 .../datasource/vdb/tencent/tencent_vector.py  |  5 +-
 .../datasource/vdb/tidb_vector/tidb_vector.py | 34 +++++++------
 api/core/rag/extractor/word_extractor.py      |  3 +-
 api/core/rag/rerank/weight_rerank.py          |  4 +-
 api/core/rag/retrieval/dataset_retrieval.py   |  4 +-
 .../hap/tools/list_worksheet_records.py       |  4 +-
 .../novitaai/tools/novitaai_modelquery.py     |  2 +-
 .../builtin/qrcode/tools/qrcode_generator.py  |  2 +-
 .../builtin/searchapi/tools/google.py         | 20 ++++----
 .../builtin/searchapi/tools/google_jobs.py    |  6 +--
 .../builtin/searchapi/tools/google_news.py    | 10 ++--
 .../searchapi/tools/youtube_transcripts.py    |  4 +-
 .../builtin/stability/tools/text2image.py     |  2 +-
 .../stablediffusion/tools/stable_diffusion.py |  7 ++-
 .../wikipedia/tools/wikipedia_search.py       |  2 +-
 api/core/tools/utils/message_transformer.py   |  4 +-
 .../workflow/graph_engine/entities/graph.py   |  4 +-
 .../answer/answer_stream_generate_router.py   |  2 +-
 .../nodes/end/end_stream_generate_router.py   |  2 +-
 api/core/workflow/nodes/tool/entities.py      |  2 +-
 api/pyproject.toml                            | 15 ++++--
 api/services/file_service.py                  |  4 +-
 api/services/ops_service.py                   |  4 +-
 34 files changed, 110 insertions(+), 138 deletions(-)

diff --git a/api/core/app/task_pipeline/based_generate_task_pipeline.py b/api/core/app/task_pipeline/based_generate_task_pipeline.py
index 49f58af12c9f7..a43be5fdf291a 100644
--- a/api/core/app/task_pipeline/based_generate_task_pipeline.py
+++ b/api/core/app/task_pipeline/based_generate_task_pipeline.py
@@ -65,7 +65,7 @@ def _handle_error(self, event: QueueErrorEvent, message: Optional[Message] = Non
         if isinstance(e, InvokeAuthorizationError):
             err = InvokeAuthorizationError("Incorrect API key provided")
-        elif isinstance(e, InvokeError) or isinstance(e, ValueError):
+        elif isinstance(e, InvokeError | ValueError):
             err = e
         else:
             err = Exception(e.description if getattr(e, "description", None) is not None else str(e))
diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py 
b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py
index 39f867118b464..d5fda73009bba 100644
--- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py
+++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py
@@ -45,7 +45,7 @@ def _build_parameters(
         parameters: dict[str, Any],
         tools: Optional[list[PromptMessageTool]] = None,
     ) -> dict[str, Any]:
-        if model in self._model_mapping.keys():
+        if model in self._model_mapping:
             # the LargeLanguageModel._code_block_mode_wrapper() method will remove the response_format of parameters.
             # we need to rename it to res_format to get its value
             if parameters.get("res_format") == "json_object":
@@ -94,7 +94,7 @@ def generate(
         timeout: int,
         tools: Optional[list[PromptMessageTool]] = None,
     ) -> Union[Iterator, dict]:
-        if model in self._model_mapping.keys():
+        if model in self._model_mapping:
             api_base = "https://api.baichuan-ai.com/v1/chat/completions"
         else:
             raise BadRequestError(f"Unknown model: {model}")
diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py
index 274ff02095b10..307c15e1fdee8 100644
--- a/api/core/model_runtime/model_providers/google/llm/llm.py
+++ b/api/core/model_runtime/model_providers/google/llm/llm.py
@@ -337,9 +337,7 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str:
             message_text = f"{human_prompt} {content}"
         elif isinstance(message, AssistantPromptMessage):
             message_text = f"{ai_prompt} {content}"
-        elif isinstance(message, SystemPromptMessage):
-            message_text = f"{human_prompt} {content}"
-        elif isinstance(message, ToolPromptMessage):
+        elif isinstance(message, SystemPromptMessage | ToolPromptMessage):
             message_text = f"{human_prompt} {content}"
         else:
             raise ValueError(f"Got unknown type {message}")
diff --git a/api/core/model_runtime/model_providers/oci/llm/llm.py b/api/core/model_runtime/model_providers/oci/llm/llm.py
index ad5197a154c28..51b634c6cf8b0 100644
--- a/api/core/model_runtime/model_providers/oci/llm/llm.py
+++ b/api/core/model_runtime/model_providers/oci/llm/llm.py
@@ -442,9 +442,7 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str:
             message_text = f"{human_prompt} {content}"
         elif isinstance(message, AssistantPromptMessage):
             message_text = f"{ai_prompt} {content}"
-        elif isinstance(message, SystemPromptMessage):
-            message_text = f"{human_prompt} {content}"
-        elif isinstance(message, ToolPromptMessage):
+        elif isinstance(message, SystemPromptMessage | ToolPromptMessage):
             message_text = f"{human_prompt} {content}"
         else:
             raise ValueError(f"Got unknown type {message}")
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index 72c319d395657..db0b2deaa5ab2 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -350,9 +350,7 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str:
                     break
         elif isinstance(message, AssistantPromptMessage):
             message_text = f"{ai_prompt} {content}"
-        elif isinstance(message, SystemPromptMessage):
-            message_text = content
-        elif isinstance(message, ToolPromptMessage):
+        elif isinstance(message, SystemPromptMessage | ToolPromptMessage):
             message_text = content
         else:
             raise ValueError(f"Got unknown type {message}")
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
index 
09a7f53f2816d..110028a288a7d 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py @@ -633,9 +633,7 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str: message_text = f"{human_prompt} {content}" elif isinstance(message, AssistantPromptMessage): message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, ToolPromptMessage): + elif isinstance(message, SystemPromptMessage | ToolPromptMessage): message_text = f"{human_prompt} {content}" else: raise ValueError(f"Got unknown type {message}") diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index b2c837dee100e..bc7531ee20187 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -272,11 +272,7 @@ def _convert_prompt_message_to_text(self, message: list[PromptMessage]) -> str: """ text = "" for item in message: - if isinstance(item, UserPromptMessage): - text += item.content - elif isinstance(item, SystemPromptMessage): - text += item.content - elif isinstance(item, AssistantPromptMessage): + if isinstance(item, UserPromptMessage | SystemPromptMessage | AssistantPromptMessage): text += item.content else: raise NotImplementedError(f"PromptMessage type {type(item)} is not supported") diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py index 484ac088dbc0c..498962bd0f8ac 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py +++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py @@ -209,9 +209,10 @@ def _generate( ): new_prompt_messages[-1].content += "\n\n" + copy_prompt_message.content else: - if copy_prompt_message.role == PromptMessageRole.USER: - new_prompt_messages.append(copy_prompt_message) - elif copy_prompt_message.role == PromptMessageRole.TOOL: + if ( + copy_prompt_message.role == PromptMessageRole.USER + or copy_prompt_message.role == PromptMessageRole.TOOL + ): new_prompt_messages.append(copy_prompt_message) elif copy_prompt_message.role == PromptMessageRole.SYSTEM: new_prompt_message = SystemPromptMessage(content=copy_prompt_message.content) @@ -461,9 +462,7 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str: message_text = f"{human_prompt} {content}" elif isinstance(message, AssistantPromptMessage): message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = content - elif isinstance(message, ToolPromptMessage): + elif isinstance(message, SystemPromptMessage | ToolPromptMessage): message_text = content else: raise ValueError(f"Got unknown type {message}") diff --git a/api/core/moderation/keywords/keywords.py b/api/core/moderation/keywords/keywords.py index 17e48b8fbe576..dc6a7ec5644a7 100644 --- a/api/core/moderation/keywords/keywords.py +++ b/api/core/moderation/keywords/keywords.py @@ -56,14 +56,7 @@ def moderation_for_outputs(self, text: str) -> ModerationOutputsResult: ) def _is_violated(self, inputs: dict, keywords_list: list) -> bool: - for value in inputs.values(): - if self._check_keywords_in_value(keywords_list, value): - return True + return any(self._check_keywords_in_value(keywords_list, value) for value in inputs.values()) - return False - - def _check_keywords_in_value(self, 
keywords_list, value): - for keyword in keywords_list: - if keyword.lower() in value.lower(): - return True - return False + def _check_keywords_in_value(self, keywords_list, value) -> bool: + return any(keyword.lower() in value.lower() for keyword in keywords_list) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index d6156e479af16..68fcdf32da66d 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -223,7 +223,7 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: :return: """ # auth check - if tracing_provider not in provider_config_map.keys() and tracing_provider is not None: + if tracing_provider not in provider_config_map and tracing_provider is not None: raise ValueError(f"Invalid tracing provider: {tracing_provider}") app_config: App = db.session.query(App).filter(App.id == app_id).first() diff --git a/api/core/rag/datasource/vdb/relyt/relyt_vector.py b/api/core/rag/datasource/vdb/relyt/relyt_vector.py index 0c9d3b343d3c1..54290eaa5d212 100644 --- a/api/core/rag/datasource/vdb/relyt/relyt_vector.py +++ b/api/core/rag/datasource/vdb/relyt/relyt_vector.py @@ -127,27 +127,26 @@ def add_texts(self, documents: list[Document], embeddings: list[list[float]], ** ) chunks_table_data = [] - with self.client.connect() as conn: - with conn.begin(): - for document, metadata, chunk_id, embedding in zip(texts, metadatas, ids, embeddings): - chunks_table_data.append( - { - "id": chunk_id, - "embedding": embedding, - "document": document, - "metadata": metadata, - } - ) - - # Execute the batch insert when the batch size is reached - if len(chunks_table_data) == 500: - conn.execute(insert(chunks_table).values(chunks_table_data)) - # Clear the chunks_table_data list for the next batch - chunks_table_data.clear() - - # Insert any remaining records that didn't make up a full batch - if chunks_table_data: + with self.client.connect() as conn, conn.begin(): + for document, metadata, chunk_id, embedding in zip(texts, metadatas, ids, embeddings): + chunks_table_data.append( + { + "id": chunk_id, + "embedding": embedding, + "document": document, + "metadata": metadata, + } + ) + + # Execute the batch insert when the batch size is reached + if len(chunks_table_data) == 500: conn.execute(insert(chunks_table).values(chunks_table_data)) + # Clear the chunks_table_data list for the next batch + chunks_table_data.clear() + + # Insert any remaining records that didn't make up a full batch + if chunks_table_data: + conn.execute(insert(chunks_table).values(chunks_table_data)) return ids @@ -186,11 +185,10 @@ def delete_by_uuids(self, ids: list[str] = None): ) try: - with self.client.connect() as conn: - with conn.begin(): - delete_condition = chunks_table.c.id.in_(ids) - conn.execute(chunks_table.delete().where(delete_condition)) - return True + with self.client.connect() as conn, conn.begin(): + delete_condition = chunks_table.c.id.in_(ids) + conn.execute(chunks_table.delete().where(delete_condition)) + return True except Exception as e: print("Delete operation failed:", str(e)) return False diff --git a/api/core/rag/datasource/vdb/tencent/tencent_vector.py b/api/core/rag/datasource/vdb/tencent/tencent_vector.py index ada0c5cf460f4..dbedc1d4e9a22 100644 --- a/api/core/rag/datasource/vdb/tencent/tencent_vector.py +++ b/api/core/rag/datasource/vdb/tencent/tencent_vector.py @@ -63,10 +63,7 @@ def to_index_struct(self) -> dict: def _has_collection(self) -> bool: collections = self._db.list_collections() - for 
collection in collections: - if collection.collection_name == self._collection_name: - return True - return False + return any(collection.collection_name == self._collection_name for collection in collections) def _create_collection(self, dimension: int) -> None: lock_name = "vector_indexing_lock_{}".format(self._collection_name) diff --git a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py index e1ac9d596c314..7eaf189292da1 100644 --- a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py +++ b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py @@ -124,20 +124,19 @@ def add_texts(self, documents: list[Document], embeddings: list[list[float]], ** texts = [d.page_content for d in documents] chunks_table_data = [] - with self._engine.connect() as conn: - with conn.begin(): - for id, text, meta, embedding in zip(ids, texts, metas, embeddings): - chunks_table_data.append({"id": id, "vector": embedding, "text": text, "meta": meta}) - - # Execute the batch insert when the batch size is reached - if len(chunks_table_data) == 500: - conn.execute(insert(table).values(chunks_table_data)) - # Clear the chunks_table_data list for the next batch - chunks_table_data.clear() - - # Insert any remaining records that didn't make up a full batch - if chunks_table_data: + with self._engine.connect() as conn, conn.begin(): + for id, text, meta, embedding in zip(ids, texts, metas, embeddings): + chunks_table_data.append({"id": id, "vector": embedding, "text": text, "meta": meta}) + + # Execute the batch insert when the batch size is reached + if len(chunks_table_data) == 500: conn.execute(insert(table).values(chunks_table_data)) + # Clear the chunks_table_data list for the next batch + chunks_table_data.clear() + + # Insert any remaining records that didn't make up a full batch + if chunks_table_data: + conn.execute(insert(table).values(chunks_table_data)) return ids def text_exists(self, id: str) -> bool: @@ -160,11 +159,10 @@ def _delete_by_ids(self, ids: list[str]) -> bool: raise ValueError("No ids provided to delete.") table = self._table(self._dimension) try: - with self._engine.connect() as conn: - with conn.begin(): - delete_condition = table.c.id.in_(ids) - conn.execute(table.delete().where(delete_condition)) - return True + with self._engine.connect() as conn, conn.begin(): + delete_condition = table.c.id.in_(ids) + conn.execute(table.delete().where(delete_condition)) + return True except Exception as e: print("Delete operation failed:", str(e)) return False diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index 2db00d161b366..c6f15e55b65d0 100644 --- a/api/core/rag/extractor/word_extractor.py +++ b/api/core/rag/extractor/word_extractor.py @@ -48,7 +48,8 @@ def __init__(self, file_path: str, tenant_id: str, user_id: str): raise ValueError(f"Check the url of your file; returned status code {r.status_code}") self.web_path = self.file_path - self.temp_file = tempfile.NamedTemporaryFile() + # TODO: use a better way to handle the file + self.temp_file = tempfile.NamedTemporaryFile() # noqa: SIM115 self.temp_file.write(r.content) self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): diff --git a/api/core/rag/rerank/weight_rerank.py b/api/core/rag/rerank/weight_rerank.py index 4375079ee5277..16d6b879a4bea 100644 --- a/api/core/rag/rerank/weight_rerank.py +++ b/api/core/rag/rerank/weight_rerank.py @@ -120,8 +120,8 @@ def cosine_similarity(vec1, vec2): intersection = 
set(vec1.keys()) & set(vec2.keys()) numerator = sum(vec1[x] * vec2[x] for x in intersection) - sum1 = sum(vec1[x] ** 2 for x in vec1.keys()) - sum2 = sum(vec2[x] ** 2 for x in vec2.keys()) + sum1 = sum(vec1[x] ** 2 for x in vec1) + sum2 = sum(vec2[x] ** 2 for x in vec2) denominator = math.sqrt(sum1) * math.sqrt(sum2) if not denominator: diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 4948ec6ba87c2..e4ad78ed2b345 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -581,8 +581,8 @@ def cosine_similarity(vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum(vec1[x] * vec2[x] for x in intersection) - sum1 = sum(vec1[x] ** 2 for x in vec1.keys()) - sum2 = sum(vec2[x] ** 2 for x in vec2.keys()) + sum1 = sum(vec1[x] ** 2 for x in vec1) + sum2 = sum(vec2[x] ** 2 for x in vec2) denominator = math.sqrt(sum1) * math.sqrt(sum2) if not denominator: diff --git a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py index 592fa230cf4d6..71f8356ab81c4 100644 --- a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py +++ b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py @@ -201,9 +201,7 @@ def process_value(self, value): elif value.startswith('[{"organizeId"'): value = json.loads(value) value = "、".join([item["organizeName"] for item in value]) - elif value.startswith('[{"file_id"'): - value = "" - elif value == "[]": + elif value.startswith('[{"file_id"') or value == "[]": value = "" elif hasattr(value, "accountId"): value = value["fullname"] diff --git a/api/core/tools/provider/builtin/novitaai/tools/novitaai_modelquery.py b/api/core/tools/provider/builtin/novitaai/tools/novitaai_modelquery.py index fe105f70a7262..9ca14b327caae 100644 --- a/api/core/tools/provider/builtin/novitaai/tools/novitaai_modelquery.py +++ b/api/core/tools/provider/builtin/novitaai/tools/novitaai_modelquery.py @@ -35,7 +35,7 @@ def _invoke( models_data=[], headers=headers, params=params, - recursive=False if result_type == "first sd_name" or result_type == "first name sd_name pair" else True, + recursive=not (result_type == "first sd_name" or result_type == "first name sd_name pair"), ) result_str = "" diff --git a/api/core/tools/provider/builtin/qrcode/tools/qrcode_generator.py b/api/core/tools/provider/builtin/qrcode/tools/qrcode_generator.py index cac59f76d8de5..d8ca20bde6ffc 100644 --- a/api/core/tools/provider/builtin/qrcode/tools/qrcode_generator.py +++ b/api/core/tools/provider/builtin/qrcode/tools/qrcode_generator.py @@ -39,7 +39,7 @@ def _invoke( # get error_correction error_correction = tool_parameters.get("error_correction", "") - if error_correction not in self.error_correction_levels.keys(): + if error_correction not in self.error_correction_levels: return self.create_text_message("Invalid parameter error_correction") try: diff --git a/api/core/tools/provider/builtin/searchapi/tools/google.py b/api/core/tools/provider/builtin/searchapi/tools/google.py index d632304a46bba..6d88d74635e85 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google.py @@ -44,36 +44,36 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: @staticmethod def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" - if "error" in res.keys(): + if "error" in res: 
raise ValueError(f"Got error from SearchApi: {res['error']}") toret = "" if type == "text": - if "answer_box" in res.keys() and "answer" in res["answer_box"].keys(): + if "answer_box" in res and "answer" in res["answer_box"]: toret += res["answer_box"]["answer"] + "\n" - if "answer_box" in res.keys() and "snippet" in res["answer_box"].keys(): + if "answer_box" in res and "snippet" in res["answer_box"]: toret += res["answer_box"]["snippet"] + "\n" - if "knowledge_graph" in res.keys() and "description" in res["knowledge_graph"].keys(): + if "knowledge_graph" in res and "description" in res["knowledge_graph"]: toret += res["knowledge_graph"]["description"] + "\n" - if "organic_results" in res.keys() and "snippet" in res["organic_results"][0].keys(): + if "organic_results" in res and "snippet" in res["organic_results"][0]: for item in res["organic_results"]: toret += "content: " + item["snippet"] + "\n" + "link: " + item["link"] + "\n" if toret == "": toret = "No good search result found" elif type == "link": - if "answer_box" in res.keys() and "organic_result" in res["answer_box"].keys(): - if "title" in res["answer_box"]["organic_result"].keys(): + if "answer_box" in res and "organic_result" in res["answer_box"]: + if "title" in res["answer_box"]["organic_result"]: toret = f"[{res['answer_box']['organic_result']['title']}]({res['answer_box']['organic_result']['link']})\n" - elif "organic_results" in res.keys() and "link" in res["organic_results"][0].keys(): + elif "organic_results" in res and "link" in res["organic_results"][0]: toret = "" for item in res["organic_results"]: toret += f"[{item['title']}]({item['link']})\n" - elif "related_questions" in res.keys() and "link" in res["related_questions"][0].keys(): + elif "related_questions" in res and "link" in res["related_questions"][0]: toret = "" for item in res["related_questions"]: toret += f"[{item['title']}]({item['link']})\n" - elif "related_searches" in res.keys() and "link" in res["related_searches"][0].keys(): + elif "related_searches" in res and "link" in res["related_searches"][0]: toret = "" for item in res["related_searches"]: toret += f"[{item['title']}]({item['link']})\n" diff --git a/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py b/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py index 1544061c08f65..d29cb0ae3f1cb 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py @@ -44,12 +44,12 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: @staticmethod def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" - if "error" in res.keys(): + if "error" in res: raise ValueError(f"Got error from SearchApi: {res['error']}") toret = "" if type == "text": - if "jobs" in res.keys() and "title" in res["jobs"][0].keys(): + if "jobs" in res and "title" in res["jobs"][0]: for item in res["jobs"]: toret += ( "title: " @@ -65,7 +65,7 @@ def _process_response(res: dict, type: str) -> str: toret = "No good search result found" elif type == "link": - if "jobs" in res.keys() and "apply_link" in res["jobs"][0].keys(): + if "jobs" in res and "apply_link" in res["jobs"][0]: for item in res["jobs"]: toret += f"[{item['title']} - {item['company_name']}]({item['apply_link']})\n" else: diff --git a/api/core/tools/provider/builtin/searchapi/tools/google_news.py b/api/core/tools/provider/builtin/searchapi/tools/google_news.py index 95a7aad7365f8..8458c8c958e9e 100644 --- 
a/api/core/tools/provider/builtin/searchapi/tools/google_news.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google_news.py @@ -44,25 +44,25 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: @staticmethod def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" - if "error" in res.keys(): + if "error" in res: raise ValueError(f"Got error from SearchApi: {res['error']}") toret = "" if type == "text": - if "organic_results" in res.keys() and "snippet" in res["organic_results"][0].keys(): + if "organic_results" in res and "snippet" in res["organic_results"][0]: for item in res["organic_results"]: toret += "content: " + item["snippet"] + "\n" + "link: " + item["link"] + "\n" - if "top_stories" in res.keys() and "title" in res["top_stories"][0].keys(): + if "top_stories" in res and "title" in res["top_stories"][0]: for item in res["top_stories"]: toret += "title: " + item["title"] + "\n" + "link: " + item["link"] + "\n" if toret == "": toret = "No good search result found" elif type == "link": - if "organic_results" in res.keys() and "title" in res["organic_results"][0].keys(): + if "organic_results" in res and "title" in res["organic_results"][0]: for item in res["organic_results"]: toret += f"[{item['title']}]({item['link']})\n" - elif "top_stories" in res.keys() and "title" in res["top_stories"][0].keys(): + elif "top_stories" in res and "title" in res["top_stories"][0]: for item in res["top_stories"]: toret += f"[{item['title']}]({item['link']})\n" else: diff --git a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py index 88def504fca12..d7bfb53bd70eb 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py +++ b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py @@ -44,11 +44,11 @@ def get_params(self, video_id: str, language: str, **kwargs: Any) -> dict[str, s @staticmethod def _process_response(res: dict) -> str: """Process response from SearchAPI.""" - if "error" in res.keys(): + if "error" in res: raise ValueError(f"Got error from SearchApi: {res['error']}") toret = "" - if "transcripts" in res.keys() and "text" in res["transcripts"][0].keys(): + if "transcripts" in res and "text" in res["transcripts"][0]: for item in res["transcripts"]: toret += item["text"] + " " if toret == "": diff --git a/api/core/tools/provider/builtin/stability/tools/text2image.py b/api/core/tools/provider/builtin/stability/tools/text2image.py index 12b6cc33529fe..9f415ceb5509b 100644 --- a/api/core/tools/provider/builtin/stability/tools/text2image.py +++ b/api/core/tools/provider/builtin/stability/tools/text2image.py @@ -35,7 +35,7 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe if model in ["sd3", "sd3-turbo"]: payload["model"] = tool_parameters.get("model") - if not model == "sd3-turbo": + if model != "sd3-turbo": payload["negative_prompt"] = tool_parameters.get("negative_prompt", "") response = post( diff --git a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py index 46137886bda43..344f9164945f4 100644 --- a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py +++ b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py @@ -206,10 +206,9 @@ def img2img( # Convert image to RGB and save as PNG try: - with 
Image.open(io.BytesIO(image_binary)) as image: - with io.BytesIO() as buffer: - image.convert("RGB").save(buffer, format="PNG") - image_binary = buffer.getvalue() + with Image.open(io.BytesIO(image_binary)) as image, io.BytesIO() as buffer: + image.convert("RGB").save(buffer, format="PNG") + image_binary = buffer.getvalue() except Exception as e: return self.create_text_message(f"Failed to process the image: {str(e)}") diff --git a/api/core/tools/provider/builtin/wikipedia/tools/wikipedia_search.py b/api/core/tools/provider/builtin/wikipedia/tools/wikipedia_search.py index 67efcf0954739..cb88e9519a434 100644 --- a/api/core/tools/provider/builtin/wikipedia/tools/wikipedia_search.py +++ b/api/core/tools/provider/builtin/wikipedia/tools/wikipedia_search.py @@ -27,7 +27,7 @@ def __init__(self, doc_content_chars_max: int = 4000): self.doc_content_chars_max = doc_content_chars_max def run(self, query: str, lang: str = "") -> str: - if lang in wikipedia.languages().keys(): + if lang in wikipedia.languages(): self.lang = lang wikipedia.set_lang(self.lang) diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index c4983ebc65ea8..1109ed7df2f8c 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -19,9 +19,7 @@ def transform_tool_invoke_messages( result = [] for message in messages: - if message.type == ToolInvokeMessage.MessageType.TEXT: - result.append(message) - elif message.type == ToolInvokeMessage.MessageType.LINK: + if message.type == ToolInvokeMessage.MessageType.TEXT or message.type == ToolInvokeMessage.MessageType.LINK: result.append(message) elif message.type == ToolInvokeMessage.MessageType.IMAGE: # try to download image diff --git a/api/core/workflow/graph_engine/entities/graph.py b/api/core/workflow/graph_engine/entities/graph.py index f1f677b8c1b77..c156dd8c98646 100644 --- a/api/core/workflow/graph_engine/entities/graph.py +++ b/api/core/workflow/graph_engine/entities/graph.py @@ -224,9 +224,7 @@ def get_leaf_node_ids(self) -> list[str]: """ leaf_node_ids = [] for node_id in self.node_ids: - if node_id not in self.edge_mapping: - leaf_node_ids.append(node_id) - elif ( + if node_id not in self.edge_mapping or ( len(self.edge_mapping[node_id]) == 1 and self.edge_mapping[node_id][0].target_node_id == self.root_node_id ): diff --git a/api/core/workflow/nodes/answer/answer_stream_generate_router.py b/api/core/workflow/nodes/answer/answer_stream_generate_router.py index 06050e15490cb..e31a1479a8f41 100644 --- a/api/core/workflow/nodes/answer/answer_stream_generate_router.py +++ b/api/core/workflow/nodes/answer/answer_stream_generate_router.py @@ -24,7 +24,7 @@ def init( # parse stream output node value selectors of answer nodes answer_generate_route: dict[str, list[GenerateRouteChunk]] = {} for answer_node_id, node_config in node_id_config_mapping.items(): - if not node_config.get("data", {}).get("type") == NodeType.ANSWER.value: + if node_config.get("data", {}).get("type") != NodeType.ANSWER.value: continue # get generate route for stream output diff --git a/api/core/workflow/nodes/end/end_stream_generate_router.py b/api/core/workflow/nodes/end/end_stream_generate_router.py index 30ce8fe018cef..a38d98239385d 100644 --- a/api/core/workflow/nodes/end/end_stream_generate_router.py +++ b/api/core/workflow/nodes/end/end_stream_generate_router.py @@ -17,7 +17,7 @@ def init( # parse stream output node value selector of end nodes end_stream_variable_selectors_mapping: dict[str, 
list[list[str]]] = {} for end_node_id, node_config in node_id_config_mapping.items(): - if not node_config.get("data", {}).get("type") == NodeType.END.value: + if node_config.get("data", {}).get("type") != NodeType.END.value: continue # skip end node in parallel diff --git a/api/core/workflow/nodes/tool/entities.py b/api/core/workflow/nodes/tool/entities.py index 28fbf789fdd68..9d222b10b98c2 100644 --- a/api/core/workflow/nodes/tool/entities.py +++ b/api/core/workflow/nodes/tool/entities.py @@ -20,7 +20,7 @@ def validate_tool_configurations(cls, value, values: ValidationInfo): if not isinstance(value, dict): raise ValueError("tool_configurations must be a dictionary") - for key in values.data.get("tool_configurations", {}).keys(): + for key in values.data.get("tool_configurations", {}): value = values.data.get("tool_configurations", {}).get(key) if not isinstance(value, str | int | float | bool): raise ValueError(f"{key} must be a string") diff --git a/api/pyproject.toml b/api/pyproject.toml index 23e2b5c549aee..3d100ebc58163 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -17,14 +17,12 @@ select = [ "F", # pyflakes rules "I", # isort rules "N", # pep8-naming - "UP", # pyupgrade rules "RUF019", # unnecessary-key-check "RUF100", # unused-noqa "RUF101", # redirected-noqa "S506", # unsafe-yaml-load - "SIM116", # if-else-block-instead-of-dict-lookup - "SIM401", # if-else-block-instead-of-dict-get - "SIM910", # dict-get-with-none-default + "SIM", # flake8-simplify rules + "UP", # pyupgrade rules "W191", # tab-indentation "W605", # invalid-escape-sequence ] @@ -50,6 +48,15 @@ ignore = [ "B905", # zip-without-explicit-strict "N806", # non-lowercase-variable-in-function "N815", # mixed-case-variable-in-class-scope + "SIM102", # collapsible-if + "SIM103", # needless-bool + "SIM105", # suppressible-exception + "SIM107", # return-in-try-except-finally + "SIM108", # if-else-block-instead-of-if-exp + "SIM113", # eumerate-for-loop + "SIM117", # multiple-with-statements + "SIM210", # if-expr-with-true-false + "SIM300", # yoda-conditions ] [tool.ruff.lint.per-file-ignores] diff --git a/api/services/file_service.py b/api/services/file_service.py index 5780abb2beee5..bedec763343b5 100644 --- a/api/services/file_service.py +++ b/api/services/file_service.py @@ -56,9 +56,7 @@ def upload_file(file: FileStorage, user: Union[Account, EndUser], only_image: bo if etl_type == "Unstructured" else ALLOWED_EXTENSIONS + IMAGE_EXTENSIONS ) - if extension.lower() not in allowed_extensions: - raise UnsupportedFileTypeError() - elif only_image and extension.lower() not in IMAGE_EXTENSIONS: + if extension.lower() not in allowed_extensions or only_image and extension.lower() not in IMAGE_EXTENSIONS: raise UnsupportedFileTypeError() # read file content diff --git a/api/services/ops_service.py b/api/services/ops_service.py index 1e7935d299a0d..d8e2b1689ad53 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -54,7 +54,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in provider_config_map.keys() and tracing_provider: + if tracing_provider not in provider_config_map and tracing_provider: return {"error": f"Invalid tracing provider: {tracing_provider}"} config_class, other_keys = ( @@ -113,7 +113,7 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in 
provider_config_map.keys(): + if tracing_provider not in provider_config_map: raise ValueError(f"Invalid tracing provider: {tracing_provider}") # check if trace config already exists From 56c90e212a1b3df7a17a6f13ce60a5940bebed16 Mon Sep 17 00:00:00 2001 From: takatost Date: Thu, 12 Sep 2024 13:59:48 +0800 Subject: [PATCH 03/25] fix(workflow): missing content in the answer node stream output during iterations (#8292) Co-authored-by: -LAN- --- api/core/rag/extractor/word_extractor.py | 4 ++-- api/core/workflow/nodes/iteration/iteration_node.py | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index c6f15e55b65d0..947644a90b19c 100644 --- a/api/core/rag/extractor/word_extractor.py +++ b/api/core/rag/extractor/word_extractor.py @@ -7,8 +7,8 @@ import re import tempfile import uuid -import xml.etree.ElementTree as ET from urllib.parse import urlparse +from xml.etree import ElementTree import requests from docx import Document as DocxDocument @@ -218,7 +218,7 @@ def parse_docx(self, docx_path, image_folder): hyperlinks_url = None if "HYPERLINK" in run.element.xml: try: - xml = ET.XML(run.element.xml) + xml = ElementTree.XML(run.element.xml) x_child = [c for c in xml.iter() if c is not None] for x in x_child: if x_child is None: diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index 77b14e36a19f8..4d944e93db200 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -16,6 +16,7 @@ IterationRunNextEvent, IterationRunStartedEvent, IterationRunSucceededEvent, + NodeRunStreamChunkEvent, NodeRunSucceededEvent, ) from core.workflow.graph_engine.entities.graph import Graph @@ -154,7 +155,11 @@ def _run(self) -> Generator[RunEvent | InNodeEvent, None, None]: if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id: event.in_iteration_id = self.node_id - if isinstance(event, BaseNodeEvent) and event.node_type == NodeType.ITERATION_START: + if ( + isinstance(event, BaseNodeEvent) + and event.node_type == NodeType.ITERATION_START + and not isinstance(event, NodeRunStreamChunkEvent) + ): continue if isinstance(event, NodeRunSucceededEvent): From c69f5b07ba241e6ef08c7028c12b1125c93c1d01 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Thu, 12 Sep 2024 14:00:36 +0800 Subject: [PATCH 04/25] chore: apply ruff E501 line-too-long linter rule (#8275) Co-authored-by: -LAN- --- api/commands.py | 3 +- api/controllers/console/app/statistic.py | 170 +++++++++++------- .../console/app/workflow_statistic.py | 102 ++++++----- api/controllers/console/wraps.py | 3 +- api/controllers/web/wraps.py | 3 +- api/core/agent/prompt/template.py | 6 +- .../app/apps/workflow_logging_callback.py | 15 +- api/core/file/message_file_parser.py | 3 +- .../helper/code_executor/code_executor.py | 3 +- api/core/helper/tool_parameter_cache.py | 5 +- api/core/llm_generator/prompts.py | 19 +- api/core/model_runtime/entities/defaults.py | 16 +- .../model_providers/__base/ai_model.py | 4 +- .../__base/large_language_model.py | 5 +- .../model_providers/anthropic/llm/llm.py | 2 +- .../model_providers/azure_openai/_constant.py | 69 +++---- .../baichuan/llm/baichuan_tokenizer.py | 3 +- .../model_providers/bedrock/llm/llm.py | 6 +- .../model_providers/google/llm/llm.py | 2 +- .../huggingface_tei/tei_helper.py | 3 +- .../model_providers/hunyuan/llm/llm.py | 3 +- 
.../model_providers/nvidia/llm/llm.py | 3 +- .../model_providers/oci/llm/llm.py | 3 +- .../oci/text_embedding/text_embedding.py | 3 +- .../model_providers/ollama/llm/llm.py | 7 +- .../model_providers/openai/llm/llm.py | 2 +- .../openai_api_compatible/llm/llm.py | 6 +- .../model_providers/spark/llm/_client.py | 5 +- .../model_providers/upstage/llm/llm.py | 2 +- .../model_providers/vertex_ai/llm/llm.py | 6 +- .../model_providers/wenxin/llm/llm.py | 2 +- .../model_providers/xinference/llm/llm.py | 9 +- .../xinference/speech2text/speech2text.py | 10 +- .../xinference/xinference_helper.py | 6 +- .../model_providers/zhipuai/llm/llm.py | 2 +- .../zhipuai/zhipuai_sdk/core/_base_type.py | 3 +- .../schema_validators/common_validator.py | 3 +- .../advanced_prompt_templates.py | 8 +- .../rag/datasource/vdb/oracle/oraclevector.py | 6 +- .../rag/datasource/vdb/pgvector/pgvector.py | 3 +- api/core/rag/extractor/extract_processor.py | 5 +- .../router/multi_dataset_react_route.py | 9 +- api/core/tools/entities/tool_bundle.py | 3 +- api/core/tools/entities/values.py | 32 ++-- .../provider/builtin/aippt/tools/aippt.py | 3 +- .../builtin/arxiv/tools/arxiv_search.py | 3 +- .../builtin/aws/tools/apply_guardrail.py | 3 +- .../builtin/devdocs/tools/searchDevDocs.py | 3 +- .../builtin/gitlab/tools/gitlab_files.py | 5 +- .../google_translate/tools/translate.py | 3 +- .../builtin/hap/tools/get_worksheet_fields.py | 3 +- .../hap/tools/list_worksheet_records.py | 5 +- .../builtin/searchapi/tools/google.py | 5 +- .../stablediffusion/tools/stable_diffusion.py | 21 ++- .../trello/tools/create_list_on_board.py | 3 +- .../trello/tools/create_new_card_on_board.py | 3 +- .../builtin/trello/tools/delete_board.py | 3 +- .../builtin/trello/tools/delete_card.py | 3 +- .../builtin/trello/tools/get_board_actions.py | 3 +- .../builtin/trello/tools/get_board_by_id.py | 3 +- .../builtin/trello/tools/get_board_cards.py | 3 +- .../trello/tools/get_filterd_board_cards.py | 3 +- .../trello/tools/get_lists_on_board.py | 3 +- .../builtin/trello/tools/update_board.py | 3 +- .../builtin/trello/tools/update_card.py | 3 +- .../builtin/twilio/tools/send_message.py | 3 +- .../builtin/vectorizer/tools/test_data.py | 2 +- api/core/tools/tool_engine.py | 5 +- api/core/tools/utils/feishu_api_utils.py | 2 +- api/core/tools/utils/message_transformer.py | 2 +- api/core/tools/utils/parser.py | 6 +- api/core/tools/utils/web_reader_tool.py | 3 +- api/core/workflow/nodes/code/code_node.py | 18 +- .../knowledge_retrieval_node.py | 7 +- .../nodes/parameter_extractor/prompts.py | 12 +- .../question_classifier/template_prompts.py | 8 +- api/libs/gmpy2_pkcs10aep_cipher.py | 3 +- api/models/provider.py | 5 +- api/models/tools.py | 3 +- api/models/workflow.py | 3 +- api/poetry.lock | 2 +- api/pyproject.toml | 14 +- .../tools/api_tools_manage_service.py | 3 +- .../model_runtime/__mock/openai_embeddings.py | 2 +- .../core/app/segments/test_segment.py | 6 +- 85 files changed, 457 insertions(+), 322 deletions(-) diff --git a/api/commands.py b/api/commands.py index 3bf8bc0ecc45f..db96fbae461f4 100644 --- a/api/commands.py +++ b/api/commands.py @@ -411,7 +411,8 @@ def migrate_knowledge_vector_database(): try: click.echo( click.style( - f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.", + f"Start to created vector index with {len(documents)} documents of {segments_count}" + f" segments for dataset {dataset.id}.", fg="green", ) ) diff --git a/api/controllers/console/app/statistic.py 
b/api/controllers/console/app/statistic.py index 4806b02b558df..3ef442812d5de 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -29,10 +29,13 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(*) AS message_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(*) AS message_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -45,7 +48,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -55,10 +58,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -83,10 +86,13 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.conversation_id) AS conversation_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT messages.conversation_id) AS conversation_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -99,7 +105,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -109,10 +115,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -137,10 +143,13 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.from_end_user_id) AS terminal_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT messages.from_end_user_id) AS terminal_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": 
app_model.id} timezone = pytz.timezone(account.timezone) @@ -153,7 +162,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -163,10 +172,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -191,12 +200,14 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - (sum(messages.message_tokens) + sum(messages.answer_tokens)) as token_count, - sum(total_price) as total_price - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + (SUM(messages.message_tokens) + SUM(messages.answer_tokens)) AS token_count, + SUM(total_price) AS total_price +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -209,7 +220,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -219,10 +230,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -249,12 +260,22 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, -AVG(subquery.message_count) AS interactions -FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count - FROM conversations c - JOIN messages m ON c.id = m.conversation_id - WHERE c.override_model_configs IS NULL AND c.app_id = :app_id""" + sql_query = """SELECT + DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + AVG(subquery.message_count) AS interactions +FROM + ( + SELECT + m.conversation_id, + COUNT(m.id) AS message_count + FROM + conversations c + JOIN + messages m + ON c.id = m.conversation_id + WHERE + c.override_model_configs IS NULL + AND c.app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -267,7 +288,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and c.created_at >= :start" + sql_query += " AND c.created_at >= :start" arg_dict["start"] = 
start_datetime_utc if args["end"]: @@ -277,14 +298,19 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and c.created_at < :end" + sql_query += " AND c.created_at < :end" arg_dict["end"] = end_datetime_utc sql_query += """ - GROUP BY m.conversation_id) subquery -LEFT JOIN conversations c on c.id=subquery.conversation_id -GROUP BY date -ORDER BY date""" + GROUP BY m.conversation_id + ) subquery +LEFT JOIN + conversations c + ON c.id = subquery.conversation_id +GROUP BY + date +ORDER BY + date""" response_data = [] @@ -311,13 +337,17 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count - FROM messages m - LEFT JOIN message_feedbacks mf on mf.message_id=m.id and mf.rating='like' - WHERE m.app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(m.id) AS message_count, + COUNT(mf.id) AS feedback_count +FROM + messages m +LEFT JOIN + message_feedbacks mf + ON mf.message_id=m.id AND mf.rating='like' +WHERE + m.app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -330,7 +360,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and m.created_at >= :start" + sql_query += " AND m.created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -340,10 +370,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and m.created_at < :end" + sql_query += " AND m.created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -373,12 +403,13 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - AVG(provider_response_latency) as latency - FROM messages - WHERE app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + AVG(provider_response_latency) AS latency +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -391,7 +422,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -401,10 +432,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP 
BY date ORDER BY date" response_data = [] @@ -429,13 +460,16 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - CASE + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + CASE WHEN SUM(provider_response_latency) = 0 THEN 0 ELSE (SUM(answer_tokens) / SUM(provider_response_latency)) END as tokens_per_second -FROM messages -WHERE app_id = :app_id""" +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -448,7 +482,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -458,10 +492,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index 942271a634c21..c7e54f2be0c29 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -30,12 +30,14 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(id) AS runs +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -52,7 +54,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -62,10 +64,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -90,12 +92,14 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + 
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT workflow_runs.created_by) AS terminal_count +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -112,7 +116,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -122,10 +126,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -150,14 +154,14 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT - date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - SUM(workflow_runs.total_tokens) as token_count - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + SUM(workflow_runs.total_tokens) AS token_count +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -174,7 +178,7 @@ def get(self, app_model): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -184,10 +188,10 @@ def get(self, app_model): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -217,23 +221,27 @@ def get(self, app_model): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT - AVG(sub.interactions) as interactions, - sub.date - FROM - (SELECT - date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - c.created_by, - COUNT(c.id) AS interactions - FROM workflow_runs c - WHERE c.app_id = :app_id - AND c.triggered_from = :triggered_from - {{start}} - {{end}} - GROUP BY date, c.created_by) sub - GROUP BY sub.date - """ + sql_query = """SELECT + AVG(sub.interactions) AS interactions, + sub.date +FROM + ( + SELECT + DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + c.created_by, + COUNT(c.id) AS interactions + FROM + workflow_runs c + WHERE + c.app_id = :app_id + AND c.triggered_from = :triggered_from + {{start}} + {{end}} + GROUP BY + date, c.created_by + ) sub +GROUP BY + sub.date""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -262,7 +270,7 @@ def get(self, app_model): end_datetime_timezone = 
timezone.localize(end_datetime)
            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)

-            sql_query = sql_query.replace("{{end}}", " and c.created_at < :end")
+            sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end")
             arg_dict["end"] = end_datetime_utc
         else:
             sql_query = sql_query.replace("{{end}}", "")
diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py
index 7667b30e348a9..46223d104fe03 100644
--- a/api/controllers/console/wraps.py
+++ b/api/controllers/console/wraps.py
@@ -64,7 +64,8 @@ def decorated(*args, **kwargs):
             elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
                 abort(403, "The capacity of the vector space has reached the limit of your subscription.")
             elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
-                # The api of file upload is used in the multiple places, so we need to check the source of the request from datasets
+                # The file upload API is used in multiple places,
+                # so we need to check the source of the request from datasets
                 source = request.args.get("source")
                 if source == "datasets":
                     abort(403, "The number of documents has reached the limit of your subscription.")
diff --git a/api/controllers/web/wraps.py b/api/controllers/web/wraps.py
index 93dc691d62394..c327c3df18526 100644
--- a/api/controllers/web/wraps.py
+++ b/api/controllers/web/wraps.py
@@ -80,7 +80,8 @@ def _validate_web_sso_token(decoded, system_features, app_code):
     if not source or source != "sso":
         raise WebSSOAuthRequiredError()

-    # Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login
+    # Check if SSO is not enforced for web, and if the token source is SSO,
+    # raise an error and redirect to the normal passport login
     if not system_features.sso_enforced_for_web or not app_web_sso_enabled:
         source = decoded.get("token_source")
         if source and source == "sso":
diff --git a/api/core/agent/prompt/template.py b/api/core/agent/prompt/template.py
index cb98f5501ddd0..ef64fd29fc3a7 100644
--- a/api/core/agent/prompt/template.py
+++ b/api/core/agent/prompt/template.py
@@ -41,7 +41,8 @@
{{historic_messages}}
Question: {{query}}
{{agent_scratchpad}}
-Thought:"""
+Thought:""" # noqa: E501
+
ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}}
Thought:"""

@@ -86,7 +87,8 @@
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
-""" +""" # noqa: E501 + ENGLISH_REACT_CHAT_AGENT_SCRATCHPAD_TEMPLATES = "" diff --git a/api/core/app/apps/workflow_logging_callback.py b/api/core/app/apps/workflow_logging_callback.py index cdd21bf7c2e94..388cb83180227 100644 --- a/api/core/app/apps/workflow_logging_callback.py +++ b/api/core/app/apps/workflow_logging_callback.py @@ -84,10 +84,12 @@ def on_workflow_node_execute_succeeded(self, event: NodeRunSucceededEvent) -> No if route_node_state.node_run_result: node_run_result = route_node_state.node_run_result self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="green" + f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + color="green", ) self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", color="green", ) self.print_text( @@ -114,14 +116,17 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None: node_run_result = route_node_state.node_run_result self.print_text(f"Error: {node_run_result.error}", color="red") self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red" + f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + color="red", ) self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", color="red", ) self.print_text( - f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", color="red" + f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", + color="red", ) def on_node_text_chunk(self, event: NodeRunStreamChunkEvent) -> None: diff --git a/api/core/file/message_file_parser.py b/api/core/file/message_file_parser.py index 8feaabedbbdf4..83059b216ee66 100644 --- a/api/core/file/message_file_parser.py +++ b/api/core/file/message_file_parser.py @@ -188,7 +188,8 @@ def _to_file_obj(self, file: Union[dict, MessageFile], file_extra_config: FileEx def _check_image_remote_url(self, url): try: headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } def is_s3_presigned_url(url): diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 7ee6e6381755f..4932284540738 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -89,7 +89,8 @@ def execute_code(cls, language: CodeLanguage, preload: str, code: str) -> str: raise CodeExecutionError("Code execution service is unavailable") elif response.status_code != 200: raise Exception( - f"Failed to execute code, got status code {response.status_code}, please check if the sandbox service is running" + f"Failed to execute code, got status code {response.status_code}," + f" please check if the sandbox service is running" ) except CodeExecutionError as e: raise e diff --git a/api/core/helper/tool_parameter_cache.py 
b/api/core/helper/tool_parameter_cache.py
index 4c3b736186383..e848b46c5633a 100644
--- a/api/core/helper/tool_parameter_cache.py
+++ b/api/core/helper/tool_parameter_cache.py
@@ -14,7 +14,10 @@ class ToolParameterCache:
     def __init__(
         self, tenant_id: str, provider: str, tool_name: str, cache_type: ToolParameterCacheType, identity_id: str
     ):
-        self.cache_key = f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}:identity_id:{identity_id}"
+        self.cache_key = (
+            f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}"
+            f":identity_id:{identity_id}"
+        )

     def get(self) -> Optional[dict]:
         """
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index 7ab257872f54c..c40b6d180804c 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -59,24 +59,27 @@
 }

 User Input:
-"""
+""" # noqa: E501

 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
     "Please help me predict the three most likely questions that human would ask, "
     "and keeping each question under 20 characters.\n"
-    "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n"
+    "MAKE SURE your output is the SAME language as the Assistant's latest response"
+    "(if the main response is written in Chinese, then the language of your output must be Chinese.)!\n"
     "The output must be an array in JSON format following the specified schema:\n"
     '["question1","question2","question3"]\n'
 )

 GENERATOR_QA_PROMPT = (
-    " The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step."
+    " The user will send a long text. Generate Question and Answer pairs only using the knowledge"
+    " in the long text. Please think step by step."
     "Step 1: Understand and summarize the main content of this text.\n"
     "Step 2: What key information or concepts are mentioned in this text?\n"
     "Step 3: Decompose or combine multiple pieces of information and concepts.\n"
     "Step 4: Generate questions and answers based on these key information and concepts.\n"
     " The questions should be clear and detailed, and the answers should be detailed and complete. "
-    "You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n"
+    "You must answer in {language}, in a style that is clear and detailed in {language}."
+    " No language other than {language} should be used. \n"
     " Use the following format: Q1:\nA1:\nQ2:\nA2:...\n"
     ""
 )
@@ -94,7 +97,7 @@
 - Use the same language as task description.
 - Output in ``` xml ``` and start with 
Please generate the full prompt template with at least 300 words and output only the prompt template.
-"""
+""" # noqa: E501

 RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
 Here is a task description for which I would like you to create a high-quality prompt template for:
@@ -109,7 +112,7 @@
 - Use the same language as task description.
 - Output in ``` xml ``` and start with 
Please generate the full prompt template and output only the prompt template.
-"""
+""" # noqa: E501

 RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """
 I need to extract the following information from the input text. The  tag specifies the 'type', 'description' and 'required' of the information to be extracted.
@@ -134,7 +137,7 @@
### Answer
I should always output a valid list.
Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text. -""" +""" # noqa: E501 RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """ @@ -150,4 +153,4 @@ Here is the task description: {{INPUT_TEXT}} You just need to generate the output -""" +""" # noqa: E501 diff --git a/api/core/model_runtime/entities/defaults.py b/api/core/model_runtime/entities/defaults.py index e94be6f9181cd..4d0c9aa08f733 100644 --- a/api/core/model_runtime/entities/defaults.py +++ b/api/core/model_runtime/entities/defaults.py @@ -8,8 +8,11 @@ }, "type": "float", "help": { - "en_US": "Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.", - "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。", + "en_US": "Controls randomness. Lower temperature results in less random completions." + " As the temperature approaches zero, the model will become deterministic and repetitive." + " Higher temperature results in more random completions.", + "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。" + "较高的温度会导致更多的随机完成。", }, "required": False, "default": 0.0, @@ -24,7 +27,8 @@ }, "type": "float", "help": { - "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.", + "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options" + " are considered.", "zh_Hans": "通过核心采样控制多样性:0.5表示考虑了一半的所有可能性加权选项。", }, "required": False, @@ -88,7 +92,8 @@ }, "type": "int", "help": { - "en_US": "Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.", + "en_US": "Specifies the upper limit on the length of generated results." + " If the generated results are truncated, you can increase this parameter.", "zh_Hans": "指定生成结果长度的上限。如果生成结果截断,可以调大该参数。", }, "required": False, @@ -104,7 +109,8 @@ }, "type": "string", "help": { - "en_US": "Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.", + "en_US": "Set a response format, ensure the output from llm is a valid code block as possible," + " such as JSON, XML, etc.", "zh_Hans": "设置一个返回格式,确保llm的输出尽可能是有效的代码块,如JSON、XML等", }, "required": False, diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py index 09d2d7e54da9e..e7e343f00ddeb 100644 --- a/api/core/model_runtime/model_providers/__base/ai_model.py +++ b/api/core/model_runtime/model_providers/__base/ai_model.py @@ -72,7 +72,9 @@ def _transform_invoke_error(self, error: Exception) -> InvokeError: if isinstance(error, tuple(model_errors)): if invoke_error == InvokeAuthorizationError: return invoke_error( - description=f"[{provider_name}] Incorrect model credentials provided, please check and try again. " + description=( + f"[{provider_name}] Incorrect model credentials provided, please check and try again." 
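+                        # e.g. with provider_name "openai" (an illustrative value), this renders as
+                        # "[openai] Incorrect model credentials provided, please check and try again."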
+                    )
                 )

             return invoke_error(description=f"[{provider_name}] {invoke_error.description}, {str(error)}")
diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py
index 5c39186e6504e..e8789ec7df182 100644
--- a/api/core/model_runtime/model_providers/__base/large_language_model.py
+++ b/api/core/model_runtime/model_providers/__base/large_language_model.py
@@ -187,7 +187,7 @@ def _code_block_mode_wrapper(
 {{instructions}}

-"""
+""" # noqa: E501

         code_block = model_parameters.get("response_format", "")
         if not code_block:
@@ -830,7 +830,8 @@ def _validate_and_filter_model_parameters(self, model: str, model_parameters: di
                 else:
                     if parameter_value != round(parameter_value, parameter_rule.precision):
                         raise ValueError(
-                            f"Model Parameter {parameter_name} should be round to {parameter_rule.precision} decimal places."
+                            f"Model Parameter {parameter_name} should be rounded to {parameter_rule.precision}"
+                            f" decimal places."
                         )

         # validate parameter value range
diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
index 30e9d2e9f2e0d..0cb66842e7e26 100644
--- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py
+++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
@@ -51,7 +51,7 @@
 {{instructions}}

-"""
+""" # noqa: E501

 class AnthropicLargeLanguageModel(LargeLanguageModel):
diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py
index c2744691c3ed6..0dada70cc5b7f 100644
--- a/api/core/model_runtime/model_providers/azure_openai/_constant.py
+++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py
@@ -16,6 +16,15 @@
 AZURE_OPENAI_API_VERSION = "2024-02-15-preview"

+AZURE_DEFAULT_PARAM_SEED_HELP = I18nObject(
+    zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,"
+    "您应该参考 system_fingerprint 响应参数来监视变化。",
+    en_US="If specified, model will make a best effort to sample deterministically,"
+    " such that repeated requests with the same seed and parameters should return the same result."
+    " Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter"
+    " to monitor changes in the backend.",
+)
+

 def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule:
     rule = ParameterRule(
@@ -229,10 +238,7 @@ class AzureBaseModel(BaseModel):
                     name="seed",
                     label=I18nObject(zh_Hans="种子", en_US="Seed"),
                     type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                     required=False,
                     precision=2,
                     min=0,
@@ -297,10 +303,7 @@ class AzureBaseModel(BaseModel):
                     name="seed",
                     label=I18nObject(zh_Hans="种子", en_US="Seed"),
                     type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -365,10 +368,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -433,10 +433,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -502,10 +499,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -571,10 +565,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -650,10 +641,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -719,10 +707,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -788,10 +773,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -867,10 +849,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -936,10 +915,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -1000,10 +976,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py index bea6777f833a4..a7ca28d49d636 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py +++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py @@ -15,6 +15,7 @@ def count_english_vocabularies(cls, text: str) -> int: @classmethod def _get_num_tokens(cls, text: str) -> int: - # tokens = number of Chinese characters + number of English words * 1.3 (for estimation only, subject to actual return) + # tokens = number of Chinese characters + number of English words * 1.3 + # (for estimation only, subject to actual return) # https://platform.baichuan-ai.com/docs/text-Embedding return int(cls.count_chinese_characters(text) + cls.count_english_vocabularies(text) * 1.3) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index e07f2a419a00e..239ae52b4cd50 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -52,7 +52,7 @@ {{instructions}} -""" +""" # noqa: E501 class BedrockLargeLanguageModel(LargeLanguageModel): @@ -541,7 +541,9 @@ def validate_credentials(self, model: str, credentials: dict) -> None: "max_tokens": 32, } elif "ai21" in model: - # ValidationException: Malformed input request: #/temperature: expected type: Number, found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, please reformat your input and try again. + # ValidationException: Malformed input request: #/temperature: expected type: Number, + # found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, + # please reformat your input and try again. 
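+            # i.e. the ai21 input schema rejects null sampling parameters, so the
+            # credential-validation request below sends concrete temperature/topP/maxTokens values.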
required_params = {
                 "temperature": 0.7,
                 "topP": 0.9,
diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py
index 307c15e1fdee8..b10d0edba355e 100644
--- a/api/core/model_runtime/model_providers/google/llm/llm.py
+++ b/api/core/model_runtime/model_providers/google/llm/llm.py
@@ -45,7 +45,7 @@
 {{instructions}}

-"""
+""" # noqa: E501

 class GoogleLargeLanguageModel(LargeLanguageModel):
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py
index 56c51e8888636..288637495f6c8 100644
--- a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py
+++ b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py
@@ -54,7 +54,8 @@ def _get_tei_extra_parameter(server_url: str) -> TeiModelExtraParameter:
         url = str(URL(server_url) / "info")

-        # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3
+        # this method is surrounded by a lock, and default requests may hang forever,
+        # so we just set an Adapter with max_retries=3
         session = Session()
         session.mount("http://", HTTPAdapter(max_retries=3))
         session.mount("https://", HTTPAdapter(max_retries=3))
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
index c056ab7a0855c..b57e5e1c2b2ed 100644
--- a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
@@ -131,7 +131,8 @@ def _convert_prompt_messages_to_dicts(self, prompt_messages: list[PromptMessage]
                 {
                     "Role": message.role.value,
                     # fix set content = "" while tool_call request
-                    # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter message:Messages Content and Contents not allowed empty at the same time.
+                    # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter
+                    # message:Messages Content and Contents not allowed empty at the same time.
                     "Content": " ",  # message.content if (message.content is not None) else "",
                     "ToolCalls": dict_tool_calls,
                 }
diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py
index 4d3747dc842ea..1c98c6be6ca72 100644
--- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py
+++ b/api/core/model_runtime/model_providers/nvidia/llm/llm.py
@@ -93,7 +93,8 @@ def _add_custom_parameters(self, credentials: dict, model: str) -> None:

    def _validate_credentials(self, model: str, credentials: dict) -> None:
        """
-        Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard.
+        Validate model credentials using requests to ensure compatibility with all providers following
+        OpenAI's API standard.
:param model: model name :param credentials: model credentials diff --git a/api/core/model_runtime/model_providers/oci/llm/llm.py b/api/core/model_runtime/model_providers/oci/llm/llm.py index 51b634c6cf8b0..1e1fc5b3ea89a 100644 --- a/api/core/model_runtime/model_providers/oci/llm/llm.py +++ b/api/core/model_runtime/model_providers/oci/llm/llm.py @@ -239,7 +239,8 @@ def _generate( config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py index df77db47d9fb5..80ad2be9f5687 100644 --- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py @@ -146,7 +146,8 @@ def _embedding_invoke(self, model: str, credentials: dict, texts: list[str]) -> config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index 160eea0148702..3f32f454e499d 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -639,9 +639,10 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode type=ParameterType.STRING, help=I18nObject( en_US="Sets how long the model is kept in memory after generating a response. " - "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours). " - "A negative number keeps the model loaded indefinitely, and '0' unloads the model immediately after generating a response. " - "Valid time units are 's','m','h'. (Default: 5m)" + "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)." + " A negative number keeps the model loaded indefinitely, and '0' unloads the model" + " immediately after generating a response." + " Valid time units are 's','m','h'. 
(Default: 5m)" ), ), ParameterRule( diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index 5950b77a96a01..578687b5d3a81 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -37,7 +37,7 @@ {{instructions}} -""" +""" # noqa: E501 class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index 24317b488c2de..41ca163a92ff8 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -103,7 +103,8 @@ def get_num_tokens( def validate_credentials(self, model: str, credentials: dict) -> None: """ - Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. + Validate model credentials using requests to ensure compatibility with all providers following + OpenAI's API standard. :param model: model name :param credentials: model credentials @@ -262,7 +263,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode return entity - # validate_credentials method has been rewritten to use the requests library for compatibility with all providers following OpenAI's API standard. + # validate_credentials method has been rewritten to use the requests library for compatibility with all providers + # following OpenAI's API standard. def _generate( self, model: str, diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py index 25223e8340510..b99a657e715ed 100644 --- a/api/core/model_runtime/model_providers/spark/llm/_client.py +++ b/api/core/model_runtime/model_providers/spark/llm/_client.py @@ -61,7 +61,10 @@ def create_url(self, host: str, path: str, api_base: str, api_key: str, api_secr signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8") - authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"' + authorization_origin = ( + f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",' + f' signature="{signature_sha_base64}"' + ) authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8") diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py index 1014b53f39ef6..9646e209b2636 100644 --- a/api/core/model_runtime/model_providers/upstage/llm/llm.py +++ b/api/core/model_runtime/model_providers/upstage/llm/llm.py @@ -34,7 +34,7 @@ {{instructions}} -""" +""" # noqa: E501 class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py index 110028a288a7d..1b9931d2c3194 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py @@ -114,7 +114,8 @@ def _generate_anthropic( credentials.refresh(request) token = credentials.token - # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available in us-central1 region + # Vertex AI Anthropic 
Claude3 Opus model available in us-east5 region, Sonnet and Haiku available + # in us-central1 region if "opus" in model or "claude-3-5-sonnet" in model: location = "us-east5" else: @@ -123,7 +124,8 @@ def _generate_anthropic( # use access token to authenticate if token: client = AnthropicVertex(region=location, project_id=project_id, access_token=token) - # When access token is empty, try to use the Google Cloud VM's built-in service account or the GOOGLE_APPLICATION_CREDENTIALS environment variable + # When access token is empty, try to use the Google Cloud VM's built-in service account + # or the GOOGLE_APPLICATION_CREDENTIALS environment variable else: client = AnthropicVertex( region=location, diff --git a/api/core/model_runtime/model_providers/wenxin/llm/llm.py b/api/core/model_runtime/model_providers/wenxin/llm/llm.py index 1ff0ac7ad27ce..8feedbfe55f08 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/llm.py +++ b/api/core/model_runtime/model_providers/wenxin/llm/llm.py @@ -28,7 +28,7 @@ You should also complete the text started with ``` but not tell ``` directly. -""" +""" # noqa: E501 class ErnieBotLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index bc7531ee20187..7ad236880b4c3 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -130,7 +130,8 @@ def validate_credentials(self, model: str, credentials: dict) -> None: credentials["completion_type"] = "completion" else: raise ValueError( - f"xinference model ability {extra_param.model_ability} is not supported, check if you have the right model type" + f"xinference model ability {extra_param.model_ability} is not supported," + f" check if you have the right model type" ) if extra_param.support_function_call: @@ -358,7 +359,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode help=I18nObject( en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they " "appear in the text so far, increasing the model's likelihood to talk about new topics.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚,从而增加模型谈论新话题的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚," + "从而增加模型谈论新话题的可能性。", ), default=0.0, min=-2.0, @@ -378,7 +380,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on their " "existing frequency in the text so far, decreasing the model's likelihood to repeat the " "same line verbatim.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚,从而降低模型逐字重复相同内容的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚," + "从而降低模型逐字重复相同内容的可能性。", ), default=0.0, min=-2.0, diff --git a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py index 54c8b51654bfb..18efde758c5b5 100644 --- a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py +++ b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py @@ -101,12 +101,16 @@ def _speech2text_invoke( :param model: model name :param credentials: model credentials - :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpe g,mpga, m4a, ogg, wav, or webm. 
+        :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg,
+        mpga, m4a, ogg, wav, or webm.
         :param language: The language of the input audio. Supplying the input language in ISO-639-1
         :param prompt: An optional text to guide the model's style or continue a previous audio segment.
             The prompt should match the audio language.
-        :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose _json, or vtt.
-        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output mor e random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model wi ll use log probability to automatically increase the temperature until certain thresholds are hit.
+        :param response_format: The format of the transcript output, in one of these options: json, text, srt,
+        verbose_json, or vtt.
+        :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+        random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use
+        log probability to automatically increase the temperature until certain thresholds are hit.
         :return: text for given audio file
         """
         server_url = credentials["server_url"]
diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py
index 6ad10e690ddc1..1e05da9c56b27 100644
--- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py
+++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py
@@ -76,7 +76,8 @@ def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: st
     url = str(URL(server_url) / "v1" / "models" / model_uid)

-    # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3
+    # this method is surrounded by a lock, and default requests may hang forever,
+    # so we just set an Adapter with max_retries=3
     session = Session()
     session.mount("http://", HTTPAdapter(max_retries=3))
     session.mount("https://", HTTPAdapter(max_retries=3))
@@ -88,7 +89,8 @@ def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: st
         raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}")
     if response.status_code != 200:
         raise RuntimeError(
-            f"get xinference model extra parameter failed, status code: {response.status_code}, response: {response.text}"
+            f"get xinference model extra parameter failed, status code: {response.status_code},"
+            f" response: {response.text}"
         )

     response_json = response.json()
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
index 498962bd0f8ac..29b873fd06624 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
@@ -31,7 +31,7 @@
 {{instructions}}

-```JSON"""
+```JSON""" # noqa: E501

 class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py
index b7cf6bb7fd06c..7a91f9b79627c 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py
+++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py
@@ -75,7 +75,8 @@ def __bool__(self) -> Literal[False]:

 ResponseT = TypeVar(
     "ResponseT",
-    bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]",
+    bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol,"
+    " BinaryResponseContent]",
 )

 # for user input files
diff --git a/api/core/model_runtime/schema_validators/common_validator.py b/api/core/model_runtime/schema_validators/common_validator.py
index e4f3541475640..c05edb72e3552 100644
--- a/api/core/model_runtime/schema_validators/common_validator.py
+++ b/api/core/model_runtime/schema_validators/common_validator.py
@@ -67,7 +67,8 @@ def _validate_credential_form_schema(
             if credential_form_schema.max_length:
                 if len(value) > credential_form_schema.max_length:
                     raise ValueError(
-                        f"Variable {credential_form_schema.variable} length should not greater than {credential_form_schema.max_length}"
+                        f"Variable {credential_form_schema.variable} length should not be"
+                        f" greater than {credential_form_schema.max_length}"
                     )

         # check the type of value
diff --git a/api/core/prompt/prompt_templates/advanced_prompt_templates.py b/api/core/prompt/prompt_templates/advanced_prompt_templates.py
index e4b3a61cb4c00..0ab7f526cc4fe 100644
--- a/api/core/prompt/prompt_templates/advanced_prompt_templates.py
+++ b/api/core/prompt/prompt_templates/advanced_prompt_templates.py
@@ -1,11 +1,11 @@
-CONTEXT = "Use the following context as your learned knowledge, inside XML tags.\n\n\n{{#context#}}\n\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n"
+CONTEXT = "Use the following context as your learned knowledge, inside XML tags.\n\n\n{{#context#}}\n\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" # noqa: E501

-BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n"
+BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" # noqa: E501

 CHAT_APP_COMPLETION_PROMPT_CONFIG = {
     "completion_prompt_config": {
         "prompt": {
-            "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside XML tags.\n\n\n{{#histories#}}\n\n\n\nHuman: {{#query#}}\n\nAssistant: "
+            "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside XML tags.\n\n\n{{#histories#}}\n\n\n\nHuman: {{#query#}}\n\nAssistant: " # noqa: E501
         },
         "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
     },
@@ -24,7 +24,7 @@
 BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG = {
     "completion_prompt_config": {
         "prompt": {
-            "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}"
+            "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" # noqa: E501
         },
         "conversation_histories_role": {"user_prefix": "用户", "assistant_prefix": "助手"},
     },
diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py 
b/api/core/rag/datasource/vdb/oracle/oraclevector.py index 06c20ceb5fdd5..d223b0decf3d2 100644 --- a/api/core/rag/datasource/vdb/oracle/oraclevector.py +++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py @@ -195,7 +195,8 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc top_k = kwargs.get("top_k", 5) with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name} ORDER BY distance fetch first {top_k} rows only", + f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name}" + f" ORDER BY distance fetch first {top_k} rows only", [numpy.array(query_vector)], ) docs = [] @@ -254,7 +255,8 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: entities.append(token) with self._get_cursor() as cur: cur.execute( - f"select meta, text, embedding FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", + f"select meta, text, embedding FROM {self.table_name}" + f" WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", [" ACCUM ".join(entities)], ) docs = [] diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 38dfd24b5626e..d2d9e5238b607 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -139,7 +139,8 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name} ORDER BY distance LIMIT {top_k}", + f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name}" + f" ORDER BY distance LIMIT {top_k}", (json.dumps(query_vector),), ) docs = [] diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index a00b3cba533f6..244ef9614a48a 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -30,7 +30,10 @@ from models.model import UploadFile SUPPORT_URL_CONTENT_TYPES = ["application/pdf", "text/plain", "application/json"] -USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" +USER_AGENT = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124" + " Safari/537.36" +) class ExtractProcessor: diff --git a/api/core/rag/retrieval/router/multi_dataset_react_route.py b/api/core/rag/retrieval/router/multi_dataset_react_route.py index 33841cac06adb..a0494adc60e86 100644 --- a/api/core/rag/retrieval/router/multi_dataset_react_route.py +++ b/api/core/rag/retrieval/router/multi_dataset_react_route.py @@ -14,7 +14,7 @@ PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. -Thought:""" +Thought:""" # noqa: E501 FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. 
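# The PREFIX/SUFFIX/FORMAT_INSTRUCTIONS templates above steer the model to reply with one
# fenced JSON blob holding an "action" key (a tool name or "Final Answer") and an
# "action_input" key. A minimal sketch of extracting that blob from a completion; the
# parse_action_blob helper below is hypothetical, for illustration only:

import json
import re

def parse_action_blob(llm_output: str) -> dict:
    # The prompt requests the shape: Action:```$JSON_BLOB``` followed by Observation.
    match = re.search(r"Action:\s*```(?:json)?\s*(\{.*?\})\s*```", llm_output, re.DOTALL)
    if match is None:
        raise ValueError("no action blob found in model output")
    blob = json.loads(match.group(1))
    # Keys per FORMAT_INSTRUCTIONS: "action" and "action_input".
    return {"action": blob["action"], "action_input": blob["action_input"]}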
@@ -46,7 +46,7 @@
 "action": "Final Answer",
 "action_input": "Final response to human"
}}
-```"""
+```""" # noqa: E501

 class ReactMultiDatasetRouter:
@@ -204,7 +204,8 @@ def create_chat_prompt(
         tool_strings = []
         for tool in tools:
             tool_strings.append(
-                f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query', 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}"
+                f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query',"
+                f" 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}"
             )
         formatted_tools = "\n".join(tool_strings)
         unique_tool_names = {tool.name for tool in tools}
@@ -236,7 +237,7 @@ def create_completion_prompt(
         suffix = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Question: {input}
Thought: {agent_scratchpad}
-"""
+""" # noqa: E501
         tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
         tool_names = ", ".join([tool.name for tool in tools])
diff --git a/api/core/tools/entities/tool_bundle.py b/api/core/tools/entities/tool_bundle.py
index da6201c5aaecd..0c15b2a3711f1 100644
--- a/api/core/tools/entities/tool_bundle.py
+++ b/api/core/tools/entities/tool_bundle.py
@@ -7,7 +7,8 @@
 class ApiToolBundle(BaseModel):
     """
-    This class is used to store the schema information of an api based tool. such as the url, the method, the parameters, etc.
+    This class is used to store the schema information of an api based tool,
+    such as the url, the method, the parameters, etc.
     """

     # server_url
diff --git a/api/core/tools/entities/values.py b/api/core/tools/entities/values.py
index f9db190f91e68..f460df7e25c91 100644
--- a/api/core/tools/entities/values.py
+++ b/api/core/tools/entities/values.py
@@ -4,52 +4,52 @@

 ICONS = {
     ToolLabelEnum.SEARCH: """
-""",
+""", # noqa: E501
     ToolLabelEnum.IMAGE: """
-""",
+""", # noqa: E501
     ToolLabelEnum.VIDEOS: """
-""",
+""", # noqa: E501
     ToolLabelEnum.WEATHER: """
-""",
+""", # noqa: E501
     ToolLabelEnum.FINANCE: """
-""",
+""", # noqa: E501
     ToolLabelEnum.DESIGN: """
-""",
+""", # noqa: E501
     ToolLabelEnum.TRAVEL: """
-""",
+""", # noqa: E501
     ToolLabelEnum.SOCIAL: """
-""",
+""", # noqa: E501
     ToolLabelEnum.NEWS: """
-""",
+""", # noqa: E501
     ToolLabelEnum.MEDICAL: """
-""",
+""", # noqa: E501
     ToolLabelEnum.PRODUCTIVITY: """
-""",
+""", # noqa: E501
     ToolLabelEnum.EDUCATION: """
-""",
+""", # noqa: E501
     ToolLabelEnum.BUSINESS: """
-""",
+""", # noqa: E501
     ToolLabelEnum.ENTERTAINMENT: """
-""",
+""", # noqa: E501
     ToolLabelEnum.UTILITIES: """
-""",
+""", # noqa: E501
     ToolLabelEnum.OTHER: """
-""",
+""", # noqa: E501
 }

 default_tool_label_dict = {
diff --git a/api/core/tools/provider/builtin/aippt/tools/aippt.py b/api/core/tools/provider/builtin/aippt/tools/aippt.py
index 7cee8f9f799b8..a2d69fbcd1a5e 100644
--- a/api/core/tools/provider/builtin/aippt/tools/aippt.py
+++ b/api/core/tools/provider/builtin/aippt/tools/aippt.py
@@ -46,7 +46,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe
             tool_parameters (dict[str, Any]): The parameters for the tool

         Returns:
-            ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages.
+            ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation,
+            which can be a single message or a list of messages.
""" title = tool_parameters.get("title", "") if not title: diff --git a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py index 98d82c233e071..2d65ba2d6f438 100644 --- a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py +++ b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py @@ -104,7 +104,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. """ query = tool_parameters.get("query", "") diff --git a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py index d6a65b1708281..a04f5c0fe9f1a 100644 --- a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py +++ b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py @@ -62,7 +62,8 @@ def _invoke( if isinstance(policy_data, dict) and "topics" in policy_data: for topic in policy_data["topics"]: formatted_assessments.append( - f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}, Action: {topic['action']}" + f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}," + f" Action: {topic['action']}" ) else: formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}") diff --git a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py index e1effd066c611..57cf6d7a308db 100644 --- a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py +++ b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py @@ -24,7 +24,8 @@ def _invoke( tool_parameters (dict[str, Any]): The parameters for the tool, including 'doc' and 'topic'. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. 
""" doc = tool_parameters.get("doc", "") topic = tool_parameters.get("topic", "") diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py index 7606eee7af6cf..1e77f3c6dfc67 100644 --- a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py @@ -70,7 +70,10 @@ def fetch_files( ) else: # It's a file if is_repository: - file_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/files/{item_path}/raw?ref={branch}" + file_url = ( + f"{domain}/api/v4/projects/{encoded_identifier}/repository/files" + f"/{item_path}/raw?ref={branch}" + ) else: file_url = ( f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}" diff --git a/api/core/tools/provider/builtin/google_translate/tools/translate.py b/api/core/tools/provider/builtin/google_translate/tools/translate.py index 5d57b5fabfb94..ea3f2077d5d48 100644 --- a/api/core/tools/provider/builtin/google_translate/tools/translate.py +++ b/api/core/tools/provider/builtin/google_translate/tools/translate.py @@ -35,7 +35,8 @@ def _translate(self, content: str, dest: str) -> str: params = {"client": "gtx", "sl": "auto", "tl": dest, "dt": "t", "q": content} headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } response_json = requests.get(url, params=params, headers=headers).json() diff --git a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py index 69cf8aa740b16..9e0918afa9bd9 100644 --- a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py +++ b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py @@ -114,7 +114,8 @@ def get_controls(self, controls: list) -> dict: } fields.append(field) fields_list.append( - f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}|{field['options'] if field['options'] else ''}|" + f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}" + f"|{field['options'] if field['options'] else ''}|" ) fields.append( diff --git a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py index 71f8356ab81c4..5888f7443fc39 100644 --- a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py +++ b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py @@ -112,7 +112,10 @@ def _invoke( else: result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"." 
if result["total"] > 0: - result_text += f" The following are {result['total'] if result['total'] < limit else limit} pieces of data presented in a table format:\n\n{table_header}" + result_text += ( + f" The following are {result['total'] if result['total'] < limit else limit}" + f" pieces of data presented in a table format:\n\n{table_header}" + ) for row in rows: result_values = [] for f in fields: diff --git a/api/core/tools/provider/builtin/searchapi/tools/google.py b/api/core/tools/provider/builtin/searchapi/tools/google.py index 6d88d74635e85..16ae14549d21d 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google.py @@ -64,7 +64,10 @@ def _process_response(res: dict, type: str) -> str: elif type == "link": if "answer_box" in res and "organic_result" in res["answer_box"]: if "title" in res["answer_box"]["organic_result"]: - toret = f"[{res['answer_box']['organic_result']['title']}]({res['answer_box']['organic_result']['link']})\n" + toret = ( + f"[{res['answer_box']['organic_result']['title']}]" + f"({res['answer_box']['organic_result']['link']})\n" + ) elif "organic_results" in res and "link" in res["organic_results"][0]: toret = "" for item in res["organic_results"]: diff --git a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py index 344f9164945f4..64fdc961b4c5d 100644 --- a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py +++ b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py @@ -310,7 +310,8 @@ def get_runtime_parameters(self) -> list[ToolParameter]: ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English.", + llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate" + " as a list of words as possible as detailed, the prompt must be written in English.", required=True, ), ] @@ -320,12 +321,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="image_id", label=I18nObject(en_US="image_id", zh_Hans="image_id"), human_description=I18nObject( - en_US="Image id of the image you want to generate based on, if you want to generate image based on the default image, you can leave this field empty.", + en_US="Image id of the image you want to generate based on, if you want to generate image based" + " on the default image, you can leave this field empty.", zh_Hans="您想要生成的图像的图像 ID,如果您想要基于默认图像生成图像,则可以将此字段留空。", ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image id of the original image, you can leave this field empty if you want to generate a new image.", + llm_description="Image id of the original image, you can leave this field empty if you want to" + " generate a new image.", required=True, options=[ ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name)) @@ -343,12 +346,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="model", label=I18nObject(en_US="Model", zh_Hans="Model"), human_description=I18nObject( - en_US="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Model of Stable Diffusion, you can check the official documentation" + " of 
Stable Diffusion", zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=models[0], options=[ @@ -367,12 +372,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="sampler_name", label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"), human_description=I18nObject( - en_US="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", zh_Hans="Stable Diffusion 的Sampling method,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=sample_methods[0], options=[ diff --git a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py index 26f12864c3942..b32b0124dd31d 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and list name. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and list name. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py index dfc013a6b8bb7..e98efb81ca673 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including details for the new card. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including details for the new card. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_board.py b/api/core/tools/provider/builtin/trello/tools/delete_board.py index 9dbd8f78d5e65..7fc9d1f13c266 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_board.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. 
- tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_card.py b/api/core/tools/provider/builtin/trello/tools/delete_card.py index 960c3055fe94a..1de98d639ebb7 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_card.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_card.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the card ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the card ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py index 03510f196488b..cabc7ce09359d 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py index 5b41b128d07ca..fe42cd9c5cbf8 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py index e3bed2e6e620b..ff2b1221e767d 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. 
diff --git a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py index 4d8854747c696..3d7f9f4ad1c99 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and filter. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and filter. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py index ca8aa9c2d5daf..ccf404068f225 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_board.py b/api/core/tools/provider/builtin/trello/tools/update_board.py index 62681eea6b4a9..1e358b00f49ad 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_board.py +++ b/api/core/tools/provider/builtin/trello/tools/update_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including board ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including board ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_card.py b/api/core/tools/provider/builtin/trello/tools/update_card.py index 26113f1229061..d25fcbafaa632 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_card.py +++ b/api/core/tools/provider/builtin/trello/tools/update_card.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including the card ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including the card ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. 
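All of the rewraps in the hunks above lean on the same language feature: Python concatenates adjacent string literals, including f-strings, at compile time, so a long message can be split across source lines inside parentheses without changing the resulting string. A minimal sketch of the pattern; the variable and message text are illustrative, not taken from the codebase:

# Adjacent literals inside parentheses are joined at compile time,
# so both spellings below build the exact same string.
board_id = "abc123"  # illustrative value

single_line = f"The parameters for the tool invocation, including the board ID {board_id}."
wrapped = (
    "The parameters for the tool invocation,"
    f" including the board ID {board_id}."
)
assert wrapped == single_line  # identical output, shorter source lines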
diff --git a/api/core/tools/provider/builtin/twilio/tools/send_message.py b/api/core/tools/provider/builtin/twilio/tools/send_message.py index 822d0c0ebdd3a..156249bc960c8 100644 --- a/api/core/tools/provider/builtin/twilio/tools/send_message.py +++ b/api/core/tools/provider/builtin/twilio/tools/send_message.py @@ -72,7 +72,8 @@ class SendMessageTool(BuiltinTool): tool_parameters (Dict[str, Any]): The parameters required for sending the message. Returns: - Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, which includes the status of the message sending operation. + Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, + which includes the status of the message sending operation. """ def _invoke( diff --git a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py index 8e1b097776670..8effa9818a0c4 100644 --- a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py +++ b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py @@ -1 +1 @@ -VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZub
VBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" +VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" # noqa: E501 diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 9a6a49d8f443b..645f0861fa06b 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -193,7 +193,10 @@ def _convert_tool_response_to_str(tool_response: list[ToolInvokeMessage]) -> str response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or response.type == ToolInvokeMessage.MessageType.IMAGE ): - result += "image has been created and sent to user already, you do not need to create it, just tell the user to check it now." + result += ( + "image has been created and sent to user already, you do not need to create it," + " just tell the user to check it now." + ) elif response.type == ToolInvokeMessage.MessageType.JSON: result += f"tool response: {json.dumps(response.message, ensure_ascii=False)}." 
else: diff --git a/api/core/tools/utils/feishu_api_utils.py b/api/core/tools/utils/feishu_api_utils.py index 7bb026a383a61..44803d7d6531f 100644 --- a/api/core/tools/utils/feishu_api_utils.py +++ b/api/core/tools/utils/feishu_api_utils.py @@ -89,7 +89,7 @@ def get_document_raw_content(self, document_id: str) -> dict: "content": "云文档\n多人实时协同,插入一切元素。不仅是在线文档,更是强大的创作和互动工具\n云文档:专为协作而生\n" } } - """ + """ # noqa: E501 params = { "document_id": document_id, } diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 1109ed7df2f8c..bf040d91d3987 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -43,7 +43,7 @@ def transform_tool_invoke_messages( result.append( ToolInvokeMessage( type=ToolInvokeMessage.MessageType.TEXT, - message=f"Failed to download image: {message.message}, you can try to download it yourself.", + message=f"Failed to download image: {message.message}, please try to download it manually.", meta=message.meta.copy() if message.meta is not None else {}, save_as=message.save_as, ) diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index 654c9acaf934b..210b84b29a72f 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -315,7 +315,8 @@ def auto_parse_to_tool_bundle( yaml_error = e if loaded_content is None: raise ToolApiSchemaError( - f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}, yaml error: {str(yaml_error)}" + f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}," + f" yaml error: {str(yaml_error)}" ) swagger_error = None @@ -355,5 +356,6 @@ def auto_parse_to_tool_bundle( openapi_plugin_error = e raise ToolApiSchemaError( - f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}, openapi plugin error: {str(openapi_plugin_error)}" + f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}," + f" openapi plugin error: {str(openapi_plugin_error)}" ) diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index 3639b5fff79b7..fc2f63a241b30 100644 --- a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -38,7 +38,8 @@ def page_result(text: str, cursor: int, max_length: int) -> str: def get_url(url: str, user_agent: str = None) -> str: """Fetch URL and return the contents as a string.""" headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } if user_agent: headers["User-Agent"] = user_agent diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index a07ba2f74038b..73164fff9ae5a 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -179,7 +179,8 @@ def _transform_result( ) else: raise ValueError( - f"Output {prefix}.{output_name} is not a valid array. make sure all elements are of the same type." + f"Output {prefix}.{output_name} is not a valid array." + f" make sure all elements are of the same type." 
) elif isinstance(output_value, type(None)): pass @@ -201,7 +202,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an object, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an object," + f" got {type(result.get(output_name))} instead." ) else: transformed_result[output_name] = self._transform_result( @@ -228,7 +230,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: @@ -248,7 +251,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_STRING_ARRAY_LENGTH: @@ -268,7 +272,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH: @@ -283,7 +288,8 @@ def _transform_result( pass else: raise ValueError( - f"Output {prefix}{dot}{output_name}[{i}] is not an object, got {type(value)} instead at index {i}." + f"Output {prefix}{dot}{output_name}[{i}] is not an object," + f" got {type(value)} instead at index {i}." 
) transformed_result[output_name] = [ diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 53e8be64153d9..af55688a52c9b 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -128,11 +128,12 @@ def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: weights = None elif node_data.multiple_retrieval_config.reranking_mode == "weighted_score": reranking_model = None + vector_setting = node_data.multiple_retrieval_config.weights.vector_setting weights = { "vector_setting": { - "vector_weight": node_data.multiple_retrieval_config.weights.vector_setting.vector_weight, - "embedding_provider_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_provider_name, - "embedding_model_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_model_name, + "vector_weight": vector_setting.vector_weight, + "embedding_provider_name": vector_setting.embedding_provider_name, + "embedding_model_name": vector_setting.embedding_model_name, }, "keyword_setting": { "keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight diff --git a/api/core/workflow/nodes/parameter_extractor/prompts.py b/api/core/workflow/nodes/parameter_extractor/prompts.py index c63fded4d0257..58fcecc53b09f 100644 --- a/api/core/workflow/nodes/parameter_extractor/prompts.py +++ b/api/core/workflow/nodes/parameter_extractor/prompts.py @@ -23,7 +23,7 @@ To illustrate, if the task involves extracting a user's name and their request, your function call might look like this: Ensure your output follows a similar structure to examples. ### Final Output Produce well-formatted function calls in json without XML tags, as shown in the example. -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information from context inside XML tags by calling the function {FUNCTION_CALLING_EXTRACTOR_NAME} with the correct parameters with structure inside XML tags. @@ -33,7 +33,7 @@ \x7bstructure\x7d -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ { @@ -55,7 +55,8 @@ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the location parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the location parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"location": "San Francisco"}}, }, }, @@ -72,7 +73,8 @@ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the food parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the food parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"food": "apple pie"}}, }, }, @@ -117,7 +119,7 @@ ### Answer I should always output a valid JSON object. Output nothing other than the JSON object. ```JSON -""" +""" # noqa: E501 CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and output a valid JSON object. The structure of the JSON object you can found in the instructions. 
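Besides splitting literals, the knowledge_retrieval_node hunk above shortens lines by binding a deeply nested attribute chain to a local alias once and then reusing it. A sketch of the same idea under stand-in dataclasses; VectorSetting, Weights, and MultipleRetrievalConfig here are illustrative, not the real Dify models:

from dataclasses import dataclass


@dataclass
class VectorSetting:
    vector_weight: float
    embedding_provider_name: str
    embedding_model_name: str


@dataclass
class Weights:
    vector_setting: VectorSetting


@dataclass
class MultipleRetrievalConfig:
    weights: Weights


config = MultipleRetrievalConfig(Weights(VectorSetting(0.7, "openai", "text-embedding-3-small")))

# Bind the nested object once; each dict entry then stays well under
# the line limit instead of repeating the full attribute chain.
vector_setting = config.weights.vector_setting
weights = {
    "vector_setting": {
        "vector_weight": vector_setting.vector_weight,
        "embedding_provider_name": vector_setting.embedding_provider_name,
        "embedding_model_name": vector_setting.embedding_model_name,
    },
}
assert weights["vector_setting"]["vector_weight"] == 0.7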
diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index 581f986922163..ce32b01aa4189 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -12,13 +12,13 @@ {histories} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_USER_PROMPT_1 = """ { "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], "categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}], "classification_instructions": ["classify the text based on the feedback provided by customer"]} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ ```json @@ -32,7 +32,7 @@ {"input_text": ["bad service, slow to bring the food"], "categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}], "classification_instructions": []} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ ```json @@ -73,4 +73,4 @@ ### User Input {{"input_text" : ["{input_text}"], "categories" : {categories},"classification_instruction" : ["{classification_instructions}"]}} ### Assistant Output -""" +""" # noqa: E501 diff --git a/api/libs/gmpy2_pkcs10aep_cipher.py b/api/libs/gmpy2_pkcs10aep_cipher.py index f89902c5e8de0..83f9c74e339e1 100644 --- a/api/libs/gmpy2_pkcs10aep_cipher.py +++ b/api/libs/gmpy2_pkcs10aep_cipher.py @@ -204,7 +204,8 @@ def decrypt(self, ciphertext): def new(key, hashAlgo=None, mgfunc=None, label=b"", randfunc=None): - """Return a cipher object :class:`PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption. + """Return a cipher object :class:`PKCS1OAEP_Cipher` + that can be used to perform PKCS#1 OAEP encryption or decryption. :param key: The key object to use to encrypt or decrypt the message. 
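Where the long line is a prompt or template literal, as in the question-classifier prompts above, rewrapping would change the exact text handed to the model, so these hunks suppress the check instead of reflowing. Ruff accepts a "# noqa: E501" directive placed after the closing quotes of a multi-line string and applies it to the whole literal. A small sketch with made-up prompt text:

# For a single long line, the directive sits at the end of that line;
# for a multi-line string, Ruff honors a directive after the closing
# quotes and skips the line-too-long check for the entire literal,
# leaving the prompt text byte-for-byte unchanged.
EXAMPLE_PROMPT = """
{"input_text": ["sample feedback"], "categories": [{"category_id": "c1", "category_name": "Customer Service"}], "classification_instructions": []}
"""  # noqa: E501

assert "categories" in EXAMPLE_PROMPT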
diff --git a/api/models/provider.py b/api/models/provider.py index ff63e03a92e73..644915e781084 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -65,7 +65,10 @@ class Provider(db.Model): updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) def __repr__(self): - return f"" + return ( + f"" + ) @property def token_is_set(self): diff --git a/api/models/tools.py b/api/models/tools.py index 6b69a219b1518..861066a2d5c4e 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -62,7 +62,8 @@ class PublishedAppTool(db.Model): description = db.Column(db.Text, nullable=False) # llm_description of the tool, for LLM llm_description = db.Column(db.Text, nullable=False) - # query description, query will be seem as a parameter of the tool, to describe this parameter to llm, we need this field + # query description, query will be seen as a parameter of the tool, + # to describe this parameter to llm, we need this field query_description = db.Column(db.Text, nullable=False) # query name, the name of the query parameter query_name = db.Column(db.String(40), nullable=False) diff --git a/api/models/workflow.py b/api/models/workflow.py index d52749f0ff4e5..9c93ea4cea84e 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -246,7 +246,8 @@ def environment_variables(self, value: Sequence[Variable]): if any(var for var in value if not var.id): raise ValueError("environment variable require a unique id") - # Compare inputs and origin variables, if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). + # Compare inputs and origin variables, + # if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). origin_variables_dictionary = {var.id: var for var in self.environment_variables} for i, variable in enumerate(value): if variable.id in origin_variables_dictionary and variable.value == HIDDEN_VALUE: diff --git a/api/poetry.lock b/api/poetry.lock index 103423e5c76b8..6023f98e2ad5a 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -10388,4 +10388,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "2dbff415c3c9ca95c8dcfb59fc088ce2c0d00037c44f386a34c87c98e1d8b942" +content-hash = "8179c7e3f91b5a00054e26297040b1969f59b37cb9a707fbaa9c2ea419954718" diff --git a/api/pyproject.toml b/api/pyproject.toml index 3d100ebc58163..616794cf3abe5 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -27,7 +27,6 @@ select = [ "W605", # invalid-escape-sequence ] ignore = [ - "E501", # line-too-long "E402", # module-import-not-at-top-of-file "E711", # none-comparison "E712", # true-false-comparison @@ -68,16 +67,19 @@ ignore = [ "F401", # unused-import "F811", # redefined-while-unused ] -"tests/*" = [ - "F401", # unused-import - "F811", # redefined-while-unused -] "configs/*" = [ "N802", # invalid-function-name ] "libs/gmpy2_pkcs10aep_cipher.py" = [ "N803", # invalid-argument-name ] +"migrations/versions/*" = [ + "E501", # line-too-long +] +"tests/*" = [ + "F401", # unused-import + "F811", # redefined-while-unused +] [tool.ruff.format] exclude = [ @@ -270,4 +272,4 @@ optional = true [tool.poetry.group.lint.dependencies] dotenv-linter = "~0.5.0" -ruff = "~0.6.1" +ruff = "~0.6.4" diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index 3ded9c0989b53..6f6074f59640d 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ 
-176,7 +176,8 @@ def get_api_tool_provider_remote_schema(user_id: str, tenant_id: str, url: str): get api tool provider remote schema """ headers = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", "Accept": "*/*", } diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py index 4138cdd40d822..025913cb17a80 100644 --- a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py +++ b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py @@ -40,7 +40,7 @@ def create_embeddings( usage=Usage(prompt_tokens=2, total_tokens=2), ) - embeddings = "VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H
2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tuL5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlU
vCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" + embeddings = 
"VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tu
L5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u
86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" # noqa: E501 data = [] for i, text in enumerate(input): diff --git a/api/tests/unit_tests/core/app/segments/test_segment.py b/api/tests/unit_tests/core/app/segments/test_segment.py index 7cc339d21200a..73002623f03dd 100644 --- a/api/tests/unit_tests/core/app/segments/test_segment.py +++ b/api/tests/unit_tests/core/app/segments/test_segment.py @@ -21,9 +21,9 @@ def test_segment_group_to_text(): segments_group = parser.convert_template(template=template, variable_pool=variable_pool) assert segments_group.text == "Hello, fake-user-id! Your query is fake-user-query. And your key is fake-secret-key." - assert ( - segments_group.log - == f"Hello, fake-user-id! Your query is fake-user-query. And your key is {encrypter.obfuscated_token('fake-secret-key')}." + assert segments_group.log == ( + f"Hello, fake-user-id! Your query is fake-user-query." + f" And your key is {encrypter.obfuscated_token('fake-secret-key')}." 
)

From 40fb4d16ef0f5e8dd984c205b9941df68a445b0f Mon Sep 17 00:00:00 2001
From: Bowen Liang
Date: Thu, 12 Sep 2024 15:50:49 +0800
Subject: [PATCH 05/25] chore: refurbish Python code by applying refurb linter rules (#8296)

---
 api/controllers/console/admin.py | 24 +++++++------------
 api/controllers/console/app/audio.py | 8 ++-----
 api/controllers/console/auth/oauth.py | 2 +-
 api/controllers/console/datasets/datasets.py | 7 +----
 api/controllers/console/explore/audio.py | 8 ++-----
 .../console/workspace/tool_providers.py | 2 +-
 api/controllers/service_api/app/audio.py | 8 ++-----
 api/controllers/web/audio.py | 8 ++-----
 api/core/agent/cot_agent_runner.py | 2 +-
 api/core/agent/fc_agent_runner.py | 2 +-
 api/core/app/apps/base_app_runner.py | 8 +++----
 api/core/extension/extensible.py | 4 ++--
 api/core/memory/token_buffer_memory.py | 2 +-
 .../__base/large_language_model.py | 2 +-
 .../model_providers/anthropic/llm/llm.py | 2 +-
 .../azure_ai_studio/llm/llm.py | 2 +-
 .../model_providers/azure_openai/llm/llm.py | 10 ++++----
 .../model_providers/azure_openai/tts/tts.py | 2 +-
 .../model_providers/bedrock/llm/llm.py | 8 +++----
 .../model_providers/chatglm/llm/llm.py | 4 ++--
 .../model_providers/localai/llm/llm.py | 6 ++---
 .../model_providers/minimax/llm/llm.py | 4 ++--
 .../ollama/text_embedding/text_embedding.py | 2 +-
 .../model_providers/openai/llm/llm.py | 8 +++----
 .../model_providers/openai/tts/tts.py | 2 +-
 .../openai_api_compatible/llm/llm.py | 4 ++--
 .../model_providers/openllm/llm/llm.py | 4 ++--
 .../openllm/llm/openllm_generate.py | 2 +-
 .../model_providers/replicate/llm/llm.py | 2 +-
 .../sagemaker/rerank/rerank.py | 3 ++-
 .../model_providers/sagemaker/tts/tts.py | 2 +-
 .../model_providers/spark/llm/llm.py | 2 +-
 .../tencent/speech2text/flash_recognizer.py | 3 ++-
 .../model_providers/tongyi/llm/llm.py | 4 ++--
 .../model_providers/upstage/llm/llm.py | 6 ++---
 .../model_providers/vertex_ai/llm/llm.py | 4 ++--
 .../legacy/volc_sdk/base/auth.py | 3 ++-
 .../legacy/volc_sdk/base/util.py | 3 ++-
 .../volcengine_maas/llm/llm.py | 8 +++----
 .../model_providers/wenxin/llm/llm.py | 6 ++---
 .../wenxin/text_embedding/text_embedding.py | 2 +-
 .../model_providers/xinference/llm/llm.py | 6 ++---
 .../model_providers/xinference/tts/tts.py | 2 +-
 .../model_providers/zhipuai/llm/llm.py | 4 ++--
 .../zhipuai/zhipuai_sdk/core/_http_client.py | 4 +++-
 api/core/ops/langfuse_trace/langfuse_trace.py | 22 ++++++++---------
 .../ops/langsmith_trace/langsmith_trace.py | 10 ++++----
 api/core/ops/ops_trace_manager.py | 15 ++++++------
 api/core/prompt/simple_prompt_transform.py | 6 ++---
 .../rag/datasource/keyword/keyword_base.py | 2 +-
 .../vdb/analyticdb/analyticdb_vector.py | 4 ++--
 .../datasource/vdb/chroma/chroma_vector.py | 2 +-
 .../vdb/elasticsearch/elasticsearch_vector.py | 6 ++---
 .../datasource/vdb/milvus/milvus_vector.py | 2 +-
 .../datasource/vdb/myscale/myscale_vector.py | 2 +-
 .../vdb/opensearch/opensearch_vector.py | 2 +-
 .../rag/datasource/vdb/oracle/oraclevector.py | 4 ++--
 .../datasource/vdb/pgvecto_rs/pgvecto_rs.py | 2 +-
 .../rag/datasource/vdb/pgvector/pgvector.py | 2 +-
 .../datasource/vdb/qdrant/qdrant_vector.py | 2 +-
 .../rag/datasource/vdb/relyt/relyt_vector.py | 2 +-
 .../datasource/vdb/tencent/tencent_vector.py | 2 +-
 .../datasource/vdb/tidb_vector/tidb_vector.py | 2 +-
 api/core/rag/datasource/vdb/vector_base.py | 2 +-
 api/core/rag/datasource/vdb/vector_factory.py | 2 +-
 .../vdb/weaviate/weaviate_vector.py | 2 +-
 api/core/rag/extractor/blob/blob.py | 8 +++----
 api/core/rag/extractor/extract_processor.py | 7 +++---
 api/core/rag/extractor/helpers.py | 4 ++--
 api/core/rag/extractor/markdown_extractor.py | 7 +++---
 api/core/rag/extractor/text_extractor.py | 7 +++---
 api/core/rag/extractor/word_extractor.py | 2 +-
 api/core/rag/retrieval/dataset_retrieval.py | 10 ++++----
 api/core/tools/provider/api_tool_provider.py | 2 +-
 .../aws/tools/sagemaker_text_rerank.py | 3 ++-
 .../builtin/hap/tools/get_worksheet_fields.py | 2 +-
 .../hap/tools/get_worksheet_pivot_data.py | 2 +-
 .../hap/tools/list_worksheet_records.py | 2 +-
 .../searchapi/tools/youtube_transcripts.py | 2 +-
 .../dataset_multi_retriever_tool.py | 6 ++---
 .../dataset_retriever_tool.py | 6 ++---
 api/core/tools/utils/web_reader_tool.py | 6 ++---
 api/core/tools/utils/yaml_utils.py | 2 +-
 .../workflow/graph_engine/entities/graph.py | 5 ++--
 api/core/workflow/nodes/code/code_node.py | 8 +++----
 .../workflow/nodes/if_else/if_else_node.py | 2 +-
 api/core/workflow/nodes/llm/llm_node.py | 2 +-
 .../question_classifier_node.py | 2 +-
 ...aset_join_when_app_model_config_updated.py | 3 +--
 ...oin_when_app_published_workflow_updated.py | 3 +--
 api/extensions/storage/local_storage.py | 8 +++----
 api/models/dataset.py | 4 ++--
 api/models/model.py | 10 ++++----
 api/pyproject.toml | 3 +++
 api/services/account_service.py | 2 +-
 api/services/app_dsl_service.py | 12 ++++------
 api/services/dataset_service.py | 8 ++-----
 api/services/hit_testing_service.py | 6 ++---
 api/services/model_provider_service.py | 6 ++---
 api/services/recommended_app_service.py | 8 +++----
 api/services/tag_service.py | 2 +-
 .../tools/builtin_tools_manage_service.py | 4 ++--
 api/services/website_service.py | 4 ++--
 api/services/workflow/workflow_converter.py | 8 +++----
 .../model_runtime/__mock/huggingface_tei.py | 2 +-
 105 files changed, 220 insertions(+), 276 deletions(-)

diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py
index a4ceec26620dc..f78ea9b2881ba 100644
--- a/api/controllers/console/admin.py
+++ b/api/controllers/console/admin.py
@@ -60,23 +60,15 @@ def post(self):
         site = app.site
         if not site:
-            desc = args["desc"] if args["desc"] else ""
-            copy_right = args["copyright"] if args["copyright"] else ""
-            privacy_policy = args["privacy_policy"] if args["privacy_policy"] else ""
-            custom_disclaimer = args["custom_disclaimer"] if args["custom_disclaimer"] else ""
+            desc = args["desc"] or ""
+            copy_right = args["copyright"] or ""
+            privacy_policy = args["privacy_policy"] or ""
+            custom_disclaimer = args["custom_disclaimer"] or ""
         else:
-            desc = site.description if site.description else args["desc"] if args["desc"] else ""
-            copy_right = site.copyright if site.copyright else args["copyright"] if args["copyright"] else ""
-            privacy_policy = (
-                site.privacy_policy if site.privacy_policy else args["privacy_policy"] if args["privacy_policy"] else ""
-            )
-            custom_disclaimer = (
-                site.custom_disclaimer
-                if site.custom_disclaimer
-                else args["custom_disclaimer"]
-                if args["custom_disclaimer"]
-                else ""
-            )
+            desc = site.description or args["desc"] or ""
+            copy_right = site.copyright or args["copyright"] or ""
+            privacy_policy = site.privacy_policy or args["privacy_policy"] or ""
+            custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or ""
 
         recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args["app_id"]).first()
diff --git a/api/controllers/console/app/audio.py b/api/controllers/console/app/audio.py
index 437a6a7b3865b..7332758e83c60 100644
--- a/api/controllers/console/app/audio.py
+++ b/api/controllers/console/app/audio.py
@@ -99,14 +99,10 @@ def post(self, app_model):
                 and app_model.workflow.features_dict
             ):
                 text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+                voice = args.get("voice") or text_to_speech.get("voice")
             else:
                 try:
-                    voice = (
-                        args.get("voice")
-                        if args.get("voice")
-                        else app_model.app_model_config.text_to_speech_dict.get("voice")
-                    )
+                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
                 except Exception:
                     voice = None
             response = AudioService.transcript_tts(app_model=app_model, text=text, message_id=message_id, voice=voice)
diff --git a/api/controllers/console/auth/oauth.py b/api/controllers/console/auth/oauth.py
index ae1b49f3ecf44..1df0f5de9d5e8 100644
--- a/api/controllers/console/auth/oauth.py
+++ b/api/controllers/console/auth/oauth.py
@@ -101,7 +101,7 @@ def _generate_account(provider: str, user_info: OAuthUserInfo):
     if not account:
         # Create account
-        account_name = user_info.name if user_info.name else "Dify"
+        account_name = user_info.name or "Dify"
         account = RegisterService.register(
             email=user_info.email, name=account_name, password=None, open_id=user_info.id, provider=provider
         )
diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py
index 6ccacc78eeb29..08603bfc218d3 100644
--- a/api/controllers/console/datasets/datasets.py
+++ b/api/controllers/console/datasets/datasets.py
@@ -550,12 +550,7 @@ class DatasetApiBaseUrlApi(Resource):
     @login_required
     @account_initialization_required
     def get(self):
-        return {
-            "api_base_url": (
-                dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL else request.host_url.rstrip("/")
-            )
-            + "/v1"
-        }
+        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
 
 
 class DatasetRetrievalSettingApi(Resource):
diff --git a/api/controllers/console/explore/audio.py b/api/controllers/console/explore/audio.py
index 71cb060ecc188..2eb7e0449037d 100644
--- a/api/controllers/console/explore/audio.py
+++ b/api/controllers/console/explore/audio.py
@@ -86,14 +86,10 @@ def post(self, installed_app):
                 and app_model.workflow.features_dict
             ):
                 text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+                voice = args.get("voice") or text_to_speech.get("voice")
             else:
                 try:
-                    voice = (
-                        args.get("voice")
-                        if args.get("voice")
-                        else app_model.app_model_config.text_to_speech_dict.get("voice")
-                    )
+                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
                 except Exception:
                     voice = None
             response = AudioService.transcript_tts(app_model=app_model, message_id=message_id, voice=voice, text=text)
diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py
index c41a898fdca27..d2a17b133b6d1 100644
--- a/api/controllers/console/workspace/tool_providers.py
+++ b/api/controllers/console/workspace/tool_providers.py
@@ -327,7 +327,7 @@ def post(self):
         return ApiToolManageService.test_api_tool_preview(
             current_user.current_tenant_id,
-            args["provider_name"] if args["provider_name"] else "",
+            args["provider_name"] or "",
             args["tool_name"],
             args["credentials"],
             args["parameters"],
diff --git a/api/controllers/service_api/app/audio.py b/api/controllers/service_api/app/audio.py
index 85aab047a7823..8d8ca8d78c0d5 100644
--- a/api/controllers/service_api/app/audio.py
+++ b/api/controllers/service_api/app/audio.py
@@ -84,14 +84,10 @@ def post(self, app_model: App, end_user: EndUser):
                 and app_model.workflow.features_dict
             ):
                 text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+                voice = args.get("voice") or text_to_speech.get("voice")
             else:
                 try:
-                    voice = (
-                        args.get("voice")
-                        if args.get("voice")
-                        else app_model.app_model_config.text_to_speech_dict.get("voice")
-                    )
+                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
                 except Exception:
                     voice = None
             response = AudioService.transcript_tts(
diff --git a/api/controllers/web/audio.py b/api/controllers/web/audio.py
index d062d2893b7ab..49c467dbe18bb 100644
--- a/api/controllers/web/audio.py
+++ b/api/controllers/web/audio.py
@@ -83,14 +83,10 @@ def post(self, app_model: App, end_user):
                 and app_model.workflow.features_dict
             ):
                 text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
+                voice = args.get("voice") or text_to_speech.get("voice")
             else:
                 try:
-                    voice = (
-                        args.get("voice")
-                        if args.get("voice")
-                        else app_model.app_model_config.text_to_speech_dict.get("voice")
-                    )
+                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
                 except Exception:
                     voice = None
 
diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index 29b428a7c3246..ebe04bf26000f 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -256,7 +256,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     model=model_instance.model,
                     prompt_messages=prompt_messages,
                     message=AssistantPromptMessage(content=final_answer),
-                    usage=llm_usage["usage"] if llm_usage["usage"] else LLMUsage.empty_usage(),
+                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                     system_fingerprint="",
                 )
             ),
diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py
index 27cf561e3d61a..13164e0bfca21 100644
--- a/api/core/agent/fc_agent_runner.py
+++ b/api/core/agent/fc_agent_runner.py
@@ -298,7 +298,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     model=model_instance.model,
                     prompt_messages=prompt_messages,
                     message=AssistantPromptMessage(content=final_answer),
-                    usage=llm_usage["usage"] if llm_usage["usage"] else LLMUsage.empty_usage(),
+                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                     system_fingerprint="",
                 )
             ),
diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py
index aadb43ad399bb..bd2be18bfd2d5 100644
--- a/api/core/app/apps/base_app_runner.py
+++ b/api/core/app/apps/base_app_runner.py
@@ -161,7 +161,7 @@ def organize_prompt_messages(
                 app_mode=AppMode.value_of(app_record.mode),
                 prompt_template_entity=prompt_template_entity,
                 inputs=inputs,
-                query=query if query else "",
+                query=query or "",
                 files=files,
                 context=context,
                 memory=memory,
@@ -189,7 +189,7 @@ def organize_prompt_messages(
             prompt_messages = prompt_transform.get_prompt(
                 prompt_template=prompt_template,
                 inputs=inputs,
-                query=query if query else "",
+                query=query or "",
                 files=files,
                 context=context,
                 memory_config=memory_config,
@@ -238,7 +238,7 @@ def direct_output(
                         model=app_generate_entity.model_conf.model,
                         prompt_messages=prompt_messages,
                         message=AssistantPromptMessage(content=text),
-                        usage=usage if usage else LLMUsage.empty_usage(),
+                        usage=usage or LLMUsage.empty_usage(),
                     ),
                 ),
                 PublishFrom.APPLICATION_MANAGER,
@@ -351,7 +351,7 @@ def moderation_for_inputs(
             tenant_id=tenant_id,
             app_config=app_generate_entity.app_config,
             inputs=inputs,
-            query=query if query else "",
+            query=query or "",
            message_id=message_id,
            trace_manager=app_generate_entity.trace_manager,
        )
diff --git a/api/core/extension/extensible.py b/api/core/extension/extensible.py
index f1a49c4921906..97dbaf2026e79 100644
--- a/api/core/extension/extensible.py
+++ b/api/core/extension/extensible.py
@@ -3,6 +3,7 @@
 import json
 import logging
 import os
+from pathlib import Path
 from typing import Any, Optional
 
 from pydantic import BaseModel
@@ -63,8 +64,7 @@ def scan_extensions(cls):
                 builtin_file_path = os.path.join(subdir_path, "__builtin__")
                 if os.path.exists(builtin_file_path):
-                    with open(builtin_file_path, encoding="utf-8") as f:
-                        position = int(f.read().strip())
+                    position = int(Path(builtin_file_path).read_text(encoding="utf-8").strip())
                     position_map[extension_name] = position
 
                 if (extension_name + ".py") not in file_names:
diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py
index 54b1d8212bd61..a14d237a12a39 100644
--- a/api/core/memory/token_buffer_memory.py
+++ b/api/core/memory/token_buffer_memory.py
@@ -39,7 +39,7 @@ def get_history_prompt_messages(
         )
 
         if message_limit and message_limit > 0:
-            message_limit = message_limit if message_limit <= 500 else 500
+            message_limit = min(message_limit, 500)
         else:
             message_limit = 500
diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py
index e8789ec7df182..ba88cc1f384fc 100644
--- a/api/core/model_runtime/model_providers/__base/large_language_model.py
+++ b/api/core/model_runtime/model_providers/__base/large_language_model.py
@@ -449,7 +449,7 @@ def _invoke_result_generator(
                     model=real_model,
                     prompt_messages=prompt_messages,
                     message=prompt_message,
-                    usage=usage if usage else LLMUsage.empty_usage(),
+                    usage=usage or LLMUsage.empty_usage(),
                     system_fingerprint=system_fingerprint,
                 ),
                 credentials=credentials,
diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
index 0cb66842e7e26..ff741e02402b1 100644
--- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py
+++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
@@ -409,7 +409,7 @@ def _handle_chat_generate_stream_response(
                     ),
                 )
             elif isinstance(chunk, ContentBlockDeltaEvent):
-                chunk_text = chunk.delta.text if chunk.delta.text else ""
+                chunk_text = chunk.delta.text or ""
                 full_assistant_content += chunk_text
 
                 # transform assistant message to prompt message
diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py b/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py
index 42eae6c1e50fe..516ef8b295ef8 100644
--- a/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py
+++ b/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py
@@ -213,7 +213,7 @@ def _invoke_result_generator(
                     model=real_model,
                     prompt_messages=prompt_messages,
                     message=prompt_message,
-                    usage=usage if usage else LLMUsage.empty_usage(),
+                    usage=usage or LLMUsage.empty_usage(),
                     system_fingerprint=system_fingerprint,
                 ),
                 credentials=credentials,
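The recurring rewrite in the hunks above is refurb's truthiness coalescing: "x if x else y" collapses to "x or y". A minimal standalone sketch of the equivalence, with illustrative names that are not from this patch:

# Both spellings coalesce any falsy value (None, "", 0) to the fallback,
# so the rewrite is behavior-preserving wherever the old form appeared.
def pick_voice(requested, default="alloy"):
    verbose = requested if requested else default  # before
    concise = requested or default                 # after
    assert verbose == concise
    return concise

print(pick_voice(None))  # alloy
print(pick_voice(""))    # alloy -- an empty string also falls through to the default

The one thing to keep in mind when reviewing such rewrites is that "or" treats every falsy value alike, which is exactly what the replaced conditional did too.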
diff --git a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
index 3b9fb52e24ec9..f0033ea051b50 100644
--- a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
@@ -225,7 +225,7 @@ def _handle_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            text = delta.text if delta.text else ""
+            text = delta.text or ""
             assistant_prompt_message = AssistantPromptMessage(content=text)
 
             full_text += text
@@ -400,15 +400,13 @@ def _handle_chat_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-            )
+            assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             real_model = chunk.model
             system_fingerprint = chunk.system_fingerprint
-            completion += delta.delta.content if delta.delta.content else ""
+            completion += delta.delta.content or ""
 
             yield LLMResultChunk(
                 model=real_model,
diff --git a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py b/api/core/model_runtime/model_providers/azure_openai/tts/tts.py
index bbad7264673d3..8db044b24dd27 100644
--- a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py
+++ b/api/core/model_runtime/model_providers/azure_openai/tts/tts.py
@@ -84,7 +84,7 @@ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str
                 )
                 for i in range(len(sentences))
             ]
-            for index, future in enumerate(futures):
+            for future in futures:
                 yield from future.result().__enter__().iter_bytes(1024)
 
         else:
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
index 239ae52b4cd50..953ac0741f1c4 100644
--- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py
+++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
@@ -331,10 +331,10 @@ def _handle_converse_stream_response(
             elif "contentBlockDelta" in chunk:
                 delta = chunk["contentBlockDelta"]["delta"]
                 if "text" in delta:
-                    chunk_text = delta["text"] if delta["text"] else ""
+                    chunk_text = delta["text"] or ""
                     full_assistant_content += chunk_text
                     assistant_prompt_message = AssistantPromptMessage(
-                        content=chunk_text if chunk_text else "",
+                        content=chunk_text or "",
                     )
                     index = chunk["contentBlockDelta"]["contentBlockIndex"]
                     yield LLMResultChunk(
@@ -751,7 +751,7 @@ def _handle_generate_response(
         elif model_prefix == "cohere":
             output = response_body.get("generations")[0].get("text")
             prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, output if output else "")
+            completion_tokens = self.get_num_tokens(model, credentials, output or "")
         else:
             raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response")
@@ -828,7 +828,7 @@ def _handle_generate_stream_response(
                 # transform assistant message to prompt message
                 assistant_prompt_message = AssistantPromptMessage(
-                    content=content_delta if content_delta else "",
+                    content=content_delta or "",
                 )
                 index += 1
diff --git a/api/core/model_runtime/model_providers/chatglm/llm/llm.py b/api/core/model_runtime/model_providers/chatglm/llm/llm.py
index 114acc1ec3a94..b3eeb48e226e1 100644
--- a/api/core/model_runtime/model_providers/chatglm/llm/llm.py
+++ b/api/core/model_runtime/model_providers/chatglm/llm/llm.py
@@ -302,11 +302,11 @@ def _handle_chat_generate_stream_response(
             if delta.delta.function_call:
                 function_calls = [delta.delta.function_call]
 
-            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
 
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
             )
 
             if delta.finish_reason is not None:
diff --git a/api/core/model_runtime/model_providers/localai/llm/llm.py b/api/core/model_runtime/model_providers/localai/llm/llm.py
index 94c03efe7b5b3..e7295355f627d 100644
--- a/api/core/model_runtime/model_providers/localai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/localai/llm/llm.py
@@ -511,7 +511,7 @@ def _handle_completion_generate_stream_response(
             delta = chunk.choices[0]
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(content=delta.text if delta.text else "", tool_calls=[])
+            assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[])
 
             if delta.finish_reason is not None:
                 # temp_assistant_prompt_message is used to calculate usage
@@ -578,11 +578,11 @@ def _handle_chat_generate_stream_response(
             if delta.delta.function_call:
                 function_calls = [delta.delta.function_call]
 
-            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+            assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
 
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
             )
 
             if delta.finish_reason is not None:
diff --git a/api/core/model_runtime/model_providers/minimax/llm/llm.py b/api/core/model_runtime/model_providers/minimax/llm/llm.py
index 76ed704a75540..4250c40cfb94b 100644
--- a/api/core/model_runtime/model_providers/minimax/llm/llm.py
+++ b/api/core/model_runtime/model_providers/minimax/llm/llm.py
@@ -211,7 +211,7 @@ def _handle_chat_generate_stream_response(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
                     usage=usage,
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
         elif message.function_call:
@@ -244,7 +244,7 @@ def _handle_chat_generate_stream_response(
                 delta=LLMResultChunkDelta(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
index 2cfb79b241222..b4c61d8a6dc79 100644
--- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
@@ -65,7 +65,7 @@ def _invoke(
         inputs = []
         used_tokens = 0
 
-        for i, text in enumerate(texts):
+        for text in texts:
             # Here token count is only an approximation based on the GPT2 tokenizer
             num_tokens = self._get_num_tokens_by_gpt2(text)
diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py
index 578687b5d3a81..c4b381df41b68 100644
--- a/api/core/model_runtime/model_providers/openai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/openai/llm/llm.py
@@ -508,7 +508,7 @@ def _handle_generate_stream_response(
                 continue
 
             # transform assistant message to prompt message
-            text = delta.text if delta.text else ""
+            text = delta.text or ""
             assistant_prompt_message = AssistantPromptMessage(content=text)
 
             full_text += text
@@ -760,11 +760,9 @@ def _handle_chat_generate_stream_response(
                     final_tool_calls.extend(tool_calls)
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-            )
+            assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             if has_finish_reason:
                 final_chunk = LLMResultChunk(
diff --git a/api/core/model_runtime/model_providers/openai/tts/tts.py b/api/core/model_runtime/model_providers/openai/tts/tts.py
index bfb443698ca8d..b50b43199feee 100644
--- a/api/core/model_runtime/model_providers/openai/tts/tts.py
+++ b/api/core/model_runtime/model_providers/openai/tts/tts.py
@@ -88,7 +88,7 @@ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str
                 )
                 for i in range(len(sentences))
             ]
-            for index, future in enumerate(futures):
+            for future in futures:
                 yield from future.result().__enter__().iter_bytes(1024)
 
         else:
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
index 41ca163a92ff8..5a8a754f72422 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
@@ -179,9 +179,9 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
         features = []
 
         function_calling_type = credentials.get("function_calling_type", "no_call")
-        if function_calling_type in ["function_call"]:
+        if function_calling_type == "function_call":
             features.append(ModelFeature.TOOL_CALL)
-        elif function_calling_type in ["tool_call"]:
+        elif function_calling_type == "tool_call":
             features.append(ModelFeature.MULTI_TOOL_CALL)
 
         stream_function_calling = credentials.get("stream_function_calling", "supported")
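Two smaller refurb rules appear in the hunks just above: a membership test against a single-element list is reduced to a plain equality check, and enumerate() is dropped when the loop index is never read. A short sketch of both, using made-up sample values rather than the patch's real objects:

# Equality beats membership in a one-element list, and the loop needs no index.
function_calling_type = "tool_call"  # sample value for illustration
features = []
if function_calling_type == "function_call":   # was: in ["function_call"]
    features.append("TOOL_CALL")
elif function_calling_type == "tool_call":     # was: in ["tool_call"]
    features.append("MULTI_TOOL_CALL")

futures = ["future-1", "future-2"]  # stand-ins for concurrent.futures results
for future in futures:              # was: for index, future in enumerate(futures)
    print(future)                   # the index was never used in the body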
diff --git a/api/core/model_runtime/model_providers/openllm/llm/llm.py b/api/core/model_runtime/model_providers/openllm/llm/llm.py
index b560afca39e9d..34b4de796206d 100644
--- a/api/core/model_runtime/model_providers/openllm/llm/llm.py
+++ b/api/core/model_runtime/model_providers/openllm/llm/llm.py
@@ -179,7 +179,7 @@ def _handle_chat_generate_stream_response(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
                     usage=usage,
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
         else:
@@ -189,7 +189,7 @@ def _handle_chat_generate_stream_response(
                 delta=LLMResultChunkDelta(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
diff --git a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py b/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py
index e754479ec0103..351dcced15375 100644
--- a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py
+++ b/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py
@@ -106,7 +106,7 @@ def generate(
             timeout = 120
 
         data = {
-            "stop": stop if stop else [],
+            "stop": stop or [],
             "prompt": "\n".join([message.content for message in prompt_messages]),
            "llm_config": default_llm_config,
        }
diff --git a/api/core/model_runtime/model_providers/replicate/llm/llm.py b/api/core/model_runtime/model_providers/replicate/llm/llm.py
index 87c8bc4a91cda..e77372cae23b6 100644
--- a/api/core/model_runtime/model_providers/replicate/llm/llm.py
+++ b/api/core/model_runtime/model_providers/replicate/llm/llm.py
@@ -214,7 +214,7 @@ def _handle_generate_stream_response(
             index += 1
 
-            assistant_prompt_message = AssistantPromptMessage(content=output if output else "")
+            assistant_prompt_message = AssistantPromptMessage(content=output or "")
 
             if index < prediction_output_length:
                 yield LLMResultChunk(
diff --git a/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py b/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
index 7e7614055c76b..959dff6a21c6d 100644
--- a/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
@@ -1,5 +1,6 @@
 import json
 import logging
+import operator
 from typing import Any, Optional
 
 import boto3
@@ -94,7 +95,7 @@ def _invoke(
             for idx in range(len(scores)):
                 candidate_docs.append({"content": docs[idx], "score": scores[idx]})
 
-            sorted(candidate_docs, key=lambda x: x["score"], reverse=True)
+            sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
 
             line = 3
             rerank_documents = []
diff --git a/api/core/model_runtime/model_providers/sagemaker/tts/tts.py b/api/core/model_runtime/model_providers/sagemaker/tts/tts.py
index 3dd5f8f64cfc2..a22bd6dd6ef4f 100644
--- a/api/core/model_runtime/model_providers/sagemaker/tts/tts.py
+++ b/api/core/model_runtime/model_providers/sagemaker/tts/tts.py
@@ -260,7 +260,7 @@ def _tts_invoke_streaming(self, model_type: str, payload: dict, sagemaker_endpoi
                 for payload in payloads
             ]
 
-            for index, future in enumerate(futures):
+            for future in futures:
                 resp = future.result()
                 audio_bytes = requests.get(resp.get("s3_presign_url")).content
                 for i in range(0, len(audio_bytes), 1024):
diff --git a/api/core/model_runtime/model_providers/spark/llm/llm.py b/api/core/model_runtime/model_providers/spark/llm/llm.py
index 0c42acf5aa6f8..57193dc0316b7 100644
--- a/api/core/model_runtime/model_providers/spark/llm/llm.py
+++ b/api/core/model_runtime/model_providers/spark/llm/llm.py
@@ -220,7 +220,7 @@ def _handle_generate_stream_response(
             delta = content
 
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta if delta else "",
+                content=delta or "",
             )
 
             prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
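The sagemaker rerank hunk above swaps a trivial lambda for operator.itemgetter as the sort key, which is the refurb-preferred spelling. Note that one pre-existing quirk survives the rewrite unchanged: sorted() returns a new list, so calling it without using the result sorts nothing. A sketch of the effective form, with made-up data rather than real rerank output:

import operator

candidate_docs = [
    {"content": "doc-a", "score": 0.2},
    {"content": "doc-b", "score": 0.9},
]
# Assign the result back (or call candidate_docs.sort(...)) so the ordering sticks;
# a bare sorted(...) expression discards the sorted list.
candidate_docs = sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
print(candidate_docs[0]["content"])  # doc-b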
diff --git a/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py b/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py
index 9fd4a45f45348..c3c21793e8eb3 100644
--- a/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py
+++ b/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py
@@ -1,6 +1,7 @@
 import base64
 import hashlib
 import hmac
+import operator
 import time
 
 import requests
@@ -127,7 +128,7 @@ def _sign(self, signstr, secret_key):
         return s
 
     def _build_req_with_signature(self, secret_key, params, header):
-        query = sorted(params.items(), key=lambda d: d[0])
+        query = sorted(params.items(), key=operator.itemgetter(0))
         signstr = self._format_sign_string(query)
         signature = self._sign(signstr, secret_key)
         header["Authorization"] = signature
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index db0b2deaa5ab2..ca708d931c09f 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -4,6 +4,7 @@
 import uuid
 from collections.abc import Generator
 from http import HTTPStatus
+from pathlib import Path
 from typing import Optional, Union, cast
 
 from dashscope import Generation, MultiModalConversation, get_tokenizer
@@ -454,8 +455,7 @@ def _save_base64_image_to_file(self, base64_image: str) -> str:
         file_path = os.path.join(temp_dir, f"{uuid.uuid4()}.{mime_type.split('/')[1]}")
 
-        with open(file_path, "wb") as image_file:
-            image_file.write(base64.b64decode(encoded_string))
+        Path(file_path).write_bytes(base64.b64decode(encoded_string))
 
         return f"file://{file_path}"
diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py
index 9646e209b2636..74524e81e281f 100644
--- a/api/core/model_runtime/model_providers/upstage/llm/llm.py
+++ b/api/core/model_runtime/model_providers/upstage/llm/llm.py
@@ -368,11 +368,9 @@ def _handle_chat_generate_stream_response(
                     final_tool_calls.extend(tool_calls)
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-            )
+            assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             if has_finish_reason:
                 final_chunk = LLMResultChunk(
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
index 1b9931d2c3194..dad5002f357ff 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
@@ -231,10 +231,10 @@ def _handle_claude_stream_response(
                     ),
                 )
             elif isinstance(chunk, ContentBlockDeltaEvent):
-                chunk_text = chunk.delta.text if chunk.delta.text else ""
+                chunk_text = chunk.delta.text or ""
                 full_assistant_content += chunk_text
                 assistant_prompt_message = AssistantPromptMessage(
-                    content=chunk_text if chunk_text else "",
+                    content=chunk_text or "",
                 )
                 index = chunk.index
                 yield LLMResultChunk(
diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py
index 7435720252af2..97c77de8d32c3 100644
--- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py
+++ b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py
@@ -1,5 +1,6 @@
 # coding : utf-8
 import datetime
+from itertools import starmap
 
 import pytz
@@ -48,7 +49,7 @@ def __init__(self):
         self.authorization = ""
 
     def __str__(self):
-        return "\n".join(["{}:{}".format(*item) for item in self.__dict__.items()])
+        return "\n".join(list(starmap("{}:{}".format, self.__dict__.items())))
 
 
 class Credentials:
diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py
index 44f99599653c4..178d63714e9cf 100644
--- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py
+++ b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py
@@ -1,5 +1,6 @@
 import hashlib
 import hmac
+import operator
 from functools import reduce
 from urllib.parse import quote
@@ -40,4 +41,4 @@ def to_hex(content):
         if len(hv) == 1:
             hv = "0" + hv
         lst.append(hv)
-    return reduce(lambda x, y: x + y, lst)
+    return reduce(operator.add, lst)
diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py
index c25851fc4589e..f8bf8fb821978 100644
--- a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py
+++ b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py
@@ -174,9 +174,7 @@ def _handle_stream_chat_response() -> Generator:
                     prompt_messages=prompt_messages,
                     delta=LLMResultChunkDelta(
                         index=index,
-                        message=AssistantPromptMessage(
-                            content=message["content"] if message["content"] else "", tool_calls=[]
-                        ),
+                        message=AssistantPromptMessage(content=message["content"] or "", tool_calls=[]),
                         usage=usage,
                         finish_reason=choice.get("finish_reason"),
                     ),
@@ -208,7 +206,7 @@ def _handle_chat_response() -> LLMResult:
                 model=model,
                 prompt_messages=prompt_messages,
                 message=AssistantPromptMessage(
-                    content=message["content"] if message["content"] else "",
+                    content=message["content"] or "",
                     tool_calls=tool_calls,
                 ),
                 usage=self._calc_response_usage(
@@ -284,7 +282,7 @@ def _handle_chat_response(resp: ChatCompletion) -> LLMResult:
                 model=model,
                 prompt_messages=prompt_messages,
                 message=AssistantPromptMessage(
-                    content=message.content if message.content else "",
+                    content=message.content or "",
                     tool_calls=tool_calls,
                 ),
                 usage=self._calc_response_usage(
diff --git a/api/core/model_runtime/model_providers/wenxin/llm/llm.py b/api/core/model_runtime/model_providers/wenxin/llm/llm.py
index 8feedbfe55f08..ec3556f7da8a4 100644
--- a/api/core/model_runtime/model_providers/wenxin/llm/llm.py
+++ b/api/core/model_runtime/model_providers/wenxin/llm/llm.py
@@ -199,7 +199,7 @@ def _generate(
             secret_key=credentials["secret_key"],
         )
 
-        user = user if user else "ErnieBotDefault"
+        user = user or "ErnieBotDefault"
 
         # convert prompt messages to baichuan messages
         messages = [
@@ -289,7 +289,7 @@ def _handle_chat_generate_stream_response(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
                     usage=usage,
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
         else:
@@ -299,7 +299,7 @@ def _handle_chat_generate_stream_response(
                 delta=LLMResultChunkDelta(
                     index=0,
                     message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-                    finish_reason=message.stop_reason if message.stop_reason else None,
+                    finish_reason=message.stop_reason or None,
                 ),
             )
diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
index db323ae4c1c0c..4d6f6dccd0cf7 100644
--- a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
@@ -85,7 +85,7 @@ def _invoke(
         api_key = credentials["api_key"]
         secret_key = credentials["secret_key"]
         embedding: TextEmbedding = self._create_text_embedding(api_key, secret_key)
-        user = user if user else "ErnieBotDefault"
+        user = user or "ErnieBotDefault"
 
         context_size = self._get_context_size(model, credentials)
         max_chunks = self._get_max_chunks(model, credentials)
diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py
index 7ad236880b4c3..f8f6c6b12d337 100644
--- a/api/core/model_runtime/model_providers/xinference/llm/llm.py
+++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py
@@ -589,7 +589,7 @@ def _handle_chat_generate_response(
         # convert tool call to assistant message tool call
         tool_calls = assistant_message.tool_calls
-        assistant_prompt_message_tool_calls = self._extract_response_tool_calls(tool_calls if tool_calls else [])
+        assistant_prompt_message_tool_calls = self._extract_response_tool_calls(tool_calls or [])
         function_call = assistant_message.function_call
         if function_call:
             assistant_prompt_message_tool_calls += [self._extract_response_function_call(function_call)]
@@ -652,7 +652,7 @@ def _handle_chat_stream_response(
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
             )
 
             if delta.finish_reason is not None:
@@ -749,7 +749,7 @@ def _handle_completion_stream_response(
             delta = chunk.choices[0]
 
             # transform assistant message to prompt message
-            assistant_prompt_message = AssistantPromptMessage(content=delta.text if delta.text else "", tool_calls=[])
+            assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[])
 
             if delta.finish_reason is not None:
                 # temp_assistant_prompt_message is used to calculate usage
diff --git a/api/core/model_runtime/model_providers/xinference/tts/tts.py b/api/core/model_runtime/model_providers/xinference/tts/tts.py
index 60db15130299d..d29e8ed8f9c18 100644
--- a/api/core/model_runtime/model_providers/xinference/tts/tts.py
+++ b/api/core/model_runtime/model_providers/xinference/tts/tts.py
@@ -215,7 +215,7 @@ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str
                 for i in range(len(sentences))
             ]
 
-            for index, future in enumerate(futures):
+            for future in futures:
                 response = future.result()
                 for i in range(0, len(response), 1024):
                     yield response[i : i + 1024]
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
index 29b873fd06624..f76e51fee9e43 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py
@@ -414,10 +414,10 @@ def _handle_generate_stream_response(
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
-                content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_tool_calls
+                content=delta.delta.content or "", tool_calls=assistant_tool_calls
             )
 
-            full_assistant_content += delta.delta.content if delta.delta.content else ""
+            full_assistant_content += delta.delta.content or ""
 
             if delta.finish_reason is not None and chunk.usage is not None:
                 completion_tokens = chunk.usage.completion_tokens
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
index 48eeb37c41ba7..5f7f6d04f20d9 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
@@ -30,6 +30,8 @@ def _merge_map(map1: Mapping, map2: Mapping) -> Mapping:
     return {key: val for key, val in merged.items() if val is not None}
 
 
+from itertools import starmap
+
 from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
 
 ZHIPUAI_DEFAULT_TIMEOUT = httpx.Timeout(timeout=300.0, connect=8.0)
@@ -159,7 +161,7 @@ def _primitive_value_to_str(val) -> str:
             return [(key, str_data)]
 
     def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
-        items = flatten([self._object_to_formdata(k, v) for k, v in data.items()])
+        items = flatten(list(starmap(self._object_to_formdata, data.items())))
 
         serialized: dict[str, object] = {}
         for key, value in items:
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index a0f3ac7f867ea..6aefbec9aa626 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -65,7 +65,7 @@ def trace(self, trace_info: BaseTraceInfo):
             self.generate_name_trace(trace_info)
 
     def workflow_trace(self, trace_info: WorkflowTraceInfo):
-        trace_id = trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id
+        trace_id = trace_info.workflow_app_log_id or trace_info.workflow_run_id
         user_id = trace_info.metadata.get("user_id")
         if trace_info.message_id:
             trace_id = trace_info.message_id
@@ -84,7 +84,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             )
             self.add_trace(langfuse_trace_data=trace_data)
             workflow_span_data = LangfuseSpan(
-                id=(trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id),
+                id=(trace_info.workflow_app_log_id or trace_info.workflow_run_id),
                 name=TraceTaskName.WORKFLOW_TRACE.value,
                 input=trace_info.workflow_run_inputs,
                 output=trace_info.workflow_run_outputs,
@@ -93,7 +93,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                 end_time=trace_info.end_time,
                 metadata=trace_info.metadata,
                 level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR,
-                status_message=trace_info.error if trace_info.error else "",
+                status_message=trace_info.error or "",
             )
             self.add_span(langfuse_span_data=workflow_span_data)
         else:
@@ -143,7 +143,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             else:
                 inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
                 outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
-            created_at = node_execution.created_at if node_execution.created_at else datetime.now()
+            created_at = node_execution.created_at or datetime.now()
             elapsed_time = node_execution.elapsed_time
             finished_at = created_at + timedelta(seconds=elapsed_time)
@@ -172,10 +172,8 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                     end_time=finished_at,
                     metadata=metadata,
                     level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
-                    status_message=trace_info.error if trace_info.error else "",
-                    parent_observation_id=(
-                        trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id
-                    ),
+                    status_message=trace_info.error or "",
+                    parent_observation_id=(trace_info.workflow_app_log_id or trace_info.workflow_run_id),
                 )
             else:
                 span_data = LangfuseSpan(
@@ -188,7 +186,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                     end_time=finished_at,
                     metadata=metadata,
                     level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
-                    status_message=trace_info.error if trace_info.error else "",
+                    status_message=trace_info.error or "",
                 )
                 self.add_span(langfuse_span_data=span_data)
@@ -212,7 +210,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                 output=outputs,
                 metadata=metadata,
                 level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
-                status_message=trace_info.error if trace_info.error else "",
+                status_message=trace_info.error or "",
                 usage=generation_usage,
             )
@@ -277,7 +275,7 @@ def message_trace(self, trace_info: MessageTraceInfo, **kwargs):
             output=message_data.answer,
             metadata=metadata,
             level=(LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR),
-            status_message=message_data.error if message_data.error else "",
+            status_message=message_data.error or "",
             usage=generation_usage,
         )
@@ -319,7 +317,7 @@ def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
             end_time=trace_info.end_time,
             metadata=trace_info.metadata,
             level=(LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR),
-            status_message=message_data.error if message_data.error else "",
+            status_message=message_data.error or "",
             usage=generation_usage,
         )
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index eea7bb3535fe8..37cbea13fd873 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -82,7 +82,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
         langsmith_run = LangSmithRunModel(
             file_list=trace_info.file_list,
             total_tokens=trace_info.total_tokens,
-            id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
+            id=trace_info.workflow_app_log_id or trace_info.workflow_run_id,
             name=TraceTaskName.WORKFLOW_TRACE.value,
             inputs=trace_info.workflow_run_inputs,
             run_type=LangSmithRunType.tool,
@@ -94,7 +94,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             },
             error=trace_info.error,
             tags=["workflow"],
-            parent_run_id=trace_info.message_id if trace_info.message_id else None,
+            parent_run_id=trace_info.message_id or None,
         )
 
         self.add_run(langsmith_run)
@@ -133,7 +133,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             else:
                 inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
                 outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
-            created_at = node_execution.created_at if node_execution.created_at else datetime.now()
+            created_at = node_execution.created_at or datetime.now()
             elapsed_time = node_execution.elapsed_time
             finished_at = created_at + timedelta(seconds=elapsed_time)
@@ -180,9 +180,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                 extra={
                     "metadata": metadata,
                 },
-                parent_run_id=trace_info.workflow_app_log_id
-                if trace_info.workflow_app_log_id
-                else trace_info.workflow_run_id,
+                parent_run_id=trace_info.workflow_app_log_id or trace_info.workflow_run_id,
                 tags=["node_execution"],
             )
diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py
index 68fcdf32da66d..6f17bade9748e 100644
--- a/api/core/ops/ops_trace_manager.py
+++ b/api/core/ops/ops_trace_manager.py
@@ -354,11 +354,11 @@ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id, user_id):
         workflow_run_inputs = json.loads(workflow_run.inputs) if workflow_run.inputs else {}
         workflow_run_outputs = json.loads(workflow_run.outputs) if workflow_run.outputs else {}
         workflow_run_version = workflow_run.version
-        error = workflow_run.error if workflow_run.error else ""
+        error = workflow_run.error or ""
 
         total_tokens = workflow_run.total_tokens
 
-        file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else []
+        file_list = workflow_run_inputs.get("sys.file") or []
         query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""
 
         # get workflow_app_log_id
@@ -452,7 +452,7 @@ def message_trace(self, message_id):
             message_tokens=message_tokens,
             answer_tokens=message_data.answer_tokens,
             total_tokens=message_tokens + message_data.answer_tokens,
-            error=message_data.error if message_data.error else "",
+            error=message_data.error or "",
             inputs=inputs,
             outputs=message_data.answer,
             file_list=file_list,
@@ -487,7 +487,7 @@ def moderation_trace(self, message_id, timer, **kwargs):
         workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
 
         moderation_trace_info = ModerationTraceInfo(
-            message_id=workflow_app_log_id if workflow_app_log_id else message_id,
+            message_id=workflow_app_log_id or message_id,
             inputs=inputs,
             message_data=message_data.to_dict(),
             flagged=moderation_result.flagged,
@@ -527,7 +527,7 @@ def suggested_question_trace(self, message_id, timer, **kwargs):
         workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
 
         suggested_question_trace_info = SuggestedQuestionTraceInfo(
-            message_id=workflow_app_log_id if workflow_app_log_id else message_id,
+            message_id=workflow_app_log_id or message_id,
             message_data=message_data.to_dict(),
             inputs=message_data.message,
             outputs=message_data.answer,
@@ -569,7 +569,7 @@ def dataset_retrieval_trace(self, message_id, timer, **kwargs):
         dataset_retrieval_trace_info = DatasetRetrievalTraceInfo(
             message_id=message_id,
-            inputs=message_data.query if message_data.query else message_data.inputs,
+            inputs=message_data.query or message_data.inputs,
             documents=[doc.model_dump() for doc in documents],
             start_time=timer.get("start"),
             end_time=timer.get("end"),
@@ -695,8 +695,7 @@ def __init__(self, app_id=None, user_id=None):
         self.start_timer()
 
     def add_trace_task(self, trace_task: TraceTask):
-        global trace_manager_timer
-        global trace_manager_queue
+        global trace_manager_timer, trace_manager_queue
         try:
             if self.trace_instance:
                 trace_task.app_id = self.app_id
diff --git a/api/core/prompt/simple_prompt_transform.py b/api/core/prompt/simple_prompt_transform.py
index 13e5c5253e4d4..74795605209f9 100644
--- a/api/core/prompt/simple_prompt_transform.py
+++ b/api/core/prompt/simple_prompt_transform.py
@@ -112,11 +112,11 @@ def get_prompt_str_and_rules(
         for v in prompt_template_config["special_variable_keys"]:
             # support #context#, #query# and #histories#
             if v == "#context#":
-                variables["#context#"] = context if context else ""
+                variables["#context#"] = context or ""
             elif v == "#query#":
-                variables["#query#"] = query if query else ""
+                variables["#query#"] = query or ""
             elif v == "#histories#":
-                variables["#histories#"] = histories if histories else ""
+                variables["#histories#"] = histories or ""
 
         prompt_template = prompt_template_config["prompt_template"]
         prompt = prompt_template.format(variables)
diff --git a/api/core/rag/datasource/keyword/keyword_base.py b/api/core/rag/datasource/keyword/keyword_base.py
index 27e4f383ad6a9..4b9ec460e61fe 100644
--- a/api/core/rag/datasource/keyword/keyword_base.py
+++ b/api/core/rag/datasource/keyword/keyword_base.py
@@ -34,7 +34,7 @@ def search(self, query: str, **kwargs: Any) -> list[Document]:
         raise NotImplementedError
 
     def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
-        for text in texts[:]:
+        for text in texts.copy():
             doc_id = text.metadata["doc_id"]
             exists_duplicate_node = self.text_exists(doc_id)
             if exists_duplicate_node:
diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
index a9c0eefb786f3..ef48d22963947 100644
--- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
+++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
@@ -239,7 +239,7 @@ def delete_by_metadata_field(self, key: str, value: str) -> None:
     def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
         from alibabacloud_gpdb20160503 import models as gpdb_20160503_models
 
-        score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
         request = gpdb_20160503_models.QueryCollectionDataRequest(
             dbinstance_id=self.config.instance_id,
             region_id=self.config.region_id,
@@ -267,7 +267,7 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
         from alibabacloud_gpdb20160503 import models as gpdb_20160503_models
 
-        score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
         request = gpdb_20160503_models.QueryCollectionDataRequest(
             dbinstance_id=self.config.instance_id,
             region_id=self.config.region_id,
diff --git a/api/core/rag/datasource/vdb/chroma/chroma_vector.py b/api/core/rag/datasource/vdb/chroma/chroma_vector.py
index cb38cf94a96e8..41f749279dc14 100644
--- a/api/core/rag/datasource/vdb/chroma/chroma_vector.py
+++ b/api/core/rag/datasource/vdb/chroma/chroma_vector.py
@@ -92,7 +92,7 @@ def text_exists(self, id: str) -> bool:
     def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
         collection = self._client.get_or_create_collection(self._collection_name)
         results: QueryResult = collection.query(query_embeddings=query_vector, n_results=kwargs.get("top_k", 4))
-        score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
 
         ids: list[str] = results["ids"][0]
         documents: list[str] = results["documents"][0]
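The vector-store hunks around this point all collapse a double lookup into a single dict.get with a default. The two forms agree when the key is absent or holds a truthy number, but can diverge if a caller explicitly passes None; a small sketch of that edge case, using a hypothetical kwargs dict rather than anything from the patch:

kwargs = {"score_threshold": None}  # hypothetical caller passing an explicit None

# before: the second falsy check coerced None back to 0.0
old = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0

# after: the default only applies when the key is missing entirely
new = kwargs.get("score_threshold", 0.0)

print(old, new)  # 0.0 None -- identical behavior only when the key is absent or truthy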
diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
index f13723b51fb1e..c52ca5f488ccf 100644
--- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
+++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
@@ -86,8 +86,8 @@ def add_texts(self, documents: list[Document], embeddings: list[list[float]], **
                 id=uuids[i],
                 document={
                     Field.CONTENT_KEY.value: documents[i].page_content,
-                    Field.VECTOR.value: embeddings[i] if embeddings[i] else None,
-                    Field.METADATA_KEY.value: documents[i].metadata if documents[i].metadata else {},
+                    Field.VECTOR.value: embeddings[i] or None,
+                    Field.METADATA_KEY.value: documents[i].metadata or {},
                 },
             )
         self._client.indices.refresh(index=self._collection_name)
@@ -131,7 +131,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
         docs = []
         for doc, score in docs_and_scores:
-            score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0
+            score_threshold = kwargs.get("score_threshold", 0.0)
             if score > score_threshold:
                 doc.metadata["score"] = score
                 docs.append(doc)
diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py
index d6d7136282607..7d78f542567a8 100644
--- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py
+++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py
@@ -141,7 +141,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
         for result in results[0]:
             metadata = result["entity"].get(Field.METADATA_KEY.value)
             metadata["score"] = result["distance"]
-            score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+            score_threshold = kwargs.get("score_threshold", 0.0)
             if result["distance"] > score_threshold:
                 doc = Document(page_content=result["entity"].get(Field.CONTENT_KEY.value), metadata=metadata)
                 docs.append(doc)
diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py
index 90464ac42a86d..fbd7864eddce7 100644
--- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py
+++ b/api/core/rag/datasource/vdb/myscale/myscale_vector.py
@@ -122,7 +122,7 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
     def _search(self, dist: str, order: SortOrder, **kwargs: Any) -> list[Document]:
         top_k = kwargs.get("top_k", 5)
-        score_threshold = kwargs.get("score_threshold") or 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
         where_str = (
             f"WHERE dist < {1 - score_threshold}"
             if self._metric.upper() == "COSINE" and order == SortOrder.ASC and score_threshold > 0.0
diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
index 7c0f620956e5a..a6f70c873342f 100644
--- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
+++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
@@ -170,7 +170,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
                 metadata = {}
 
             metadata["score"] = hit["_score"]
-            score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+            score_threshold = kwargs.get("score_threshold", 0.0)
             if hit["_score"] > score_threshold:
                 doc = Document(page_content=hit["_source"].get(Field.CONTENT_KEY.value), metadata=metadata)
                 docs.append(doc)
diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py
index d223b0decf3d2..16365823620ab 100644
--- a/api/core/rag/datasource/vdb/oracle/oraclevector.py
+++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py
@@ -200,7 +200,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
             [numpy.array(query_vector)],
         )
         docs = []
-        score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
         for record in cur:
             metadata, text, distance = record
             score = 1 - distance
@@ -212,7 +212,7 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
         top_k = kwargs.get("top_k", 5)
         # just not implement fetch by score_threshold now, may be later
-        score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+        score_threshold = kwargs.get("score_threshold", 0.0)
         if len(query) > 0:
             # Check which language the query is in
             zh_pattern = re.compile("[\u4e00-\u9fa5]+")
diff --git a/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py b/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py
index 24b391d63abed..765436006b3b0 100644
--- a/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py
+++ b/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py
@@ -198,7 +198,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
                 metadata = record.meta
                 score = 1 - dis
                 metadata["score"] = score
-                score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+                score_threshold = kwargs.get("score_threshold", 0.0)
                 if score > score_threshold:
                     doc = Document(page_content=record.text, metadata=metadata)
                     docs.append(doc)
diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py
index d2d9e5238b607..c35464b464c82 100644
--- a/api/core/rag/datasource/vdb/pgvector/pgvector.py
+++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py
@@ -144,7 +144,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
                 (json.dumps(query_vector),),
             )
             docs = []
-            score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
+            score_threshold = kwargs.get("score_threshold", 0.0)
             for record in cur:
                 metadata, text, distance = record
                 score = 1 - distance
diff --git a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
index 83d561819ca7b..2bf417e99313d 100644
--- a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
+++ b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
@@ -339,7 +339,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
         for result in results:
             metadata = result.payload.get(Field.METADATA_KEY.value) or {}
             # duplicate check score threshold
-            score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0
+            score_threshold = kwargs.get("score_threshold", 0.0)
             if result.score > score_threshold:
                 metadata["score"] = result.score
                 doc = Document(
docs = [] for document, score in results: - score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0 + score_threshold = kwargs.get("score_threshold", 0.0) if 1 - score > score_threshold: docs.append(document) return docs diff --git a/api/core/rag/datasource/vdb/tencent/tencent_vector.py b/api/core/rag/datasource/vdb/tencent/tencent_vector.py index dbedc1d4e9a22..b550743f3959c 100644 --- a/api/core/rag/datasource/vdb/tencent/tencent_vector.py +++ b/api/core/rag/datasource/vdb/tencent/tencent_vector.py @@ -153,7 +153,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc limit=kwargs.get("top_k", 4), timeout=self._client_config.timeout, ) - score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0 + score_threshold = kwargs.get("score_threshold", 0.0) return self._get_search_res(res, score_threshold) def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: diff --git a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py index 7eaf189292da1..a2eff6ff043c0 100644 --- a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py +++ b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py @@ -185,7 +185,7 @@ def delete_by_metadata_field(self, key: str, value: str) -> None: def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 5) - score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0 + score_threshold = kwargs.get("score_threshold", 0.0) filter = kwargs.get("filter") distance = 1 - score_threshold diff --git a/api/core/rag/datasource/vdb/vector_base.py b/api/core/rag/datasource/vdb/vector_base.py index fb80cdec87b53..1a0dc7f48b803 100644 --- a/api/core/rag/datasource/vdb/vector_base.py +++ b/api/core/rag/datasource/vdb/vector_base.py @@ -49,7 +49,7 @@ def delete(self) -> None: raise NotImplementedError def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]: - for text in texts[:]: + for text in texts.copy(): doc_id = text.metadata["doc_id"] exists_duplicate_node = self.text_exists(doc_id) if exists_duplicate_node: diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index bb24143a41ac8..ca90233b7f344 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -153,7 +153,7 @@ def _get_embeddings(self) -> Embeddings: return CacheEmbedding(embedding_model) def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]: - for text in texts[:]: + for text in texts.copy(): doc_id = text.metadata["doc_id"] exists_duplicate_node = self.text_exists(doc_id) if exists_duplicate_node: diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py index ca1123c6a04e7..64e92c5b825ad 100644 --- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py +++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py @@ -205,7 +205,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc docs = [] for doc, score in docs_and_scores: - score_threshold = kwargs.get("score_threshold", 0.0) if kwargs.get("score_threshold", 0.0) else 0.0 + score_threshold = kwargs.get("score_threshold", 0.0) # check score threshold if score > score_threshold: doc.metadata["score"] = score diff --git 
a/api/core/rag/extractor/blob/blob.py b/api/core/rag/extractor/blob/blob.py index f4c7b4b5f78b7..e46ab8b7fd0ac 100644 --- a/api/core/rag/extractor/blob/blob.py +++ b/api/core/rag/extractor/blob/blob.py @@ -12,7 +12,7 @@ from abc import ABC, abstractmethod from collections.abc import Generator, Iterable, Mapping from io import BufferedReader, BytesIO -from pathlib import PurePath +from pathlib import Path, PurePath from typing import Any, Optional, Union from pydantic import BaseModel, ConfigDict, model_validator @@ -56,8 +56,7 @@ def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: def as_string(self) -> str: """Read data as a string.""" if self.data is None and self.path: - with open(str(self.path), encoding=self.encoding) as f: - return f.read() + return Path(str(self.path)).read_text(encoding=self.encoding) elif isinstance(self.data, bytes): return self.data.decode(self.encoding) elif isinstance(self.data, str): @@ -72,8 +71,7 @@ def as_bytes(self) -> bytes: elif isinstance(self.data, str): return self.data.encode(self.encoding) elif self.data is None and self.path: - with open(str(self.path), "rb") as f: - return f.read() + return Path(str(self.path)).read_bytes() else: raise ValueError(f"Unable to get bytes for blob {self}") diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index 244ef9614a48a..3181656f59287 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -68,8 +68,7 @@ def load_from_url(cls, url: str, return_text: bool = False) -> Union[list[Docume suffix = "." + re.search(r"\.(\w+)$", filename).group(1) file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" - with open(file_path, "wb") as file: - file.write(response.content) + Path(file_path).write_bytes(response.content) extract_setting = ExtractSetting(datasource_type="upload_file", document_model="text_model") if return_text: delimiter = "\n" @@ -111,7 +110,7 @@ def extract( ) elif file_extension in [".htm", ".html"]: extractor = HtmlExtractor(file_path) - elif file_extension in [".docx"]: + elif file_extension == ".docx": extractor = WordExtractor(file_path, upload_file.tenant_id, upload_file.created_by) elif file_extension == ".csv": extractor = CSVExtractor(file_path, autodetect_encoding=True) @@ -143,7 +142,7 @@ def extract( extractor = MarkdownExtractor(file_path, autodetect_encoding=True) elif file_extension in [".htm", ".html"]: extractor = HtmlExtractor(file_path) - elif file_extension in [".docx"]: + elif file_extension == ".docx": extractor = WordExtractor(file_path, upload_file.tenant_id, upload_file.created_by) elif file_extension == ".csv": extractor = CSVExtractor(file_path, autodetect_encoding=True) diff --git a/api/core/rag/extractor/helpers.py b/api/core/rag/extractor/helpers.py index 9a21d4272a68e..69ca9d5d63688 100644 --- a/api/core/rag/extractor/helpers.py +++ b/api/core/rag/extractor/helpers.py @@ -1,6 +1,7 @@ """Document loader helpers.""" import concurrent.futures +from pathlib import Path from typing import NamedTuple, Optional, cast @@ -28,8 +29,7 @@ def detect_file_encodings(file_path: str, timeout: int = 5) -> list[FileEncoding import chardet def read_and_detect(file_path: str) -> list[dict]: - with open(file_path, "rb") as f: - rawdata = f.read() + rawdata = Path(file_path).read_bytes() return cast(list[dict], chardet.detect_all(rawdata)) with concurrent.futures.ThreadPoolExecutor() as executor: diff --git 
a/api/core/rag/extractor/markdown_extractor.py b/api/core/rag/extractor/markdown_extractor.py index ca125ecf554d7..849852ac23819 100644 --- a/api/core/rag/extractor/markdown_extractor.py +++ b/api/core/rag/extractor/markdown_extractor.py @@ -1,6 +1,7 @@ """Abstract interface for document loader implementations.""" import re +from pathlib import Path from typing import Optional, cast from core.rag.extractor.extractor_base import BaseExtractor @@ -102,15 +103,13 @@ def parse_tups(self, filepath: str) -> list[tuple[Optional[str], str]]: """Parse file into tuples.""" content = "" try: - with open(filepath, encoding=self._encoding) as f: - content = f.read() + content = Path(filepath).read_text(encoding=self._encoding) except UnicodeDecodeError as e: if self._autodetect_encoding: detected_encodings = detect_file_encodings(filepath) for encoding in detected_encodings: try: - with open(filepath, encoding=encoding.encoding) as f: - content = f.read() + content = Path(filepath).read_text(encoding=encoding.encoding) break except UnicodeDecodeError: continue diff --git a/api/core/rag/extractor/text_extractor.py b/api/core/rag/extractor/text_extractor.py index ed0ae41f51e39..b2b51d71d73a1 100644 --- a/api/core/rag/extractor/text_extractor.py +++ b/api/core/rag/extractor/text_extractor.py @@ -1,5 +1,6 @@ """Abstract interface for document loader implementations.""" +from pathlib import Path from typing import Optional from core.rag.extractor.extractor_base import BaseExtractor @@ -25,15 +26,13 @@ def extract(self) -> list[Document]: """Load from file path.""" text = "" try: - with open(self._file_path, encoding=self._encoding) as f: - text = f.read() + text = Path(self._file_path).read_text(encoding=self._encoding) except UnicodeDecodeError as e: if self._autodetect_encoding: detected_encodings = detect_file_encodings(self._file_path) for encoding in detected_encodings: try: - with open(self._file_path, encoding=encoding.encoding) as f: - text = f.read() + text = Path(self._file_path).read_text(encoding=encoding.encoding) break except UnicodeDecodeError: continue diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index 947644a90b19c..7352ef378b67d 100644 --- a/api/core/rag/extractor/word_extractor.py +++ b/api/core/rag/extractor/word_extractor.py @@ -153,7 +153,7 @@ def _parse_row(self, row, image_map, total_cols): if col_index >= total_cols: break cell_content = self._parse_cell(cell, image_map).strip() - cell_colspan = cell.grid_span if cell.grid_span else 1 + cell_colspan = cell.grid_span or 1 for i in range(cell_colspan): if col_index + i < total_cols: row_cells[col_index + i] = cell_content if i == 0 else "" diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index e4ad78ed2b345..cdaca8387d227 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -256,7 +256,7 @@ def single_retrieve( # get retrieval model config dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first() if dataset: - retrieval_model_config = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model + retrieval_model_config = dataset.retrieval_model or default_retrieval_model # get top k top_k = retrieval_model_config["top_k"] @@ -410,7 +410,7 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, top_k: int, return [] # get retrieval model , if the model is not setting , using default - retrieval_model = 
dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+        retrieval_model = dataset.retrieval_model or default_retrieval_model

         if dataset.indexing_technique == "economy":
             # use keyword table query
@@ -433,9 +433,7 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, top_k: int,
                 reranking_model=retrieval_model.get("reranking_model", None)
                 if retrieval_model["reranking_enable"]
                 else None,
-                reranking_mode=retrieval_model.get("reranking_mode")
-                if retrieval_model.get("reranking_mode")
-                else "reranking_model",
+                reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model",
                 weights=retrieval_model.get("weights", None),
             )

@@ -486,7 +484,7 @@ def to_dataset_retriever_tool(
             }

         for dataset in available_datasets:
-            retrieval_model_config = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+            retrieval_model_config = dataset.retrieval_model or default_retrieval_model

             # get top k
             top_k = retrieval_model_config["top_k"]
diff --git a/api/core/tools/provider/api_tool_provider.py b/api/core/tools/provider/api_tool_provider.py
index 2e6018cffc1c4..d99314e33a320 100644
--- a/api/core/tools/provider/api_tool_provider.py
+++ b/api/core/tools/provider/api_tool_provider.py
@@ -106,7 +106,7 @@ def _parse_tool_bundle(self, tool_bundle: ApiToolBundle) -> ApiTool:
                     "human": {"en_US": tool_bundle.summary or "", "zh_Hans": tool_bundle.summary or ""},
                     "llm": tool_bundle.summary or "",
                 },
-                "parameters": tool_bundle.parameters if tool_bundle.parameters else [],
+                "parameters": tool_bundle.parameters or [],
             }
         )
diff --git a/api/core/tools/provider/builtin/aws/tools/sagemaker_text_rerank.py b/api/core/tools/provider/builtin/aws/tools/sagemaker_text_rerank.py
index 3c35b65e66bad..bffcd058b509b 100644
--- a/api/core/tools/provider/builtin/aws/tools/sagemaker_text_rerank.py
+++ b/api/core/tools/provider/builtin/aws/tools/sagemaker_text_rerank.py
@@ -1,4 +1,5 @@
 import json
+import operator
 from typing import Any, Union

 import boto3
@@ -71,7 +72,7 @@ def _invoke(
             candidate_docs[idx]["score"] = scores[idx]

-        sorted_candidate_docs = sorted(candidate_docs, key=lambda x: x["score"], reverse=True)
+        sorted_candidate_docs = sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)

         return [self.create_json_message(res) for res in sorted_candidate_docs[: self.topk]]
diff --git a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py
index 9e0918afa9bd9..40e1af043b352 100644
--- a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py
+++ b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py
@@ -115,7 +115,7 @@ def get_controls(self, controls: list) -> dict:
             fields.append(field)
             fields_list.append(
                 f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}"
-                f"|{field['options'] if field['options'] else ''}|"
+                f"|{field['options'] or ''}|"
             )

         fields.append(
diff --git a/api/core/tools/provider/builtin/hap/tools/get_worksheet_pivot_data.py b/api/core/tools/provider/builtin/hap/tools/get_worksheet_pivot_data.py
index 6b831f3145b71..01c1af9b3ea9b 100644
--- a/api/core/tools/provider/builtin/hap/tools/get_worksheet_pivot_data.py
+++ b/api/core/tools/provider/builtin/hap/tools/get_worksheet_pivot_data.py
@@ -130,7 +130,7 @@ def generate_pivot_json(self, data: dict[str, Any]) -> dict:
         #   ]
         rows = []
         for row in data["data"]:
-            row_data = row["rows"] if row["rows"] else {}
+            row_data = 
row["rows"] or {} row_data.update(row["columns"]) row_data.update(row["values"]) rows.append(row_data) diff --git a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py index 5888f7443fc39..171895a3060fb 100644 --- a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py +++ b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py @@ -113,7 +113,7 @@ def _invoke( result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"." if result["total"] > 0: result_text += ( - f" The following are {result['total'] if result['total'] < limit else limit}" + f" The following are {min(limit, result['total'])}" f" pieces of data presented in a table format:\n\n{table_header}" ) for row in rows: diff --git a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py index d7bfb53bd70eb..09725cf8a2ab0 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py +++ b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py @@ -37,7 +37,7 @@ def get_params(self, video_id: str, language: str, **kwargs: Any) -> dict[str, s return { "engine": "youtube_transcripts", "video_id": video_id, - "lang": language if language else "en", + "lang": language or "en", **{key: value for key, value in kwargs.items() if value not in [None, ""]}, } diff --git a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py index e76af6fe70033..067600c601337 100644 --- a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py @@ -160,7 +160,7 @@ def _retriever( hit_callback.on_query(query, dataset.id) # get retrieval model , if the model is not setting , using default - retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model + retrieval_model = dataset.retrieval_model or default_retrieval_model if dataset.indexing_technique == "economy": # use keyword table query @@ -183,9 +183,7 @@ def _retriever( reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, - reranking_mode=retrieval_model.get("reranking_mode") - if retrieval_model.get("reranking_mode") - else "reranking_model", + reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", weights=retrieval_model.get("weights", None), ) diff --git a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py index f61458278eaf5..ad533946a17bd 100644 --- a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py +++ b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py @@ -55,7 +55,7 @@ def _run(self, query: str) -> str: hit_callback.on_query(query, dataset.id) # get retrieval model , if the model is not setting , using default - retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model + retrieval_model = dataset.retrieval_model or default_retrieval_model if dataset.indexing_technique == "economy": # use keyword table query documents = RetrievalService.retrieve( @@ -76,9 +76,7 @@ def _run(self, query: str) -> str: reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, - 
reranking_mode=retrieval_model.get("reranking_mode") - if retrieval_model.get("reranking_mode") - else "reranking_model", + reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", weights=retrieval_model.get("weights", None), ) else: diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index fc2f63a241b30..e57cae9f16ac8 100644 --- a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -8,6 +8,7 @@ import tempfile import unicodedata from contextlib import contextmanager +from pathlib import Path from urllib.parse import unquote import chardet @@ -98,7 +99,7 @@ def get_url(url: str, user_agent: str = None) -> str: authors=a["byline"], publish_date=a["date"], top_image="", - text=a["plain_text"] if a["plain_text"] else "", + text=a["plain_text"] or "", ) return res @@ -117,8 +118,7 @@ def extract_using_readabilipy(html): subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path]) # Read output of call to Readability.parse() from JSON file and return as Python dictionary - with open(article_json_path, encoding="utf-8") as json_file: - input_json = json.loads(json_file.read()) + input_json = json.loads(Path(article_json_path).read_text(encoding="utf-8")) # Deleting files after processing os.unlink(article_json_path) diff --git a/api/core/tools/utils/yaml_utils.py b/api/core/tools/utils/yaml_utils.py index bcb061376dc5a..99b9f8049976e 100644 --- a/api/core/tools/utils/yaml_utils.py +++ b/api/core/tools/utils/yaml_utils.py @@ -21,7 +21,7 @@ def load_yaml_file(file_path: str, ignore_error: bool = True, default_value: Any with open(file_path, encoding="utf-8") as yaml_file: try: yaml_content = yaml.safe_load(yaml_file) - return yaml_content if yaml_content else default_value + return yaml_content or default_value except Exception as e: raise YAMLError(f"Failed to load YAML file {file_path}: {e}") except Exception as e: diff --git a/api/core/workflow/graph_engine/entities/graph.py b/api/core/workflow/graph_engine/entities/graph.py index c156dd8c98646..20efe91a59b9a 100644 --- a/api/core/workflow/graph_engine/entities/graph.py +++ b/api/core/workflow/graph_engine/entities/graph.py @@ -268,7 +268,7 @@ def _check_connected_to_previous_node(cls, route: list[str], edge_mapping: dict[ f"Node {graph_edge.source_node_id} is connected to the previous node, please check the graph." 
) - new_route = route[:] + new_route = route.copy() new_route.append(graph_edge.target_node_id) cls._check_connected_to_previous_node( route=new_route, @@ -679,8 +679,7 @@ def _is_node_in_routes( all_routes_node_ids = set() parallel_start_node_ids: dict[str, list[str]] = {} for branch_node_id, node_ids in routes_node_ids.items(): - for node_id in node_ids: - all_routes_node_ids.add(node_id) + all_routes_node_ids.update(node_ids) if branch_node_id in reverse_edge_mapping: for graph_edge in reverse_edge_mapping[branch_node_id]: diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 73164fff9ae5a..9da7ad99f386a 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -74,7 +74,7 @@ def _check_string(self, value: str, variable: str) -> str: :return: """ if not isinstance(value, str): - if isinstance(value, type(None)): + if value is None: return None else: raise ValueError(f"Output variable `{variable}` must be a string") @@ -95,7 +95,7 @@ def _check_number(self, value: Union[int, float], variable: str) -> Union[int, f :return: """ if not isinstance(value, int | float): - if isinstance(value, type(None)): + if value is None: return None else: raise ValueError(f"Output variable `{variable}` must be a number") @@ -182,7 +182,7 @@ def _transform_result( f"Output {prefix}.{output_name} is not a valid array." f" make sure all elements are of the same type." ) - elif isinstance(output_value, type(None)): + elif output_value is None: pass else: raise ValueError(f"Output {prefix}.{output_name} is not a valid type.") @@ -284,7 +284,7 @@ def _transform_result( for i, value in enumerate(result[output_name]): if not isinstance(value, dict): - if isinstance(value, type(None)): + if value is None: pass else: raise ValueError( diff --git a/api/core/workflow/nodes/if_else/if_else_node.py b/api/core/workflow/nodes/if_else/if_else_node.py index 5b4737c6e5911..37384202d868e 100644 --- a/api/core/workflow/nodes/if_else/if_else_node.py +++ b/api/core/workflow/nodes/if_else/if_else_node.py @@ -79,7 +79,7 @@ def _run(self) -> NodeRunResult: status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=node_inputs, process_data=process_datas, - edge_source_handle=selected_case_id if selected_case_id else "false", # Use case ID or 'default' + edge_source_handle=selected_case_id or "false", # Use case ID or 'default' outputs=outputs, ) diff --git a/api/core/workflow/nodes/llm/llm_node.py b/api/core/workflow/nodes/llm/llm_node.py index 049c211488283..3d336b0b0bbd5 100644 --- a/api/core/workflow/nodes/llm/llm_node.py +++ b/api/core/workflow/nodes/llm/llm_node.py @@ -580,7 +580,7 @@ def _fetch_prompt_messages( prompt_messages = prompt_transform.get_prompt( prompt_template=node_data.prompt_template, inputs=inputs, - query=query if query else "", + query=query or "", files=files, context=context, memory_config=node_data.memory, diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index d860f848ec385..2ae58bc5f7a96 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -250,7 +250,7 @@ def _get_prompt_template( for class_ in classes: category = {"category_id": class_.id, "category_name": class_.name} categories.append(category) - instruction = node_data.instruction if node_data.instruction else "" + instruction = 
node_data.instruction or "" input_text = query memory_str = "" if memory: diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py index 59375b1a0b1a8..de7c0f4dfeb74 100644 --- a/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py +++ b/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py @@ -18,8 +18,7 @@ def handle(sender, **kwargs): added_dataset_ids = dataset_ids else: old_dataset_ids = set() - for app_dataset_join in app_dataset_joins: - old_dataset_ids.add(app_dataset_join.dataset_id) + old_dataset_ids.update(app_dataset_join.dataset_id for app_dataset_join in app_dataset_joins) added_dataset_ids = dataset_ids - old_dataset_ids removed_dataset_ids = old_dataset_ids - dataset_ids diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py index 333b85ecb2907..c5e98e263f74f 100644 --- a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py +++ b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py @@ -22,8 +22,7 @@ def handle(sender, **kwargs): added_dataset_ids = dataset_ids else: old_dataset_ids = set() - for app_dataset_join in app_dataset_joins: - old_dataset_ids.add(app_dataset_join.dataset_id) + old_dataset_ids.update(app_dataset_join.dataset_id for app_dataset_join in app_dataset_joins) added_dataset_ids = dataset_ids - old_dataset_ids removed_dataset_ids = old_dataset_ids - dataset_ids diff --git a/api/extensions/storage/local_storage.py b/api/extensions/storage/local_storage.py index 46ee4bf80f8e6..f833ae85dc54e 100644 --- a/api/extensions/storage/local_storage.py +++ b/api/extensions/storage/local_storage.py @@ -1,6 +1,7 @@ import os import shutil from collections.abc import Generator +from pathlib import Path from flask import Flask @@ -26,8 +27,7 @@ def save(self, filename, data): folder = os.path.dirname(filename) os.makedirs(folder, exist_ok=True) - with open(os.path.join(os.getcwd(), filename), "wb") as f: - f.write(data) + Path(os.path.join(os.getcwd(), filename)).write_bytes(data) def load_once(self, filename: str) -> bytes: if not self.folder or self.folder.endswith("/"): @@ -38,9 +38,7 @@ def load_once(self, filename: str) -> bytes: if not os.path.exists(filename): raise FileNotFoundError("File not found") - with open(filename, "rb") as f: - data = f.read() - + data = Path(filename).read_bytes() return data def load_stream(self, filename: str) -> Generator: diff --git a/api/models/dataset.py b/api/models/dataset.py index 55f6ed3180b9c..0da35910cd4b2 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -144,7 +144,7 @@ def retrieval_model_dict(self): "top_k": 2, "score_threshold_enabled": False, } - return self.retrieval_model if self.retrieval_model else default_retrieval_model + return self.retrieval_model or default_retrieval_model @property def tags(self): @@ -160,7 +160,7 @@ def tags(self): .all() ) - return tags if tags else [] + return tags or [] @staticmethod def gen_collection_name_by_id(dataset_id: str) -> str: diff --git a/api/models/model.py b/api/models/model.py index 8ab3026522faa..a8b2e00ee4400 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -118,7 +118,7 @@ def workflow(self) -> Optional["Workflow"]: @property def api_base_url(self): - return 
(dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL else request.host_url.rstrip("/")) + "/v1" + return (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1" @property def tenant(self): @@ -207,7 +207,7 @@ def tags(self): .all() ) - return tags if tags else [] + return tags or [] class AppModelConfig(db.Model): @@ -908,7 +908,7 @@ def files(self): "id": message_file.id, "type": message_file.type, "url": url, - "belongs_to": message_file.belongs_to if message_file.belongs_to else "user", + "belongs_to": message_file.belongs_to or "user", } ) @@ -1212,7 +1212,7 @@ def generate_code(n): @property def app_base_url(self): - return dify_config.APP_WEB_URL if dify_config.APP_WEB_URL else request.url_root.rstrip("/") + return dify_config.APP_WEB_URL or request.url_root.rstrip("/") class ApiToken(db.Model): @@ -1488,7 +1488,7 @@ class TraceAppConfig(db.Model): @property def tracing_config_dict(self): - return self.tracing_config if self.tracing_config else {} + return self.tracing_config or {} @property def tracing_config_str(self): diff --git a/api/pyproject.toml b/api/pyproject.toml index 616794cf3abe5..7d64d07678b37 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -15,6 +15,7 @@ select = [ "C4", # flake8-comprehensions "E", # pycodestyle E rules "F", # pyflakes rules + "FURB", # refurb rules "I", # isort rules "N", # pep8-naming "RUF019", # unnecessary-key-check @@ -37,6 +38,8 @@ ignore = [ "F405", # undefined-local-with-import-star-usage "F821", # undefined-name "F841", # unused-variable + "FURB113", # repeated-append + "FURB152", # math-constant "UP007", # non-pep604-annotation "UP032", # f-string "B005", # strip-with-multi-characters diff --git a/api/services/account_service.py b/api/services/account_service.py index 7fb42f9e81702..e839ae54bae72 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -544,7 +544,7 @@ def register( """Register account""" try: account = AccountService.create_account( - email=email, name=name, interface_language=language if language else languages[0], password=password + email=email, name=name, interface_language=language or languages[0], password=password ) account.status = AccountStatus.ACTIVE.value if not status else status.value account.initialized_at = datetime.now(timezone.utc).replace(tzinfo=None) diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 73c446b83b7e7..8dcefbadc084d 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -81,13 +81,11 @@ def import_and_create_new_app(cls, tenant_id: str, data: str, args: dict, accoun raise ValueError("Missing app in data argument") # get app basic info - name = args.get("name") if args.get("name") else app_data.get("name") - description = args.get("description") if args.get("description") else app_data.get("description", "") - icon_type = args.get("icon_type") if args.get("icon_type") else app_data.get("icon_type") - icon = args.get("icon") if args.get("icon") else app_data.get("icon") - icon_background = ( - args.get("icon_background") if args.get("icon_background") else app_data.get("icon_background") - ) + name = args.get("name") or app_data.get("name") + description = args.get("description") or app_data.get("description", "") + icon_type = args.get("icon_type") or app_data.get("icon_type") + icon = args.get("icon") or app_data.get("icon") + icon_background = args.get("icon_background") or app_data.get("icon_background") use_icon_as_answer_icon = 
app_data.get("use_icon_as_answer_icon", False) # import dsl and create app diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index cce0874cf4f9b..a52b5b7e7d785 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -155,7 +155,7 @@ def create_empty_dataset( dataset.tenant_id = tenant_id dataset.embedding_model_provider = embedding_model.provider if embedding_model else None dataset.embedding_model = embedding_model.model if embedding_model else None - dataset.permission = permission if permission else DatasetPermissionEnum.ONLY_ME + dataset.permission = permission or DatasetPermissionEnum.ONLY_ME db.session.add(dataset) db.session.commit() return dataset @@ -681,11 +681,7 @@ def save_document_with_dataset_id( "score_threshold_enabled": False, } - dataset.retrieval_model = ( - document_data.get("retrieval_model") - if document_data.get("retrieval_model") - else default_retrieval_model - ) + dataset.retrieval_model = document_data.get("retrieval_model") or default_retrieval_model documents = [] batch = time.strftime("%Y%m%d%H%M%S") + str(random.randint(100000, 999999)) diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 2f911f5036641..a9f963dbacfeb 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -33,7 +33,7 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_mode # get retrieval model , if the model is not setting , using default if not retrieval_model: - retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model + retrieval_model = dataset.retrieval_model or default_retrieval_model all_documents = RetrievalService.retrieve( retrieval_method=retrieval_model.get("search_method", "semantic_search"), @@ -46,9 +46,7 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_mode reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, - reranking_mode=retrieval_model.get("reranking_mode") - if retrieval_model.get("reranking_mode") - else "reranking_model", + reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", weights=retrieval_model.get("weights", None), ) diff --git a/api/services/model_provider_service.py b/api/services/model_provider_service.py index c0f3c40762541..384a072b371fd 100644 --- a/api/services/model_provider_service.py +++ b/api/services/model_provider_service.py @@ -1,6 +1,7 @@ import logging import mimetypes import os +from pathlib import Path from typing import Optional, cast import requests @@ -453,9 +454,8 @@ def get_model_provider_icon( mimetype = mimetype or "application/octet-stream" # read binary from file - with open(file_path, "rb") as f: - byte_data = f.read() - return byte_data, mimetype + byte_data = Path(file_path).read_bytes() + return byte_data, mimetype def switch_preferred_provider(self, tenant_id: str, provider: str, preferred_provider_type: str) -> None: """ diff --git a/api/services/recommended_app_service.py b/api/services/recommended_app_service.py index 10abf0a764c75..daec8393d092e 100644 --- a/api/services/recommended_app_service.py +++ b/api/services/recommended_app_service.py @@ -1,6 +1,7 @@ import json import logging from os import path +from pathlib import Path from typing import Optional import requests @@ -218,10 +219,9 @@ def _get_builtin_data(cls) -> dict: return cls.builtin_data root_path = current_app.root_path - with 
open(path.join(root_path, "constants", "recommended_apps.json"), encoding="utf-8") as f: - json_data = f.read() - data = json.loads(json_data) - cls.builtin_data = data + cls.builtin_data = json.loads( + Path(path.join(root_path, "constants", "recommended_apps.json")).read_text(encoding="utf-8") + ) return cls.builtin_data diff --git a/api/services/tag_service.py b/api/services/tag_service.py index 0c17485a9f613..5e2851cd8febc 100644 --- a/api/services/tag_service.py +++ b/api/services/tag_service.py @@ -57,7 +57,7 @@ def get_tags_by_target_id(tag_type: str, current_tenant_id: str, target_id: str) .all() ) - return tags if tags else [] + return tags or [] @staticmethod def save_tags(args: dict) -> Tag: diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index dc8cebb587a2d..e2e49d017ef16 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -1,5 +1,6 @@ import json import logging +from pathlib import Path from configs import dify_config from core.helper.position_helper import is_filtered @@ -183,8 +184,7 @@ def get_builtin_tool_provider_icon(provider: str): get tool provider icon and it's mimetype """ icon_path, mime_type = ToolManager.get_builtin_provider_icon(provider) - with open(icon_path, "rb") as f: - icon_bytes = f.read() + icon_bytes = Path(icon_path).read_bytes() return icon_bytes, mime_type diff --git a/api/services/website_service.py b/api/services/website_service.py index 6dff35d63f192..fea605cf30b41 100644 --- a/api/services/website_service.py +++ b/api/services/website_service.py @@ -50,8 +50,8 @@ def crawl_url(cls, args: dict) -> dict: excludes = options.get("excludes").split(",") if options.get("excludes") else [] params = { "crawlerOptions": { - "includes": includes if includes else [], - "excludes": excludes if excludes else [], + "includes": includes or [], + "excludes": excludes or [], "generateImgAltText": True, "limit": options.get("limit", 1), "returnOnlyUrls": False, diff --git a/api/services/workflow/workflow_converter.py b/api/services/workflow/workflow_converter.py index 4b845be2f4d01..db1a036e682bc 100644 --- a/api/services/workflow/workflow_converter.py +++ b/api/services/workflow/workflow_converter.py @@ -63,11 +63,11 @@ def convert_to_workflow( # create new app new_app = App() new_app.tenant_id = app_model.tenant_id - new_app.name = name if name else app_model.name + "(workflow)" + new_app.name = name or app_model.name + "(workflow)" new_app.mode = AppMode.ADVANCED_CHAT.value if app_model.mode == AppMode.CHAT.value else AppMode.WORKFLOW.value - new_app.icon_type = icon_type if icon_type else app_model.icon_type - new_app.icon = icon if icon else app_model.icon - new_app.icon_background = icon_background if icon_background else app_model.icon_background + new_app.icon_type = icon_type or app_model.icon_type + new_app.icon = icon or app_model.icon + new_app.icon_background = icon_background or app_model.icon_background new_app.enable_site = app_model.enable_site new_app.enable_api = app_model.enable_api new_app.api_rpm = app_model.api_rpm diff --git a/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py b/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py index 83317e59de593..b9a721c803fc5 100644 --- a/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py +++ b/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py @@ -51,7 +51,7 @@ def invoke_embeddings(server_url: str, 
texts: list[str]) -> dict: # } # } embeddings = [] - for idx, text in enumerate(texts): + for idx in range(len(texts)): embedding = [0.1] * 768 embeddings.append( { From 8815511ccb87ad85d7f9e03f4472dbf3be09fd76 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Thu, 12 Sep 2024 18:09:16 +0800 Subject: [PATCH 06/25] chore: apply flake8-pytest-style linter rules (#8307) --- api/pyproject.toml | 2 ++ .../model_runtime/xinference/test_llm.py | 12 ++++++------ .../utils/test_module_import_helper.py | 6 ++++-- .../vdb/opensearch/test_opensearch.py | 2 +- api/tests/unit_tests/conftest.py | 2 +- .../unit_tests/core/helper/test_ssrf_proxy.py | 8 ++++---- api/tests/unit_tests/libs/test_email.py | 14 +++++--------- 7 files changed, 23 insertions(+), 23 deletions(-) diff --git a/api/pyproject.toml b/api/pyproject.toml index 7d64d07678b37..1822f59419984 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -18,6 +18,7 @@ select = [ "FURB", # refurb rules "I", # isort rules "N", # pep8-naming + "PT", # flake8-pytest-style rules "RUF019", # unnecessary-key-check "RUF100", # unused-noqa "RUF101", # redirected-noqa @@ -50,6 +51,7 @@ ignore = [ "B905", # zip-without-explicit-strict "N806", # non-lowercase-variable-in-function "N815", # mixed-case-variable-in-class-scope + "PT011", # pytest-raises-too-broad "SIM102", # collapsible-if "SIM103", # needless-bool "SIM105", # suppressible-exception diff --git a/api/tests/integration_tests/model_runtime/xinference/test_llm.py b/api/tests/integration_tests/model_runtime/xinference/test_llm.py index 7db59fddeff78..fb5e03855d212 100644 --- a/api/tests/integration_tests/model_runtime/xinference/test_llm.py +++ b/api/tests/integration_tests/model_runtime/xinference/test_llm.py @@ -20,7 +20,7 @@ from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True) def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock): model = XinferenceAILargeLanguageModel() @@ -45,7 +45,7 @@ def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference ) -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True) def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock): model = XinferenceAILargeLanguageModel() @@ -75,7 +75,7 @@ def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock): assert response.usage.total_tokens > 0 -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True) def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock): model = XinferenceAILargeLanguageModel() @@ -236,7 +236,7 @@ def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock): # assert response.message.tool_calls[0].function.name == 'get_current_weather' -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True) def test_validate_credentials_for_generation_model(setup_openai_mock, 
setup_xinference_mock): model = XinferenceAILargeLanguageModel() @@ -261,7 +261,7 @@ def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinf ) -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True) def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock): model = XinferenceAILargeLanguageModel() @@ -286,7 +286,7 @@ def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock): assert response.usage.total_tokens > 0 -@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True) +@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True) def test_invoke_stream_generation_model(setup_openai_mock, setup_xinference_mock): model = XinferenceAILargeLanguageModel() diff --git a/api/tests/integration_tests/utils/test_module_import_helper.py b/api/tests/integration_tests/utils/test_module_import_helper.py index 7d32f5ae66f5d..50725415e4174 100644 --- a/api/tests/integration_tests/utils/test_module_import_helper.py +++ b/api/tests/integration_tests/utils/test_module_import_helper.py @@ -9,7 +9,8 @@ def test_loading_subclass_from_source(): module = load_single_subclass_from_source( module_name="ChildClass", script_path=os.path.join(current_path, "child_class.py"), parent_type=ParentClass ) - assert module and module.__name__ == "ChildClass" + assert module + assert module.__name__ == "ChildClass" def test_load_import_module_from_source(): @@ -17,7 +18,8 @@ def test_load_import_module_from_source(): module = import_module_from_source( module_name="ChildClass", py_file_path=os.path.join(current_path, "child_class.py") ) - assert module and module.__name__ == "ChildClass" + assert module + assert module.__name__ == "ChildClass" def test_lazy_loading_subclass_from_source(): diff --git a/api/tests/integration_tests/vdb/opensearch/test_opensearch.py b/api/tests/integration_tests/vdb/opensearch/test_opensearch.py index a99b81d41eba7..2666ce2e1ebe5 100644 --- a/api/tests/integration_tests/vdb/opensearch/test_opensearch.py +++ b/api/tests/integration_tests/vdb/opensearch/test_opensearch.py @@ -34,7 +34,7 @@ def setup_method(self): self.vector._client = MagicMock() @pytest.mark.parametrize( - "search_response, expected_length, expected_doc_id", + ("search_response", "expected_length", "expected_doc_id"), [ ( { diff --git a/api/tests/unit_tests/conftest.py b/api/tests/unit_tests/conftest.py index ca3082953aef5..621c995a4bd64 100644 --- a/api/tests/unit_tests/conftest.py +++ b/api/tests/unit_tests/conftest.py @@ -13,7 +13,7 @@ CACHED_APP.config.update({"TESTING": True}) -@pytest.fixture() +@pytest.fixture def app() -> Flask: return CACHED_APP diff --git a/api/tests/unit_tests/core/helper/test_ssrf_proxy.py b/api/tests/unit_tests/core/helper/test_ssrf_proxy.py index 7a0bc70c63eea..d6e6b0b79ca81 100644 --- a/api/tests/unit_tests/core/helper/test_ssrf_proxy.py +++ b/api/tests/unit_tests/core/helper/test_ssrf_proxy.py @@ -1,6 +1,8 @@ import random from unittest.mock import MagicMock, patch +import pytest + from core.helper.ssrf_proxy import SSRF_DEFAULT_MAX_RETRIES, STATUS_FORCELIST, make_request @@ -22,11 +24,9 @@ def test_retry_exceed_max_retries(mock_request): side_effects = [mock_response] * SSRF_DEFAULT_MAX_RETRIES mock_request.side_effect = side_effects - try: + with 
pytest.raises(Exception) as e: make_request("GET", "http://example.com", max_retries=SSRF_DEFAULT_MAX_RETRIES - 1) - raise AssertionError("Expected Exception not raised") - except Exception as e: - assert str(e) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com" + assert str(e.value) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com" @patch("httpx.request") diff --git a/api/tests/unit_tests/libs/test_email.py b/api/tests/unit_tests/libs/test_email.py index f8234f3f3be3c..ae0177791b598 100644 --- a/api/tests/unit_tests/libs/test_email.py +++ b/api/tests/unit_tests/libs/test_email.py @@ -1,3 +1,5 @@ +import pytest + from libs.helper import email @@ -9,17 +11,11 @@ def test_email_with_valid_email(): def test_email_with_invalid_email(): - try: + with pytest.raises(ValueError, match="invalid_email is not a valid email."): email("invalid_email") - except ValueError as e: - assert str(e) == "invalid_email is not a valid email." - try: + with pytest.raises(ValueError, match="@example.com is not a valid email."): email("@example.com") - except ValueError as e: - assert str(e) == "@example.com is not a valid email." - try: + with pytest.raises(ValueError, match="()@example.com is not a valid email."): email("()@example.com") - except ValueError as e: - assert str(e) == "()@example.com is not a valid email." From d4985fb3aaa052e878da9e6d8d00345b1cf354f8 Mon Sep 17 00:00:00 2001 From: ybalbert001 <120714773+ybalbert001@users.noreply.github.com> Date: Thu, 12 Sep 2024 19:15:20 +0800 Subject: [PATCH 07/25] Fix: Support Bedrock cross region inference [#8190](https://github.com/langgenius/dify/issues/8190) (#8317) --- .../llm/eu.anthropic.claude-3-haiku-v1.yaml | 59 ++ .../eu.anthropic.claude-3-sonnet-v1.5.yaml | 58 ++ .../llm/eu.anthropic.claude-3-sonnet-v1.yaml | 58 ++ .../model_providers/bedrock/llm/llm.py | 2 + .../llm/us.anthropic.claude-3-haiku-v1.yaml | 59 ++ .../llm/us.anthropic.claude-3-opus-v1.yaml | 59 ++ .../us.anthropic.claude-3-sonnet-v1.5.yaml | 58 ++ .../llm/us.anthropic.claude-3-sonnet-v1.yaml | 58 ++ api/poetry.lock | 824 +++++++++--------- api/pyproject.toml | 2 +- 10 files changed, 838 insertions(+), 399 deletions(-) create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml new file mode 100644 index 0000000000000..fe5f54de13531 --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml @@ -0,0 +1,59 @@ +model: eu.anthropic.claude-3-haiku-20240307-v1:0 +label: + en_US: Claude 3 Haiku(Cross Region Inference) +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call 
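# Illustrative note, not part of this file: cross-region inference reuses a base
# model ID behind a geography prefix; the llm.py change later in this patch matches
# these prefixes so the profiles take the same Converse API code path:
#   anthropic.claude-3-haiku-20240307-v1:0      # base, single-region model ID
#   us.anthropic.claude-3-haiku-20240307-v1:0   # US cross-region inference profile
#   eu.anthropic.claude-3-haiku-20240307-v1:0   # EU cross-region inference profile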
+model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + # docs: https://docs.anthropic.com/claude/docs/system-prompts + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. + - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. + - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. +pricing: + input: '0.00025' + output: '0.00125' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml new file mode 100644 index 0000000000000..9f8d029a57cd9 --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml @@ -0,0 +1,58 @@ +model: eu.anthropic.claude-3-5-sonnet-20240620-v1:0 +label: + en_US: Claude 3.5 Sonnet(Cross Region Inference) +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. 
+ - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. + - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. +pricing: + input: '0.003' + output: '0.015' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml new file mode 100644 index 0000000000000..bfaf5abb8e91d --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml @@ -0,0 +1,58 @@ +model: eu.anthropic.claude-3-sonnet-20240229-v1:0 +label: + en_US: Claude 3 Sonnet(Cross Region Inference) +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. + - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. + - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
+pricing: + input: '0.003' + output: '0.015' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index 953ac0741f1c4..c34c20ced35a2 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -61,6 +61,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel): CONVERSE_API_ENABLED_MODEL_INFO = [ {"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False}, {"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False}, + {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, + {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, {"prefix": "anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, {"prefix": "meta.llama", "support_system_prompts": True, "support_tool_use": False}, {"prefix": "mistral.mistral-7b-instruct", "support_system_prompts": False, "support_tool_use": False}, diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml new file mode 100644 index 0000000000000..58c1f057796de --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml @@ -0,0 +1,59 @@ +model: us.anthropic.claude-3-haiku-20240307-v1:0 +label: + en_US: Claude 3 Haiku(Cross Region Inference) +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + # docs: https://docs.anthropic.com/claude/docs/system-prompts + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. + - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. + - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
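# A minimal usage sketch, not part of this config (assumes a boto3 version with
# Converse API support and configured AWS credentials): a cross-region inference
# profile ID is passed to the Bedrock Converse API exactly like a plain model ID.
#
#   import boto3
#
#   client = boto3.client("bedrock-runtime", region_name="us-east-1")
#   response = client.converse(
#       modelId="us.anthropic.claude-3-haiku-20240307-v1:0",
#       messages=[{"role": "user", "content": [{"text": "Hello"}]}],
#       inferenceConfig={"maxTokens": 256, "temperature": 1.0},
#   )
#   print(response["output"]["message"]["content"][0]["text"])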
+pricing:
+  input: '0.00025'
+  output: '0.00125'
+  unit: '0.001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml
new file mode 100644
index 0000000000000..6b9e1ec067b98
--- /dev/null
+++ b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml
@@ -0,0 +1,59 @@
+model: us.anthropic.claude-3-opus-20240229-v1:0
+label:
+  en_US: Claude 3 Opus (Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 200000
+# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    type: int
+    default: 4096
+    min: 1
+    max: 4096
+    help:
+      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
+      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
+  # docs: https://docs.anthropic.com/claude/docs/system-prompts
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # the tip in the AWS docs is wrong: the max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long-tail, low-probability responses.
+pricing:
+  input: '0.015'
+  output: '0.075'
+  unit: '0.001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml
new file mode 100644
index 0000000000000..f1e0d6c5a2d1f
--- /dev/null
+++ b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml
@@ -0,0 +1,58 @@
+model: us.anthropic.claude-3-5-sonnet-20240620-v1:0
+label:
+  en_US: Claude 3.5 Sonnet (Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 200000
+# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    type: int
+    default: 4096
+    min: 1
+    max: 4096
+    help:
+      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
+      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # the tip in the AWS docs is wrong: the max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long-tail, low-probability responses.
+pricing:
+  input: '0.003'
+  output: '0.015'
+  unit: '0.001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml
new file mode 100644
index 0000000000000..dce50bf4b5de3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml
@@ -0,0 +1,58 @@
+model: us.anthropic.claude-3-sonnet-20240229-v1:0
+label:
+  en_US: Claude 3 Sonnet (Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 200000
+# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    type: int
+    default: 4096
+    min: 1
+    max: 4096
+    help:
+      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
+      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # the tip in the AWS docs is wrong: the max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long-tail, low-probability responses.
+pricing:
+  input: '0.003'
+  output: '0.015'
+  unit: '0.001'
+  currency: USD
diff --git a/api/poetry.lock b/api/poetry.lock
index 6023f98e2ad5a..36b52f68bed3c 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -569,13 +569,13 @@ typing-extensions = ">=4.6.0"

 [[package]]
 name = "azure-ai-ml"
-version = "1.19.0"
+version = "1.20.0"
 description = "Microsoft Azure Machine Learning Client Library for Python"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "azure-ai-ml-1.19.0.tar.gz", hash = "sha256:94bb1afbb0497e539ae75455fc4a51b6942b5b68b3a275727ecce6ceb250eff9"},
-    {file = "azure_ai_ml-1.19.0-py3-none-any.whl", hash = "sha256:f0385af06efbeae1f83113613e45343508d1288fd2f05857619e7c7d4d4f5302"},
+    {file = "azure-ai-ml-1.20.0.tar.gz", hash = "sha256:6432a0da1b7250cb0db5a1c33202e0419935e19ea32d4c2b3220705f8f1d4101"},
+    {file = "azure_ai_ml-1.20.0-py3-none-any.whl", hash = "sha256:c7eb3c5ccf82a6ee94403c3e5060763decd38cf03ff2620a4a6577526e605104"},
 ]

 [package.dependencies]
@@ -809,17 +809,17 @@ files = [

 [[package]]
 name = "boto3"
-version = "1.34.148"
+version = "1.35.17"
 description = "The AWS SDK for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "boto3-1.34.148-py3-none-any.whl", hash = "sha256:d63d36e5a34533ba69188d56f96da132730d5e9932c4e11c02d79319cd1afcec"},
-    {file = "boto3-1.34.148.tar.gz", hash = "sha256:2058397f0a92c301e3116e9e65fbbc70ea49270c250882d65043d19b7c6e2d17"},
+    {file = "boto3-1.35.17-py3-none-any.whl", hash = "sha256:67268aa6c4043e9fdeb4ab3c1e9032f44a6fa168c789af5e351f63f1f8880a2f"},
+    {file = "boto3-1.35.17.tar.gz", hash = "sha256:4a32db8793569ee5f13c5bf3efb260193353cb8946bf6426e3c330b61c68e59d"},
 ]

 [package.dependencies]
-botocore = ">=1.34.148,<1.35.0"
+botocore = ">=1.35.17,<1.36.0"
 jmespath = ">=0.7.1,<2.0.0"
 s3transfer = ">=0.10.0,<0.11.0"

 [package.extras]
 crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]

 [[package]]
 name = "botocore"
-version = "1.34.162"
+version = "1.35.17"
 description = "Low-level, data-driven core of boto 3."
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.34.162-py3-none-any.whl", hash = "sha256:2d918b02db88d27a75b48275e6fb2506e9adaaddbec1ffa6a8a0898b34e769be"}, - {file = "botocore-1.34.162.tar.gz", hash = "sha256:adc23be4fb99ad31961236342b7cbf3c0bfc62532cd02852196032e8c0d682f3"}, + {file = "botocore-1.35.17-py3-none-any.whl", hash = "sha256:a93f773ca93139529b5d36730b382dbee63ab4c7f26129aa5c84835255ca999d"}, + {file = "botocore-1.35.17.tar.gz", hash = "sha256:0d35d03ea647b5d464c7f77bdab6fb23ae5d49752b13cf97ab84444518c7b1bd"}, ] [package.dependencies] @@ -843,7 +843,7 @@ python-dateutil = ">=2.1,<3.0.0" urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.21.2)"] +crt = ["awscrt (==0.21.5)"] [[package]] name = "bottleneck" @@ -1049,13 +1049,13 @@ beautifulsoup4 = "*" [[package]] name = "build" -version = "1.2.1" +version = "1.2.2" description = "A simple, correct Python build frontend" optional = false python-versions = ">=3.8" files = [ - {file = "build-1.2.1-py3-none-any.whl", hash = "sha256:75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4"}, - {file = "build-1.2.1.tar.gz", hash = "sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d"}, + {file = "build-1.2.2-py3-none-any.whl", hash = "sha256:277ccc71619d98afdd841a0e96ac9fe1593b823af481d3b0cea748e8894e0613"}, + {file = "build-1.2.2.tar.gz", hash = "sha256:119b2fb462adef986483438377a13b2f42064a2a3a4161f24a0cca698a07ac8c"}, ] [package.dependencies] @@ -2241,57 +2241,57 @@ typing_extensions = ">=4.0,<5.0" [[package]] name = "duckdb" -version = "1.0.0" +version = "1.1.0" description = "DuckDB in-process database" optional = false python-versions = ">=3.7.0" files = [ - {file = "duckdb-1.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4a8ce2d1f9e1c23b9bab3ae4ca7997e9822e21563ff8f646992663f66d050211"}, - {file = "duckdb-1.0.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:19797670f20f430196e48d25d082a264b66150c264c1e8eae8e22c64c2c5f3f5"}, - {file = "duckdb-1.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:b71c342090fe117b35d866a91ad6bffce61cd6ff3e0cff4003f93fc1506da0d8"}, - {file = "duckdb-1.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dd69f44ad212c35ae2ea736b0e643ea2b70f204b8dff483af1491b0e2a4cec"}, - {file = "duckdb-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8da5f293ecb4f99daa9a9352c5fd1312a6ab02b464653a0c3a25ab7065c45d4d"}, - {file = "duckdb-1.0.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3207936da9967ddbb60644ec291eb934d5819b08169bc35d08b2dedbe7068c60"}, - {file = "duckdb-1.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1128d6c9c33e883b1f5df6b57c1eb46b7ab1baf2650912d77ee769aaa05111f9"}, - {file = "duckdb-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:02310d263474d0ac238646677feff47190ffb82544c018b2ff732a4cb462c6ef"}, - {file = "duckdb-1.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:75586791ab2702719c284157b65ecefe12d0cca9041da474391896ddd9aa71a4"}, - {file = "duckdb-1.0.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:83bb415fc7994e641344f3489e40430ce083b78963cb1057bf714ac3a58da3ba"}, - {file = "duckdb-1.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:bee2e0b415074e84c5a2cefd91f6b5ebeb4283e7196ba4ef65175a7cef298b57"}, - {file = "duckdb-1.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fa5a4110d2a499312609544ad0be61e85a5cdad90e5b6d75ad16b300bf075b90"}, - {file = "duckdb-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa389e6a382d4707b5f3d1bc2087895925ebb92b77e9fe3bfb23c9b98372fdc"}, - {file = "duckdb-1.0.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ede6f5277dd851f1a4586b0c78dc93f6c26da45e12b23ee0e88c76519cbdbe0"}, - {file = "duckdb-1.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0b88cdbc0d5c3e3d7545a341784dc6cafd90fc035f17b2f04bf1e870c68456e5"}, - {file = "duckdb-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd1693cdd15375156f7fff4745debc14e5c54928589f67b87fb8eace9880c370"}, - {file = "duckdb-1.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c65a7fe8a8ce21b985356ee3ec0c3d3b3b2234e288e64b4cfb03356dbe6e5583"}, - {file = "duckdb-1.0.0-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:e5a8eda554379b3a43b07bad00968acc14dd3e518c9fbe8f128b484cf95e3d16"}, - {file = "duckdb-1.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:a1b6acdd54c4a7b43bd7cb584975a1b2ff88ea1a31607a2b734b17960e7d3088"}, - {file = "duckdb-1.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a677bb1b6a8e7cab4a19874249d8144296e6e39dae38fce66a80f26d15e670df"}, - {file = "duckdb-1.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:752e9d412b0a2871bf615a2ede54be494c6dc289d076974eefbf3af28129c759"}, - {file = "duckdb-1.0.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3aadb99d098c5e32d00dc09421bc63a47134a6a0de9d7cd6abf21780b678663c"}, - {file = "duckdb-1.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83b7091d4da3e9301c4f9378833f5ffe934fb1ad2b387b439ee067b2c10c8bb0"}, - {file = "duckdb-1.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:6a8058d0148b544694cb5ea331db44f6c2a00a7b03776cc4dd1470735c3d5ff7"}, - {file = "duckdb-1.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e40cb20e5ee19d44bc66ec99969af791702a049079dc5f248c33b1c56af055f4"}, - {file = "duckdb-1.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7bce1bc0de9af9f47328e24e6e7e39da30093179b1c031897c042dd94a59c8e"}, - {file = "duckdb-1.0.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8355507f7a04bc0a3666958f4414a58e06141d603e91c0fa5a7c50e49867fb6d"}, - {file = "duckdb-1.0.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:39f1a46f5a45ad2886dc9b02ce5b484f437f90de66c327f86606d9ba4479d475"}, - {file = "duckdb-1.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a6d29ba477b27ae41676b62c8fae8d04ee7cbe458127a44f6049888231ca58fa"}, - {file = "duckdb-1.0.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:1bea713c1925918714328da76e79a1f7651b2b503511498ccf5e007a7e67d49e"}, - {file = "duckdb-1.0.0-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:bfe67f3bcf181edbf6f918b8c963eb060e6aa26697d86590da4edc5707205450"}, - {file = "duckdb-1.0.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:dbc6093a75242f002be1d96a6ace3fdf1d002c813e67baff52112e899de9292f"}, - {file = "duckdb-1.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba1881a2b11c507cee18f8fd9ef10100be066fddaa2c20fba1f9a664245cd6d8"}, - {file = "duckdb-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:445d0bb35087c522705c724a75f9f1c13f1eb017305b694d2686218d653c8142"}, - {file = "duckdb-1.0.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", 
hash = "sha256:224553432e84432ffb9684f33206572477049b371ce68cc313a01e214f2fbdda"}, - {file = "duckdb-1.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d3914032e47c4e76636ad986d466b63fdea65e37be8a6dfc484ed3f462c4fde4"}, - {file = "duckdb-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:af9128a2eb7e1bb50cd2c2020d825fb2946fdad0a2558920cd5411d998999334"}, - {file = "duckdb-1.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dd2659a5dbc0df0de68f617a605bf12fe4da85ba24f67c08730984a0892087e8"}, - {file = "duckdb-1.0.0-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:ac5a4afb0bc20725e734e0b2c17e99a274de4801aff0d4e765d276b99dad6d90"}, - {file = "duckdb-1.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:2c5a53bee3668d6e84c0536164589d5127b23d298e4c443d83f55e4150fafe61"}, - {file = "duckdb-1.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b980713244d7708b25ee0a73de0c65f0e5521c47a0e907f5e1b933d79d972ef6"}, - {file = "duckdb-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21cbd4f9fe7b7a56eff96c3f4d6778770dd370469ca2212eddbae5dd63749db5"}, - {file = "duckdb-1.0.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ed228167c5d49888c5ef36f6f9cbf65011c2daf9dcb53ea8aa7a041ce567b3e4"}, - {file = "duckdb-1.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:46d8395fbcea7231fd5032a250b673cc99352fef349b718a23dea2c0dd2b8dec"}, - {file = "duckdb-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6ad1fc1a4d57e7616944166a5f9417bdbca1ea65c490797e3786e3a42e162d8a"}, - {file = "duckdb-1.0.0.tar.gz", hash = "sha256:a2a059b77bc7d5b76ae9d88e267372deff19c291048d59450c431e166233d453"}, + {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5e4cbc408e6e41146dea89b9044dae7356e353db0c96b183e5583ee02bc6ae5d"}, + {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:6370ae27ec8167ccfbefb94f58ad9fdc7bac142399960549d6d367f233189868"}, + {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4e1c3414f7fd01f4810dc8b335deffc91933a159282d65fef11c1286bc0ded04"}, + {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6bc2a58689adf5520303c5f68b065b9f980bd31f1366c541b8c7490abaf55cd"}, + {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d02be208d2885ca085d4c852b911493b8cdac9d6eae893259da32bd72a437c25"}, + {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:655df442ceebfc6f3fd6c8766e04b60d44dddedfa90275d794f9fab2d3180879"}, + {file = "duckdb-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6e183729bb64be7798ccbfda6283ebf423c869268c25af2b56929e48f763be2f"}, + {file = "duckdb-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:61fb838da51e07ceb0222c4406b059b90e10efcc453c19a3650b73c0112138c4"}, + {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:7807e2f0d3344668e433f0dc1f54bfaddd410589611393e9a7ed56f8dec9514f"}, + {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:3da30b7b466f710d52caa1fdc3ef0bf4176ad7f115953cd9f8b0fbf0f723778f"}, + {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:b9b6a77ef0183f561b1fc2945fcc762a71570ffd33fea4e3a855d413ed596fe4"}, + {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16243e66a9fd0e64ee265f2634d137adc6593f54ddf3ef55cb8a29e1decf6e54"}, + {file = 
"duckdb-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42b910a149e00f40a1766dc74fa309d4255b912a5d2fdcc387287658048650f6"}, + {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47849d546dc4238c0f20e95fe53b621aa5b08684e68fff91fd84a7092be91a17"}, + {file = "duckdb-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11ec967b67159361ceade34095796a8d19368ea5c30cad988f44896b082b0816"}, + {file = "duckdb-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:510b5885ed6c267b9c0e1e7c6138fdffc2dd6f934a5a95b76da85da127213338"}, + {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:657bc7ac64d5faf069a782ae73afac51ef30ae2e5d0e09ce6a09d03db84ab35e"}, + {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:89f3de8cba57d19b41cd3c47dd06d979bd2a2ffead115480e37afbe72b02896d"}, + {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f6486323ab20656d22ffa8f3c6e109dde30d0b327b7c831f22ebcfe747f97fb0"}, + {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78a4510f82431ee3f14db689fe8727a4a9062c8f2fbb3bcfe3bfad3c1a198004"}, + {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64bf2a6e23840d662bd2ac09206a9bd4fa657418884d69e5c352d4456dc70b3c"}, + {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23fc9aa0af74e3803ed90c8d98280fd5bcac8c940592bf6288e8fd60fb051d00"}, + {file = "duckdb-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f3aea31341ce400640dd522e4399b941f66df17e39884f446638fe958d6117c"}, + {file = "duckdb-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:3db4ab31c20de4edaef152930836b38e7662cd71370748fdf2c38ba9cf854dc4"}, + {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3b6b4fe1edfe35f64f403a9f0ab75258cee35abd964356893ee37424174b7e4"}, + {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad02f50d5a2020822d1638fc1a9bcf082056f11d2e15ccfc1c1ed4d0f85a3be"}, + {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb66e9e7391801928ea134dcab12d2e4c97f2ce0391c603a3e480bbb15830bc8"}, + {file = "duckdb-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:069fb7bca459e31edb32a61f0eea95d7a8a766bef7b8318072563abf8e939593"}, + {file = "duckdb-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e39f9b7b62e64e10d421ff04480290a70129c38067d1a4f600e9212b10542c5a"}, + {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:55ef98bcc7ba745752607f1b926e8d9b7ce32c42c423bbad10c44820aefe23a7"}, + {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:e2a08175e43b865c1e9611efd18cacd29ddd69093de442b1ebdf312071df7719"}, + {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:0e3644b1f034012d82b9baa12a7ea306fe71dc6623731b28c753c4a617ff9499"}, + {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:211a33c1ddb5cc609f75eb43772b0b03b45d2fa89bec107e4715267ca907806a"}, + {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e74b6f8a5145abbf7e6c1a2a61f0adbcd493c19b358f524ec9a3cebdf362abb"}, + {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58f1633dd2c5af5088ae2d119418e200855d0699d84f2fae9d46d30f404bcead"}, + 
{file = "duckdb-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d18caea926b1e301c29b140418fca697aad728129e269b4f82c2795a184549e1"}, + {file = "duckdb-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:cd9fb1408942411ad360f8414bc3fbf0091c396ca903d947a10f2e31324d5cbd"}, + {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bd11bc899cebf5ff936d1276a2dfb7b7db08aba3bcc42924afeafc2163bddb43"}, + {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:53825a63193c582a78c152ea53de8d145744ddbeea18f452625a82ebc33eb14a"}, + {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:29dc18087de47563b3859a6b98bbed96e1c96ce5db829646dc3b16a916997e7d"}, + {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecb19319883564237a7a03a104dbe7f445e73519bb67108fcab3d19b6b91fe30"}, + {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aac2fcabe2d5072c252d0b3087365f431de812d8199705089fb073e4d039d19c"}, + {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d89eaaa5df8a57e7d2bc1f4c46493bb1fee319a00155f2015810ad2ace6570ae"}, + {file = "duckdb-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d86a6926313913cd2cc7e08816d3e7f72ba340adf2959279b1a80058be6526d9"}, + {file = "duckdb-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8333f3e85fa2a0f1c222b752c2bd42ea875235ff88492f7bcbb6867d0f644eb"}, + {file = "duckdb-1.1.0.tar.gz", hash = "sha256:b4d4c12b1f98732151bd31377753e0da1a20f6423016d2d097d2e31953ec7c23"}, ] [[package]] @@ -2429,13 +2429,13 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.113.0" +version = "0.114.1" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.113.0-py3-none-any.whl", hash = "sha256:c8d364485b6361fb643d53920a18d58a696e189abcb901ec03b487e35774c476"}, - {file = "fastapi-0.113.0.tar.gz", hash = "sha256:b7cf9684dc154dfc93f8b718e5850577b529889096518df44defa41e73caf50f"}, + {file = "fastapi-0.114.1-py3-none-any.whl", hash = "sha256:5d4746f6e4b7dff0b4f6b6c6d5445645285f662fe75886e99af7ee2d6b58bb3e"}, + {file = "fastapi-0.114.1.tar.gz", hash = "sha256:1d7bbbeabbaae0acb0c22f0ab0b040f642d3093ca3645f8c876b6f91391861d8"}, ] [package.dependencies] @@ -2524,19 +2524,19 @@ sgmllib3k = "*" [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.0" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"}, + {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "filetype" @@ -3410,69 +3410,77 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "greenlet" -version = "3.0.3" +version = "3.1.0" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = 
"greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = 
"greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = 
"sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, + {file = "greenlet-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8"}, + {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca"}, + {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc"}, + {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d"}, + {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25"}, + {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682"}, + {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1"}, + {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99"}, + {file = "greenlet-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54"}, + {file = "greenlet-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345"}, + {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6"}, + {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc"}, + {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6"}, + {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f"}, + {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19"}, + {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a"}, + {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b"}, + {file = "greenlet-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9"}, + {file = "greenlet-3.1.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27"}, + {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303"}, + {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf"}, + {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc"}, + {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f"}, + {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a"}, + {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665"}, + {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811"}, + {file = "greenlet-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b"}, + {file = "greenlet-3.1.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd"}, + {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca"}, + {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64"}, + {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b"}, + {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989"}, + {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17"}, + {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5"}, + {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484"}, + {file = "greenlet-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0"}, + {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0"}, + {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6"}, + {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a"}, + {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df"}, + {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637"}, + {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954"}, + {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3"}, + {file = "greenlet-3.1.0-cp37-cp37m-win32.whl", hash = 
"sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00"}, + {file = "greenlet-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6"}, + {file = "greenlet-3.1.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33"}, + {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f"}, + {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7"}, + {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09"}, + {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491"}, + {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b"}, + {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d"}, + {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0"}, + {file = "greenlet-3.1.0-cp38-cp38-win32.whl", hash = "sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2"}, + {file = "greenlet-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910"}, + {file = "greenlet-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b"}, + {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7"}, + {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501"}, + {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39"}, + {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a"}, + {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28"}, + {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b"}, + {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8"}, + {file = "greenlet-3.1.0-cp39-cp39-win32.whl", hash = "sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc"}, + {file = "greenlet-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97"}, + {file = "greenlet-3.1.0.tar.gz", hash = "sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0"}, ] [package.extras] @@ -4012,13 +4020,13 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs 
[[package]] name = "importlib-resources" -version = "6.4.4" +version = "6.4.5" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.4.4-py3-none-any.whl", hash = "sha256:dda242603d1c9cd836c3368b1174ed74cb4049ecd209e7a1a0104620c18c5c11"}, - {file = "importlib_resources-6.4.4.tar.gz", hash = "sha256:20600c8b7361938dc0bb2d5ec0297802e575df486f5a544fa414da65e13721f7"}, + {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, + {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, ] [package.extras] @@ -4313,13 +4321,13 @@ files = [ [[package]] name = "kombu" -version = "5.4.0" +version = "5.4.1" description = "Messaging library for Python." optional = false python-versions = ">=3.8" files = [ - {file = "kombu-5.4.0-py3-none-any.whl", hash = "sha256:c8dd99820467610b4febbc7a9e8a0d3d7da2d35116b67184418b51cc520ea6b6"}, - {file = "kombu-5.4.0.tar.gz", hash = "sha256:ad200a8dbdaaa2bbc5f26d2ee7d707d9a1fded353a0f4bd751ce8c7d9f449c60"}, + {file = "kombu-5.4.1-py3-none-any.whl", hash = "sha256:621d365f234e4c089596f3a2510f1ade07026efc28caca426161d8f458786cab"}, + {file = "kombu-5.4.1.tar.gz", hash = "sha256:1c05178826dab811f8cab5b0a154d42a7a33d8bcdde9fa3d7b4582e43c3c03db"}, ] [package.dependencies] @@ -4333,7 +4341,7 @@ confluentkafka = ["confluent-kafka (>=2.2.0)"] consul = ["python-consul2 (==0.1.5)"] librabbitmq = ["librabbitmq (>=2.0.0)"] mongodb = ["pymongo (>=4.1.1)"] -msgpack = ["msgpack (==1.0.8)"] +msgpack = ["msgpack (==1.1.0)"] pyro = ["pyro4 (==4.82)"] qpid = ["qpid-python (>=0.26)", "qpid-tools (>=0.26)"] redis = ["redis (>=4.5.2,!=4.5.5,!=5.0.2)"] @@ -4385,13 +4393,13 @@ six = "*" [[package]] name = "langfuse" -version = "2.46.3" +version = "2.48.0" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langfuse-2.46.3-py3-none-any.whl", hash = "sha256:59dcca4b13ea5f5c7f5a9344266116c3b8b998ae63274e4e9d0dabb51a47d361"}, - {file = "langfuse-2.46.3.tar.gz", hash = "sha256:a68c2dba630f53ccd473205164082ac1b29a1cbdb73500004daee72b5b522624"}, + {file = "langfuse-2.48.0-py3-none-any.whl", hash = "sha256:475b047e461f8a45e3c7d81b6a87e0b9e389c489d465b838aa69cbdd16eeacce"}, + {file = "langfuse-2.48.0.tar.gz", hash = "sha256:46e7e6e6e97fe03115a9f95d7f29b3fcd1848a9d1bb34608ebb42a3931919e45"}, ] [package.dependencies] @@ -4410,13 +4418,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.115" +version = "0.1.118" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.115-py3-none-any.whl", hash = "sha256:04e35cfd4c2d4ff1ea10bb577ff43957b05ebb3d9eb4e06e200701f4a2b4ac9f"}, - {file = "langsmith-0.1.115.tar.gz", hash = "sha256:3b775377d858d32354f3ee0dd1ed637068cfe9a1f13e7b3bfa82db1615cdffc9"}, + {file = "langsmith-0.1.118-py3-none-any.whl", hash = "sha256:f017127b3efb037da5e46ff4f8583e8192e7955191737240c327f3eadc144d7c"}, + {file = "langsmith-0.1.118.tar.gz", hash = "sha256:ff1ca06c92c6081250244ebbce5d0bb347b9d898d2e9b60a13b11f0f0720f09f"}, ] [package.dependencies] @@ -4886,15 +4894,15 @@ files = [ [[package]] name = "milvus-lite" -version = "2.4.9" +version = "2.4.10" description = "A lightweight version of Milvus wrapped with Python." 
optional = false python-versions = ">=3.7" files = [ - {file = "milvus_lite-2.4.9-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d3e617b3d68c09ad656d54bc3d8cc4ef6ef56c54015e1563d4fe4bcec6b7c90a"}, - {file = "milvus_lite-2.4.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6e7029282d6829b277ebb92f64e2370be72b938e34770e1eb649346bda5d1d7f"}, - {file = "milvus_lite-2.4.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9b8e991e4e433596f6a399a165c1a506f823ec9133332e03d7f8a114bff4550d"}, - {file = "milvus_lite-2.4.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:7f53e674602101cfbcf0a4a59d19eaa139dfd5580639f3040ad73d901f24fc0b"}, + {file = "milvus_lite-2.4.10-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fc4246d3ed7d1910847afce0c9ba18212e93a6e9b8406048436940578dfad5cb"}, + {file = "milvus_lite-2.4.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:74a8e07c5e3b057df17fbb46913388e84df1dc403a200f4e423799a58184c800"}, + {file = "milvus_lite-2.4.10-py3-none-manylinux2014_aarch64.whl", hash = "sha256:240c7386b747bad696ecb5bd1f58d491e86b9d4b92dccee3315ed7256256eddc"}, + {file = "milvus_lite-2.4.10-py3-none-manylinux2014_x86_64.whl", hash = "sha256:211d2e334a043f9282bdd9755f76b9b2d93b23bffa7af240919ffce6a8dfe325"}, ] [package.dependencies] @@ -5038,22 +5046,22 @@ tests = ["pytest (>=4.6)"] [[package]] name = "msal" -version = "1.30.0" +version = "1.31.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." optional = false python-versions = ">=3.7" files = [ - {file = "msal-1.30.0-py3-none-any.whl", hash = "sha256:423872177410cb61683566dc3932db7a76f661a5d2f6f52f02a047f101e1c1de"}, - {file = "msal-1.30.0.tar.gz", hash = "sha256:b4bf00850092e465157d814efa24a18f788284c9a479491024d62903085ea2fb"}, + {file = "msal-1.31.0-py3-none-any.whl", hash = "sha256:96bc37cff82ebe4b160d5fc0f1196f6ca8b50e274ecd0ec5bf69c438514086e7"}, + {file = "msal-1.31.0.tar.gz", hash = "sha256:2c4f189cf9cc8f00c80045f66d39b7c0f3ed45873fd3d1f2af9f22db2e12ff4b"}, ] [package.dependencies] -cryptography = ">=2.5,<45" +cryptography = ">=2.5,<46" PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} requests = ">=2.0.0,<3" [package.extras] -broker = ["pymsalruntime (>=0.13.2,<0.17)"] +broker = ["pymsalruntime (>=0.14,<0.18)", "pymsalruntime (>=0.17,<0.18)"] [[package]] name = "msal-extensions" @@ -5110,102 +5118,107 @@ async = ["aiodns", "aiohttp (>=3.0)"] [[package]] name = "multidict" -version = "6.0.5" +version = "6.1.0" description = "multidict implementation" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, - {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, - {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, - {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, - {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, - {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, - {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, - {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, - {file = 
"multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, - {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, - {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, - {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, - 
{file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, - {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, - {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, - {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, - {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, -] + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = 
"multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = 
"multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} [[package]] name = "multiprocess" @@ -6219,19 +6232,19 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.2-py3-none-any.whl", hash = "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617"}, + {file = "platformdirs-4.3.2.tar.gz", hash = "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "plotly" @@ -6295,13 +6308,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.6.3" +version = "3.6.5" description = "Integrate PostHog into any python application." optional = false python-versions = "*" files = [ - {file = "posthog-3.6.3-py2.py3-none-any.whl", hash = "sha256:cdd6c5d8919fd6158bbc4103bccc7129c712d8104dc33828be02bada7b6320a4"}, - {file = "posthog-3.6.3.tar.gz", hash = "sha256:6e1104a20638eab2b5d9cde6b6202a2900d67436237b3ac3521614ec17686701"}, + {file = "posthog-3.6.5-py2.py3-none-any.whl", hash = "sha256:f8b7c573826b061a1d22c9495169c38ebe83a1df2729f49c7129a9c23a02acf6"}, + {file = "posthog-3.6.5.tar.gz", hash = "sha256:7fd3ca809e15476c35f75d18cd6bba31395daf0a17b75242965c469fb6292510"}, ] [package.dependencies] @@ -6586,24 +6599,24 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] @@ -7012,24 +7025,24 @@ files = [ [[package]] name = "pyreadline3" -version = "3.4.1" +version = "3.4.3" description = "A 
python implementation of GNU readline." optional = false python-versions = "*" files = [ - {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, - {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, + {file = "pyreadline3-3.4.3-py3-none-any.whl", hash = "sha256:f832c5898f4f9a0f81d48a8c499b39d0179de1a465ea3def1a7e7231840b4ed6"}, + {file = "pyreadline3-3.4.3.tar.gz", hash = "sha256:ebab0baca37f50e2faa1dd99a6da1c75de60e0d68a3b229c134bbd12786250e2"}, ] [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, ] [package.dependencies] @@ -7065,21 +7078,21 @@ histogram = ["pygal", "pygaljs"] [[package]] name = "pytest-env" -version = "1.1.3" +version = "1.1.4" description = "pytest plugin that allows you to add environment variables." optional = false python-versions = ">=3.8" files = [ - {file = "pytest_env-1.1.3-py3-none-any.whl", hash = "sha256:aada77e6d09fcfb04540a6e462c58533c37df35fa853da78707b17ec04d17dfc"}, - {file = "pytest_env-1.1.3.tar.gz", hash = "sha256:fcd7dc23bb71efd3d35632bde1bbe5ee8c8dc4489d6617fb010674880d96216b"}, + {file = "pytest_env-1.1.4-py3-none-any.whl", hash = "sha256:a4212056d4d440febef311a98fdca56c31256d58fb453d103cba4e8a532b721d"}, + {file = "pytest_env-1.1.4.tar.gz", hash = "sha256:86653658da8f11c6844975db955746c458a9c09f1e64957603161e2ff93f5133"}, ] [package.dependencies] -pytest = ">=7.4.3" +pytest = ">=8.3.2" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -test = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "pytest-mock (>=3.12)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] [[package]] name = "pytest-mock" @@ -7293,13 +7306,13 @@ XlsxWriter = ">=0.5.7" [[package]] name = "pytz" -version = "2024.1" +version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, ] [[package]] @@ -7642,90 +7655,105 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.7.24" +version = "2024.9.11" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, - {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, - {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"}, - {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"}, - {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"}, - {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"}, - {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"}, - {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"}, - {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"}, - {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"}, - {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"}, - {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, - {file = 
"regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, - {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"}, - {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"}, - {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"}, - {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"}, - {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"}, - {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"}, - {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"}, - {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"}, - {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"}, - {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"}, - {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"}, - {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash 
= "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, + {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, + {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, + {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, + {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, + {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, + {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, + {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, + {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, + {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, + {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, + {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, + {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, + {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, ] [[package]] @@ -7831,13 +7859,13 @@ requests = "2.31.0" [[package]] name = "rich" -version = "13.8.0" +version = "13.8.1" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.8.0-py3-none-any.whl", hash = "sha256:2e85306a063b9492dffc86278197a60cbece75bcb766022f3436f567cae11bdc"}, - {file = "rich-13.8.0.tar.gz", hash = "sha256:a5ac1f1cd448ade0d59cc3356f7db7a7ccda2c8cbae9c7a90c28ff463d3e91f4"}, + {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, + {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, ] [package.dependencies] @@ -8194,13 +8222,13 @@ test = ["accelerate (>=0.24.1,<=0.27.0)", "apache-airflow (==2.9.3)", "apache-ai [[package]] name = "sagemaker-core" -version = "1.0.2" +version = "1.0.4" 
description = "An python package for sagemaker core functionalities" optional = false python-versions = ">=3.8" files = [ - {file = "sagemaker_core-1.0.2-py3-none-any.whl", hash = "sha256:ce8d38a4a32efa83e4bc037a8befc7e29f87cd3eaf99acc4472b607f75a0f45a"}, - {file = "sagemaker_core-1.0.2.tar.gz", hash = "sha256:8fb942aac5e7ed928dab512ffe6facf8c6bdd4595df63c59c0bd0795ea434f8d"}, + {file = "sagemaker_core-1.0.4-py3-none-any.whl", hash = "sha256:bf71d988dbda03a3cd1557524f2fab4f19d89e54bd38fc7f05bbbcf580715f95"}, + {file = "sagemaker_core-1.0.4.tar.gz", hash = "sha256:203f4eb9d0d2a0e6ba80d79ba8c28b8ea27c94d04f6d9ff01c2fd55b95615c78"}, ] [package.dependencies] @@ -8229,32 +8257,32 @@ files = [ [[package]] name = "scikit-learn" -version = "1.5.1" +version = "1.5.2" description = "A set of python modules for machine learning and data mining" optional = false python-versions = ">=3.9" files = [ - {file = "scikit_learn-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:781586c414f8cc58e71da4f3d7af311e0505a683e112f2f62919e3019abd3745"}, - {file = "scikit_learn-1.5.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5b213bc29cc30a89a3130393b0e39c847a15d769d6e59539cd86b75d276b1a7"}, - {file = "scikit_learn-1.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ff4ba34c2abff5ec59c803ed1d97d61b036f659a17f55be102679e88f926fac"}, - {file = "scikit_learn-1.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:161808750c267b77b4a9603cf9c93579c7a74ba8486b1336034c2f1579546d21"}, - {file = "scikit_learn-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:10e49170691514a94bb2e03787aa921b82dbc507a4ea1f20fd95557862c98dc1"}, - {file = "scikit_learn-1.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:154297ee43c0b83af12464adeab378dee2d0a700ccd03979e2b821e7dd7cc1c2"}, - {file = "scikit_learn-1.5.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b5e865e9bd59396220de49cb4a57b17016256637c61b4c5cc81aaf16bc123bbe"}, - {file = "scikit_learn-1.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:909144d50f367a513cee6090873ae582dba019cb3fca063b38054fa42704c3a4"}, - {file = "scikit_learn-1.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b6f74b2c880276e365fe84fe4f1befd6a774f016339c65655eaff12e10cbf"}, - {file = "scikit_learn-1.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:9a07f90846313a7639af6a019d849ff72baadfa4c74c778821ae0fad07b7275b"}, - {file = "scikit_learn-1.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5944ce1faada31c55fb2ba20a5346b88e36811aab504ccafb9f0339e9f780395"}, - {file = "scikit_learn-1.5.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0828673c5b520e879f2af6a9e99eee0eefea69a2188be1ca68a6121b809055c1"}, - {file = "scikit_learn-1.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508907e5f81390e16d754e8815f7497e52139162fd69c4fdbd2dfa5d6cc88915"}, - {file = "scikit_learn-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97625f217c5c0c5d0505fa2af28ae424bd37949bb2f16ace3ff5f2f81fb4498b"}, - {file = "scikit_learn-1.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:da3f404e9e284d2b0a157e1b56b6566a34eb2798205cba35a211df3296ab7a74"}, - {file = "scikit_learn-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88e0672c7ac21eb149d409c74cc29f1d611d5158175846e7a9c2427bd12b3956"}, - {file = "scikit_learn-1.5.1-cp39-cp39-macosx_12_0_arm64.whl", hash = 
"sha256:7b073a27797a283187a4ef4ee149959defc350b46cbf63a84d8514fe16b69855"}, - {file = "scikit_learn-1.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b59e3e62d2be870e5c74af4e793293753565c7383ae82943b83383fdcf5cc5c1"}, - {file = "scikit_learn-1.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd8d3a19d4bd6dc5a7d4f358c8c3a60934dc058f363c34c0ac1e9e12a31421d"}, - {file = "scikit_learn-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:5f57428de0c900a98389c4a433d4a3cf89de979b3aa24d1c1d251802aa15e44d"}, - {file = "scikit_learn-1.5.1.tar.gz", hash = "sha256:0ea5d40c0e3951df445721927448755d3fe1d80833b0b7308ebff5d2a45e6414"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, + {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, + {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, + {file = 
"scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, + {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, ] [package.dependencies] @@ -8266,11 +8294,11 @@ threadpoolctl = ">=3.1.0" [package.extras] benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.23)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] maintenance = ["conda-lock (==2.5.6)"] -tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.23)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] [[package]] name = "scipy" @@ -8647,13 +8675,13 @@ doc = ["sphinx"] [[package]] name = "starlette" -version = "0.38.4" +version = "0.38.5" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.38.4-py3-none-any.whl", hash = "sha256:526f53a77f0e43b85f583438aee1a940fd84f8fd610353e8b0c1a77ad8a87e76"}, - {file = "starlette-0.38.4.tar.gz", hash = "sha256:53a7439060304a208fea17ed407e998f46da5e5d9b1addfea3040094512a6379"}, + {file = "starlette-0.38.5-py3-none-any.whl", hash = "sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206"}, + {file = "starlette-0.38.5.tar.gz", hash = "sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077"}, ] [package.dependencies] @@ -8750,13 +8778,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tencentcloud-sdk-python-common" -version = "3.0.1226" +version = "3.0.1230" description = "Tencent Cloud Common SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-common-3.0.1226.tar.gz", hash = "sha256:8e126cdce6adffce6fa5a3b464f0a6e483af7c7f78939883823393c2c5e8fc62"}, - {file = "tencentcloud_sdk_python_common-3.0.1226-py2.py3-none-any.whl", hash = "sha256:6165481280147afa226c6bb91df4cd0c43c5230f566be3d3f9c45a826b1105c5"}, + {file = "tencentcloud-sdk-python-common-3.0.1230.tar.gz", hash = "sha256:1e0f3bab80026fcb0083820869239b3f8cf30beb8e00e12c213bdecc75eb7577"}, + {file = "tencentcloud_sdk_python_common-3.0.1230-py2.py3-none-any.whl", hash = "sha256:03616c79685c154c689536a9c823d52b855cf49eada70679826a92aff5afd596"}, ] [package.dependencies] @@ -8764,17 +8792,17 @@ requests = ">=2.16.0" [[package]] name = "tencentcloud-sdk-python-hunyuan" -version = "3.0.1226" +version = "3.0.1230" description = "Tencent Cloud Hunyuan SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-hunyuan-3.0.1226.tar.gz", hash = "sha256:c9b9c3a373d967b691444bd590e3be1424aaab9f1ab30c57d98777113e2b7882"}, - {file = "tencentcloud_sdk_python_hunyuan-3.0.1226-py2.py3-none-any.whl", hash = "sha256:87a1d63f85c25b5ec6c07f16d813091411ea6f296a1bf7fb608a529852b38bbe"}, + {file = "tencentcloud-sdk-python-hunyuan-3.0.1230.tar.gz", hash = "sha256:900d15cb9dc2217b1282d985898ec7ecf97859351c86c6f7efc74685f08a5f85"}, + {file = "tencentcloud_sdk_python_hunyuan-3.0.1230-py2.py3-none-any.whl", hash = "sha256:604dab0d4d66ea942f23d7980c76b5f0f6af3d68a8374e619331a4dd2910991e"}, ] [package.dependencies] -tencentcloud-sdk-python-common = "3.0.1226" +tencentcloud-sdk-python-common = "3.0.1230" [[package]] name = "threadpoolctl" @@ -9177,13 +9205,13 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "types-requests" -version = "2.32.0.20240905" +version = "2.32.0.20240907" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240905.tar.gz", hash = "sha256:e97fd015a5ed982c9ddcd14cc4afba9d111e0e06b797c8f776d14602735e9bd6"}, - {file = "types_requests-2.32.0.20240905-py3-none-any.whl", hash = "sha256:f46ecb55f5e1a37a58be684cf3f013f166da27552732ef2469a0cc8e62a72881"}, + {file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"}, + {file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"}, ] [package.dependencies] @@ -10388,4 +10416,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "8179c7e3f91b5a00054e26297040b1969f59b37cb9a707fbaa9c2ea419954718" +content-hash = 
"726af69ca5a577808dfe76dbce098de77ce358bf64862a4d27309cb1900cea0c" diff --git a/api/pyproject.toml b/api/pyproject.toml index 1822f59419984..eb930329e11bd 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -130,7 +130,7 @@ authlib = "1.3.1" azure-identity = "1.16.1" azure-storage-blob = "12.13.0" beautifulsoup4 = "4.12.2" -boto3 = "1.34.148" +boto3 = "1.35.17" sagemaker = "2.231.0" bs4 = "~0.0.1" cachetools = "~5.3.0" From aa11659062dd71ab460163d91fe49dcda9ee1b6b Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Thu, 12 Sep 2024 20:06:06 +0800 Subject: [PATCH 08/25] Revert "Feat: update app published time after clicking publish button" (#8320) --- .../components/app/app-publisher/index.tsx | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/web/app/components/app/app-publisher/index.tsx b/web/app/components/app/app-publisher/index.tsx index 2bcc74ec01dad..0558e29956039 100644 --- a/web/app/components/app/app-publisher/index.tsx +++ b/web/app/components/app/app-publisher/index.tsx @@ -63,7 +63,6 @@ const AppPublisher = ({ const [published, setPublished] = useState(false) const [open, setOpen] = useState(false) const appDetail = useAppStore(state => state.appDetail) - const [publishedTime, setPublishedTime] = useState(publishedAt) const { app_base_url: appBaseURL = '', access_token: accessToken = '' } = appDetail?.site ?? {} const appMode = (appDetail?.mode !== 'completion' && appDetail?.mode !== 'workflow') ? 'chat' : appDetail.mode const appURL = `${appBaseURL}/${appMode}/${accessToken}` @@ -77,7 +76,6 @@ const AppPublisher = ({ try { await onPublish?.(modelAndParameter) setPublished(true) - setPublishedTime(Date.now()) } catch (e) { setPublished(false) @@ -133,13 +131,13 @@ const AppPublisher = ({
- {publishedTime ? t('workflow.common.latestPublished') : t('workflow.common.currentDraftUnpublished')} + {publishedAt ? t('workflow.common.latestPublished') : t('workflow.common.currentDraftUnpublished')}
- {publishedTime + {publishedAt ? (
- {t('workflow.common.publishedAt')} {formatTimeFromNow(publishedTime)} + {t('workflow.common.publishedAt')} {formatTimeFromNow(publishedAt)}
- }>{t('workflow.common.runApp')} + }>{t('workflow.common.runApp')} {appDetail?.mode === 'workflow' ? ( } > @@ -201,16 +199,16 @@ const AppPublisher = ({ setEmbeddingModalOpen(true) handleTrigger() }} - disabled={!publishedTime} + disabled={!publishedAt} icon={} > {t('workflow.common.embedIntoSite')} )} - }>{t('workflow.common.accessAPIReference')} + }>{t('workflow.common.accessAPIReference')} {appDetail?.mode === 'workflow' && ( Date: Thu, 12 Sep 2024 20:22:57 +0800 Subject: [PATCH 09/25] chore:add Azure openai api version 2024-08-01-preview (#8291) --- .../model_providers/azure_openai/azure_openai.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml index 700935b07b0c0..867f9fec42000 100644 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml @@ -53,6 +53,12 @@ model_credential_schema: type: select required: true options: + - label: + en_US: 2024-08-01-preview + value: 2024-08-01-preview + - label: + en_US: 2024-07-01-preview + value: 2024-07-01-preview - label: en_US: 2024-05-01-preview value: 2024-05-01-preview From 404db1ae5b4746be00925c6b2c725303d2d62218 Mon Sep 17 00:00:00 2001 From: Tamer Date: Thu, 12 Sep 2024 20:27:55 +0800 Subject: [PATCH 10/25] Fix VariableEntityType Bug external-data-tool -> external_data_tool (#8299) --- api/core/app/app_config/entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/app/app_config/entities.py b/api/core/app/app_config/entities.py index d208db2b01dc2..7e5899bafabae 100644 --- a/api/core/app/app_config/entities.py +++ b/api/core/app/app_config/entities.py @@ -92,7 +92,7 @@ class VariableEntityType(str, Enum): SELECT = "select" PARAGRAPH = "paragraph" NUMBER = "number" - EXTERNAL_DATA_TOOL = "external-data-tool" + EXTERNAL_DATA_TOOL = "external_data_tool" class VariableEntity(BaseModel): From 5db0b56c5b57b6ceffb39bf4a219cf80082dae78 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Thu, 12 Sep 2024 21:33:07 +0900 Subject: [PATCH 11/25] docs: update lambda_translate_utils.yaml (#8293) --- .../provider/builtin/aws/tools/lambda_translate_utils.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/tools/provider/builtin/aws/tools/lambda_translate_utils.yaml b/api/core/tools/provider/builtin/aws/tools/lambda_translate_utils.yaml index a35c9f49fb972..3bb133c7ec8d1 100644 --- a/api/core/tools/provider/builtin/aws/tools/lambda_translate_utils.yaml +++ b/api/core/tools/provider/builtin/aws/tools/lambda_translate_utils.yaml @@ -10,7 +10,7 @@ description: human: en_US: A util tools for LLM translation, extra deployment is needed on AWS. Please refer Github Repo - https://github.com/ybalbert001/dynamodb-rag zh_Hans: 大语言模型翻译工具(专词映射获取),需要在AWS上进行额外部署,可参考Github Repo - https://github.com/ybalbert001/dynamodb-rag - pt_BR: A util tools for LLM translation, specfic Lambda Function deployment is needed on AWS. Please refer Github Repo - https://github.com/ybalbert001/dynamodb-rag + pt_BR: A util tools for LLM translation, specific Lambda Function deployment is needed on AWS. Please refer Github Repo - https://github.com/ybalbert001/dynamodb-rag llm: A util tools for translation. 
parameters: - name: text_content From 153807f243952f3a5fd50e9969c4826080d0fbd2 Mon Sep 17 00:00:00 2001 From: Nam Vu Date: Thu, 12 Sep 2024 22:17:29 +0700 Subject: [PATCH 12/25] fix: response_format label (#8326) --- .../model_providers/baichuan/llm/baichuan3-turbo-128k.yaml | 2 +- .../model_providers/baichuan/llm/baichuan3-turbo.yaml | 2 +- .../model_runtime/model_providers/baichuan/llm/baichuan4.yaml | 2 +- .../model_providers/deepseek/llm/deepseek-chat.yaml | 2 +- .../model_providers/moonshot/llm/moonshot-v1-128k.yaml | 2 +- .../model_providers/moonshot/llm/moonshot-v1-32k.yaml | 2 +- .../model_providers/moonshot/llm/moonshot-v1-8k.yaml | 2 +- .../model_providers/openai/llm/chatgpt-4o-latest.yaml | 2 +- .../model_providers/openai/llm/gpt-3.5-turbo-0125.yaml | 2 +- .../model_providers/openai/llm/gpt-3.5-turbo-1106.yaml | 2 +- .../model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml | 2 +- .../model_providers/openai/llm/gpt-4-0125-preview.yaml | 2 +- .../model_providers/openai/llm/gpt-4-1106-preview.yaml | 2 +- .../model_runtime/model_providers/openai/llm/gpt-4-32k.yaml | 2 +- .../model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml | 2 +- .../model_providers/openai/llm/gpt-4-turbo-preview.yaml | 2 +- .../model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml | 2 +- .../model_providers/openai/llm/gpt-4-vision-preview.yaml | 2 +- api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml | 2 +- .../model_providers/openai/llm/gpt-4o-2024-05-13.yaml | 2 +- .../model_providers/openai/llm/gpt-4o-2024-08-06.yaml | 2 +- .../model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml | 2 +- .../model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml | 2 +- api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml | 2 +- .../model_providers/openrouter/llm/gpt-3.5-turbo.yaml | 2 +- .../model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml | 2 +- .../model_runtime/model_providers/openrouter/llm/gpt-4.yaml | 2 +- .../model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml | 2 +- .../model_providers/openrouter/llm/gpt-4o-mini.yaml | 2 +- .../model_runtime/model_providers/openrouter/llm/gpt-4o.yaml | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml index c6c6c7e9e9194..d9cd086e82c99 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml +++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml @@ -33,7 +33,7 @@ parameter_rules: - name: res_format label: zh_Hans: 回复格式 - en_US: response format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml index ee8a9ff0d5408..58f9b39a438a0 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml +++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml @@ -33,7 +33,7 @@ parameter_rules: - name: res_format label: zh_Hans: 回复格式 - en_US: response format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml index e5e6aeb49158e..6a1135e165fca 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml +++ 
b/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml @@ -33,7 +33,7 @@ parameter_rules: - name: res_format label: zh_Hans: 回复格式 - en_US: response format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml index 6588a4b5e0146..4973ac8ad6981 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml @@ -62,7 +62,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml index 1078e84c59dca..59c0915ee9b61 100644 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml +++ b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml @@ -24,7 +24,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml index 9c739d05019f5..724f2aa5a29f9 100644 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml +++ b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml @@ -24,7 +24,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml index 187a86999e170..5872295bfad1e 100644 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml +++ b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml @@ -24,7 +24,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml index 98e236650c9e7..b47449a49abc2 100644 --- a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml index c1602b2efcb8d..ffa725ec40f4f 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml @@ -27,7 +27,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml 
b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml index 56ab965c39c4d..21150fc3a6df6 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml @@ -27,7 +27,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml index 6eb15e6c0df39..d3a8ee535a9d1 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml @@ -27,7 +27,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml index 007cfed0f3a6f..ac4ec5840bd77 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml @@ -40,7 +40,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml index f4fa6317af876..d7752397701f9 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml @@ -40,7 +40,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml index f92173ccfd996..8358425e6d290 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml @@ -40,7 +40,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml index 6b36361efe80d..0234499164abf 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml @@ -41,7 +41,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml index c0350ae2c6d75..8d29cf0c04a1d 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml @@ -40,7 +40,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: 
response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml index 575acb7fa294b..b25ff6a81269f 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml @@ -41,7 +41,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml index a63b60842396c..07037c66438dd 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml @@ -38,7 +38,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml index a7a5bf3c864a6..f7b5138b7df36 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml @@ -40,7 +40,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml index f0d835cba217d..b630d6f63075c 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml index 7e430c51a710f..73b7f6970076c 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml index 03e28772e607f..df38270f79b1c 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml index 23dcf85085e12..5e3c94fbe255c 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml +++ 
b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml index 4f141f772fd14..3090a9e090c2c 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml index 1737c50bb1086..186c1cc663604 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml @@ -26,7 +26,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml index 2d55cf85656f1..8c2989b300076 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml @@ -41,7 +41,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml index 12015f6f6432f..ef19d4f6f0dba 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml @@ -41,7 +41,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml index cf2de0f73a0b8..0be325f55bdc9 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml @@ -28,7 +28,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml index de0bad413653e..3b1d95643d22a 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml @@ -27,7 +27,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml index 6945402c7207a..a8c97efdd64b1 100644 --- 
a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml @@ -27,7 +27,7 @@ parameter_rules: - name: response_format label: zh_Hans: 回复格式 - en_US: response_format + en_US: Response Format type: string help: zh_Hans: 指定模型必须输出的格式 From e90d3c29ab9abd86611396af0dde0b427796e5a5 Mon Sep 17 00:00:00 2001 From: takatost Date: Fri, 13 Sep 2024 02:15:19 +0800 Subject: [PATCH 13/25] feat: add OpenAI o1 series models support (#8328) --- .../model_providers/openai/llm/_position.yaml | 4 ++ .../model_providers/openai/llm/llm.py | 45 +++++++++++++++++-- .../openai/llm/o1-mini-2024-09-12.yaml | 33 ++++++++++++++ .../model_providers/openai/llm/o1-mini.yaml | 33 ++++++++++++++ .../openai/llm/o1-preview-2024-09-12.yaml | 33 ++++++++++++++ .../openai/llm/o1-preview.yaml | 33 ++++++++++++++ api/pyproject.toml | 3 +- 7 files changed, 180 insertions(+), 4 deletions(-) create mode 100644 api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml create mode 100644 api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml create mode 100644 api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml create mode 100644 api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml b/api/core/model_runtime/model_providers/openai/llm/_position.yaml index ac7313aaa1bf0..7501bc1164dc4 100644 --- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/_position.yaml @@ -5,6 +5,10 @@ - chatgpt-4o-latest - gpt-4o-mini - gpt-4o-mini-2024-07-18 +- o1-preview +- o1-preview-2024-09-12 +- o1-mini +- o1-mini-2024-09-12 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-turbo-preview diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index c4b381df41b68..048f27f30592e 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -613,6 +613,13 @@ def _chat_generate( # clear illegal prompt messages prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages) + block_as_stream = False + if model.startswith("o1"): + block_as_stream = True + stream = False + if "stream_options" in extra_model_kwargs: + del extra_model_kwargs["stream_options"] + # chat model response = client.chat.completions.create( messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], @@ -625,7 +632,39 @@ def _chat_generate( if stream: return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) - return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) + block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) + + if block_as_stream: + return self._handle_chat_block_as_stream_response(block_result, prompt_messages) + + return block_result + + def _handle_chat_block_as_stream_response( + self, + block_result: LLMResult, + prompt_messages: list[PromptMessage], + ) -> Generator[LLMResultChunk, None, None]: + """ + Handle a blocking chat response as a one-chunk stream. + + Models such as the o1 series do not support streaming, so the blocking + result is wrapped into a single LLMResultChunk. + + :param block_result: LLM result from the blocking call + :param prompt_messages: prompt messages + :return: llm response chunk generator + """ + yield LLMResultChunk( + 
model=block_result.model, + prompt_messages=prompt_messages, + system_fingerprint=block_result.system_fingerprint, + delta=LLMResultChunkDelta( + index=0, + message=block_result.message, + finish_reason="stop", + usage=block_result.usage, + ), + ) def _handle_chat_generate_response( self, @@ -960,7 +999,7 @@ def _num_tokens_from_messages( model = model.split(":")[1] # Currently, we can use gpt4o to calculate chatgpt-4o-latest's token. - if model == "chatgpt-4o-latest": + if model == "chatgpt-4o-latest" or model.startswith("o1"): model = "gpt-4o" try: @@ -975,7 +1014,7 @@ def _num_tokens_from_messages( tokens_per_message = 4 # if there's a name, the role is omitted tokens_per_name = -1 - elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"): + elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith("o1"): tokens_per_message = 3 tokens_per_name = 1 else: diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml new file mode 100644 index 0000000000000..07a3bc9a7aaaf --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml @@ -0,0 +1,33 @@ +model: o1-mini-2024-09-12 +label: + zh_Hans: o1-mini-2024-09-12 + en_US: o1-mini-2024-09-12 +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + default: 65563 + min: 1 + max: 65563 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: '3.00' + output: '12.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml new file mode 100644 index 0000000000000..3e8352920178c --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml @@ -0,0 +1,33 @@ +model: o1-mini +label: + zh_Hans: o1-mini + en_US: o1-mini +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + default: 65563 + min: 1 + max: 65563 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: '3.00' + output: '12.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml new file mode 100644 index 0000000000000..c9da96f611bc9 --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml @@ -0,0 +1,33 @@ +model: o1-preview-2024-09-12 +label: + zh_Hans: o1-preview-2024-09-12 + en_US: o1-preview-2024-09-12 +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + default: 32768 + min: 1 + max: 32768 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must 
output + required: false + options: + - text + - json_object +pricing: + input: '15.00' + output: '60.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml new file mode 100644 index 0000000000000..c83874b76582e --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml @@ -0,0 +1,33 @@ +model: o1-preview +label: + zh_Hans: o1-preview + en_US: o1-preview +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + default: 32768 + min: 1 + max: 32768 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: '15.00' + output: '60.00' + unit: '0.000001' + currency: USD diff --git a/api/pyproject.toml b/api/pyproject.toml index eb930329e11bd..823dbb651281b 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -60,7 +60,8 @@ ignore = [ "SIM113", # eumerate-for-loop "SIM117", # multiple-with-statements "SIM210", # if-expr-with-true-false - "SIM300", # yoda-conditions + "SIM300", # yoda-conditions, + "PT004", # pytest-no-assert ] [tool.ruff.lint.per-file-ignores] From c78828ab7ce36647985b2c2ec104940180a006d2 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 13 Sep 2024 02:48:24 +0800 Subject: [PATCH 14/25] chore: update Dify version to 0.8.1 (#8329) --- api/configs/packaging/__init__.py | 2 +- docker-legacy/docker-compose.yaml | 6 +++--- docker/docker-compose.yaml | 6 +++--- web/package.json | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py index e03dfeb27ca3e..103a6c58c3d75 100644 --- a/api/configs/packaging/__init__.py +++ b/api/configs/packaging/__init__.py @@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings): CURRENT_VERSION: str = Field( description="Dify version", - default="0.8.0", + default="0.8.1", ) COMMIT_SHA: str = Field( diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml index 7075a31f2b8e7..33c7fefcbab09 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.8.0 + image: langgenius/dify-api:0.8.1 restart: always environment: # Startup mode, 'api' starts the API server. @@ -227,7 +227,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.8.0 + image: langgenius/dify-api:0.8.1 restart: always environment: CONSOLE_WEB_URL: '' @@ -396,7 +396,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.8.0 + image: langgenius/dify-web:0.8.1 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 5afb876a1cf82..0f97c90d5f4c5 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -208,7 +208,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.8.0 + image: langgenius/dify-api:0.8.1 restart: always environment: # Use the shared environment variables. 
@@ -228,7 +228,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.8.0 + image: langgenius/dify-api:0.8.1 restart: always environment: # Use the shared environment variables. @@ -247,7 +247,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.8.0 + image: langgenius/dify-web:0.8.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/web/package.json b/web/package.json index 374286f8f717c..4f640b7ba361f 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.8.0", + "version": "0.8.1", "private": true, "engines": { "node": ">=18.17.0" From 49cee773c5315c9c035db66efcf48f0ec4e33bc2 Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Fri, 13 Sep 2024 10:21:58 +0800 Subject: [PATCH 15/25] fixed score threshold is none (#8342) --- api/core/rag/retrieval/dataset_retrieval.py | 2 +- .../tool/dataset_retriever/dataset_multi_retriever_tool.py | 2 +- api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py | 2 +- api/services/hit_testing_service.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index cdaca8387d227..12868d6ae4394 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -429,7 +429,7 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, top_k: int, top_k=top_k, score_threshold=retrieval_model.get("score_threshold", 0.0) if retrieval_model["score_threshold_enabled"] - else None, + else 0.0, reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, diff --git a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py index 067600c601337..6073b8e92e39b 100644 --- a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py @@ -179,7 +179,7 @@ def _retriever( top_k=self.top_k, score_threshold=retrieval_model.get("score_threshold", 0.0) if retrieval_model["score_threshold_enabled"] - else None, + else 0.0, reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, diff --git a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py index ad533946a17bd..8dc60408c93b4 100644 --- a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py +++ b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py @@ -72,7 +72,7 @@ def _run(self, query: str) -> str: top_k=self.top_k, score_threshold=retrieval_model.get("score_threshold", 0.0) if retrieval_model["score_threshold_enabled"] - else None, + else 0.0, reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index a9f963dbacfeb..3dafafd5b46f6 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -42,7 +42,7 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_mode top_k=retrieval_model.get("top_k", 2), score_threshold=retrieval_model.get("score_threshold", 0.0) if 
retrieval_model["score_threshold_enabled"] - else None, + else 0.0, reranking_model=retrieval_model.get("reranking_model", None) if retrieval_model["reranking_enable"] else None, From a9c1f1a041699c2d16da9117fcb42b6b3d6dd599 Mon Sep 17 00:00:00 2001 From: Pika Date: Fri, 13 Sep 2024 11:03:39 +0800 Subject: [PATCH 16/25] fix(workflow): fix var-selector not update when edges change (#8259) Co-authored-by: Chen(MAC) --- .../variable/var-reference-picker.tsx | 28 ++++++------------- .../_base/hooks/use-available-var-list.ts | 6 ++-- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx index e2b1f0a31c805..7fb4ad68d8bbd 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx @@ -8,6 +8,7 @@ import { } from '@remixicon/react' import produce from 'immer' import { useStoreApi } from 'reactflow' +import useAvailableVarList from '../../hooks/use-available-var-list' import VarReferencePopup from './var-reference-popup' import { getNodeInfoById, isConversationVar, isENV, isSystemVar } from './utils' import ConstantField from './constant-field' @@ -26,7 +27,6 @@ import { } from '@/app/components/base/portal-to-follow-elem' import { useIsChatMode, - useWorkflow, useWorkflowVariables, } from '@/app/components/workflow/hooks' import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types' @@ -67,7 +67,7 @@ const VarReferencePicker: FC = ({ onlyLeafNodeVar, filterVar = () => true, availableNodes: passedInAvailableNodes, - availableVars, + availableVars: passedInAvailableVars, isAddBtnTrigger, schema, valueTypePlaceHolder, @@ -79,11 +79,12 @@ const VarReferencePicker: FC = ({ } = store.getState() const isChatMode = useIsChatMode() - const { getTreeLeafNodes, getBeforeNodesInSameBranch } = useWorkflow() - const { getCurrentVariableType, getNodeAvailableVars } = useWorkflowVariables() - const availableNodes = useMemo(() => { - return passedInAvailableNodes || (onlyLeafNodeVar ? 
getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId)) - }, [getBeforeNodesInSameBranch, getTreeLeafNodes, nodeId, onlyLeafNodeVar, passedInAvailableNodes]) + const { getCurrentVariableType } = useWorkflowVariables() + const { availableNodes, availableVars } = useAvailableVarList(nodeId, { + onlyLeafNodeVar, + passedInAvailableNodes, + filterVar, + }) const startNode = availableNodes.find((node: any) => { return node.data.type === BlockEnum.Start }) @@ -102,19 +103,8 @@ const VarReferencePicker: FC = ({ const [varKindType, setVarKindType] = useState(defaultVarKindType) const isConstant = isSupportConstantValue && varKindType === VarKindType.constant - const outputVars = useMemo(() => { - if (availableVars) - return availableVars - - const vars = getNodeAvailableVars({ - parentNode: iterationNode, - beforeNodes: availableNodes, - isChatMode, - filterVar, - }) - return vars - }, [iterationNode, availableNodes, isChatMode, filterVar, availableVars, getNodeAvailableVars]) + const outputVars = useMemo(() => (passedInAvailableVars || availableVars), [passedInAvailableVars, availableVars]) const [open, setOpen] = useState(false) useEffect(() => { diff --git a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts index b81feab805370..bd17bb1de859a 100644 --- a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts +++ b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts @@ -4,12 +4,13 @@ import { useWorkflow, useWorkflowVariables, } from '@/app/components/workflow/hooks' -import type { ValueSelector, Var } from '@/app/components/workflow/types' +import type { Node, ValueSelector, Var } from '@/app/components/workflow/types' type Params = { onlyLeafNodeVar?: boolean hideEnv?: boolean hideChatVar?: boolean filterVar: (payload: Var, selector: ValueSelector) => boolean + passedInAvailableNodes?: Node[] } const useAvailableVarList = (nodeId: string, { @@ -17,6 +18,7 @@ const useAvailableVarList = (nodeId: string, { filterVar, hideEnv, hideChatVar, + passedInAvailableNodes, }: Params = { onlyLeafNodeVar: false, filterVar: () => true, @@ -25,7 +27,7 @@ const useAvailableVarList = (nodeId: string, { const { getNodeAvailableVars } = useWorkflowVariables() const isChatMode = useIsChatMode() - const availableNodes = onlyLeafNodeVar ? getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId) + const availableNodes = passedInAvailableNodes || (onlyLeafNodeVar ? 
getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId)) const { parentNode: iterationNode, From 5f03e664891c043763d844212582ec187b364946 Mon Sep 17 00:00:00 2001 From: fanlia <3093932086@qq.com> Date: Fri, 13 Sep 2024 11:03:57 +0800 Subject: [PATCH 17/25] Feature/service api workflow logs (#8323) --- api/controllers/service_api/app/workflow.py | 27 +++++ .../develop/template/template_workflow.en.mdx | 106 ++++++++++++++++++ .../develop/template/template_workflow.zh.mdx | 106 ++++++++++++++++++ 3 files changed, 239 insertions(+) diff --git a/api/controllers/service_api/app/workflow.py b/api/controllers/service_api/app/workflow.py index 5822e0921bc70..96d1337632826 100644 --- a/api/controllers/service_api/app/workflow.py +++ b/api/controllers/service_api/app/workflow.py @@ -1,6 +1,7 @@ import logging from flask_restful import Resource, fields, marshal_with, reqparse +from flask_restful.inputs import int_range from werkzeug.exceptions import InternalServerError from controllers.service_api import api @@ -22,10 +23,12 @@ ) from core.model_runtime.errors.invoke import InvokeError from extensions.ext_database import db +from fields.workflow_app_log_fields import workflow_app_log_pagination_fields from libs import helper from models.model import App, AppMode, EndUser from models.workflow import WorkflowRun from services.app_generate_service import AppGenerateService +from services.workflow_app_service import WorkflowAppService logger = logging.getLogger(__name__) @@ -113,6 +116,30 @@ def post(self, app_model: App, end_user: EndUser, task_id: str): return {"result": "success"} +class WorkflowAppLogApi(Resource): + @validate_app_token + @marshal_with(workflow_app_log_pagination_fields) + def get(self, app_model: App): + """ + Get workflow app logs + """ + parser = reqparse.RequestParser() + parser.add_argument("keyword", type=str, location="args") + parser.add_argument("status", type=str, choices=["succeeded", "failed", "stopped"], location="args") + parser.add_argument("page", type=int_range(1, 99999), default=1, location="args") + parser.add_argument("limit", type=int_range(1, 100), default=20, location="args") + args = parser.parse_args() + + # get paginated workflow app logs + workflow_app_service = WorkflowAppService() + workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs( + app_model=app_model, args=args + ) + + return workflow_app_log_pagination + + api.add_resource(WorkflowRunApi, "/workflows/run") api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_id>") api.add_resource(WorkflowTaskStopApi, "/workflows/tasks/<string:task_id>/stop") +api.add_resource(WorkflowAppLogApi, "/workflows/logs") diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx index 495b051bd056d..2bd0fe9daf436 100644 --- a/web/app/components/develop/template/template_workflow.en.mdx +++ b/web/app/components/develop/template/template_workflow.en.mdx @@ -413,3 +413,109 @@ Workflow applications offers non-session support and is ideal for translation, a + +--- + + + + + Returns workflow logs, with the first page returning the latest `{limit}` entries, i.e., in reverse order. + + ### Query + + + + Keyword to search + + + succeeded/failed/stopped + + + Current page, default is 1. + + + How many log entries to return in one request, default is 20. 
+ + + + ### Response + - `page` (int) Current page + - `limit` (int) Number of returned items; if the input exceeds the system limit, the system limit is returned + - `total` (int) Number of total items + - `has_more` (bool) Whether there is a next page + - `data` (array[object]) Log list + - `id` (string) ID + - `workflow_run` (object) Workflow run + - `id` (string) ID + - `version` (string) Version + - `status` (string) Status of execution, `running` / `succeeded` / `failed` / `stopped` + - `error` (string) Optional error reason + - `elapsed_time` (float) Total elapsed time in seconds + - `total_tokens` (int) Total tokens used + - `total_steps` (int) Number of executed steps, default 0 + - `created_at` (timestamp) Start time + - `finished_at` (timestamp) End time + - `created_from` (string) Created from + - `created_by_role` (string) Created by role + - `created_by_account` (string) Optional creator account + - `created_by_end_user` (object) Created by end user + - `id` (string) ID + - `type` (string) Type + - `is_anonymous` (bool) Is anonymous + - `session_id` (string) Session ID + - `created_at` (timestamp) Creation time + + + + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/workflows/logs?limit=1' + --header 'Authorization: Bearer {api_key}' + ``` + + + ### Response Example + + ```json {{ title: 'Response' }} + { + "page": 1, + "limit": 1, + "total": 7, + "has_more": true, + "data": [ + { + "id": "e41b93f1-7ca2-40fd-b3a8-999aeb499cc0", + "workflow_run": { + "id": "c0640fc8-03ef-4481-a96c-8a13b732a36e", + "version": "2024-08-01 12:17:09.771832", + "status": "succeeded", + "error": null, + "elapsed_time": 1.3588523610014818, + "total_tokens": 0, + "total_steps": 3, + "created_at": 1726139643, + "finished_at": 1726139644 + }, + "created_from": "service-api", + "created_by_role": "end_user", + "created_by_account": null, + "created_by_end_user": { + "id": "7f7d9117-dd9d-441d-8970-87e5e7e687a3", + "type": "service_api", + "is_anonymous": false, + "session_id": "abc-123" + }, + "created_at": 1726139644 + } + ] + } + ``` + + + diff --git a/web/app/components/develop/template/template_workflow.zh.mdx b/web/app/components/develop/template/template_workflow.zh.mdx index 640a4b3f9259c..d7d672fbd0c44 100644 --- a/web/app/components/develop/template/template_workflow.zh.mdx +++ b/web/app/components/develop/template/template_workflow.zh.mdx @@ -409,3 +409,109 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 + +--- + + + + + 倒序返回workflow日志 + + ### Query + + + + 关键字 + + + 执行状态 succeeded/failed/stopped + + + 当前页码, 默认1. + + + 每页条数, 默认20.
+ + + + ### Response + - `page` (int) 当前页码 + - `limit` (int) 每页条数 + - `total` (int) 总条数 + - `has_more` (bool) 是否还有更多数据 + - `data` (array[object]) 当前页码的数据 + - `id` (string) 标识 + - `workflow_run` (object) Workflow 执行日志 + - `id` (string) 标识 + - `version` (string) 版本 + - `status` (string) 执行状态, `running` / `succeeded` / `failed` / `stopped` + - `error` (string) (可选) 错误 + - `elapsed_time` (float) 耗时,单位秒 + - `total_tokens` (int) 消耗的token数量 + - `total_steps` (int) 执行步骤长度 + - `created_at` (timestamp) 开始时间 + - `finished_at` (timestamp) 结束时间 + - `created_from` (string) 来源 + - `created_by_role` (string) 角色 + - `created_by_account` (string) (可选) 帐号 + - `created_by_end_user` (object) 用户 + - `id` (string) 标识 + - `type` (string) 类型 + - `is_anonymous` (bool) 是否匿名 + - `session_id` (string) 会话标识 + - `created_at` (timestamp) 创建时间 + + + + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/workflows/logs?limit=1' + --header 'Authorization: Bearer {api_key}' + ``` + + + ### Response Example + + ```json {{ title: 'Response' }} + { + "page": 1, + "limit": 1, + "total": 7, + "has_more": true, + "data": [ + { + "id": "e41b93f1-7ca2-40fd-b3a8-999aeb499cc0", + "workflow_run": { + "id": "c0640fc8-03ef-4481-a96c-8a13b732a36e", + "version": "2024-08-01 12:17:09.771832", + "status": "succeeded", + "error": null, + "elapsed_time": 1.3588523610014818, + "total_tokens": 0, + "total_steps": 3, + "created_at": 1726139643, + "finished_at": 1726139644 + }, + "created_from": "service-api", + "created_by_role": "end_user", + "created_by_account": null, + "created_by_end_user": { + "id": "7f7d9117-dd9d-441d-8970-87e5e7e687a3", + "type": "service_api", + "is_anonymous": false, + "session_id": "abc-123" + }, + "created_at": 1726139644 + } + ] + } + ``` + + + From 8d2269f7621d11a195aaff4896fa2421dfc8238f Mon Sep 17 00:00:00 2001 From: Yi Xiao <54782454+YIXIAO0@users.noreply.github.com> Date: Fri, 13 Sep 2024 12:20:56 +0800 Subject: [PATCH 18/25] fix: copy and paste shortcut in the textarea of the workflow run panel (#8345) --- web/app/components/workflow/hooks/use-shortcuts.ts | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/web/app/components/workflow/hooks/use-shortcuts.ts b/web/app/components/workflow/hooks/use-shortcuts.ts index 439b521a30977..8b1003e89c677 100644 --- a/web/app/components/workflow/hooks/use-shortcuts.ts +++ b/web/app/components/workflow/hooks/use-shortcuts.ts @@ -70,15 +70,16 @@ export const useShortcuts = (): void => { }) useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.c`, (e) => { - const { showDebugAndPreviewPanel, showInputsPanel } = workflowStore.getState() - if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel && !showInputsPanel) { + const { showDebugAndPreviewPanel } = workflowStore.getState() + if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) { e.preventDefault() handleNodesCopy() } }, { exactMatch: true, useCapture: true }) useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.v`, (e) => { - if (shouldHandleShortcut(e)) { + const { showDebugAndPreviewPanel } = workflowStore.getState() + if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) { e.preventDefault() handleNodesPaste() } @@ -99,7 +100,8 @@ export const useShortcuts = (): void => { }, { exactMatch: true, useCapture: true }) useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.z`, (e) => { - if (shouldHandleShortcut(e)) { + const { showDebugAndPreviewPanel } = workflowStore.getState() + if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) { e.preventDefault() 
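// With the run panel open, Ctrl/Cmd+Z must reach the focused textarea's native undo,
// so the `!showDebugAndPreviewPanel` guard above skips canvas-level handling here;
// only when the canvas itself has focus does the shortcut step the workflow history back below.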
workflowHistoryShortcutsEnabled && handleHistoryBack() } From 4637ddaa7feaf8aca5193e8ba587da9ca32e80fd Mon Sep 17 00:00:00 2001 From: takatost Date: Fri, 13 Sep 2024 13:08:27 +0800 Subject: [PATCH 19/25] feat: add o1-series models support in Agent App (ReACT only) (#8350) --- .../model_providers/openai/llm/llm.py | 29 +++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index 048f27f30592e..95533ccfaf771 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -620,6 +620,9 @@ def _chat_generate( if "stream_options" in extra_model_kwargs: del extra_model_kwargs["stream_options"] + if "stop" in extra_model_kwargs: + del extra_model_kwargs["stop"] + # chat model response = client.chat.completions.create( messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], @@ -635,7 +638,7 @@ def _chat_generate( block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) if block_as_stream: - return self._handle_chat_block_as_stream_response(block_result, prompt_messages) + return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop) return block_result @@ -643,6 +646,7 @@ def _handle_chat_block_as_stream_response( self, block_result: LLMResult, prompt_messages: list[PromptMessage], + stop: Optional[list[str]] = None, ) -> Generator[LLMResultChunk, None, None]: """ Handle llm chat response @@ -652,15 +656,22 @@ def _handle_chat_block_as_stream_response( :param response: response :param prompt_messages: prompt messages :param tools: tools for tool calling + :param stop: stop words :return: llm response chunk generator """ + text = block_result.message.content + text = cast(str, text) + + if stop: + text = self.enforce_stop_tokens(text, stop) + yield LLMResultChunk( model=block_result.model, prompt_messages=prompt_messages, system_fingerprint=block_result.system_fingerprint, delta=LLMResultChunkDelta( index=0, - message=block_result.message, + message=AssistantPromptMessage(content=text), finish_reason="stop", usage=block_result.usage, ), @@ -912,6 +923,20 @@ def _clear_illegal_prompt_messages(self, model: str, prompt_messages: list[Promp ] ) + if model.startswith("o1"): + system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)]) + if system_message_count > 0: + new_prompt_messages = [] + for prompt_message in prompt_messages: + if isinstance(prompt_message, SystemPromptMessage): + prompt_message = UserPromptMessage( + content=prompt_message.content, + name=prompt_message.name, + ) + + new_prompt_messages.append(prompt_message) + prompt_messages = new_prompt_messages + return prompt_messages def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: From 82f7875a52a201a7efe922a622eabcec399fb202 Mon Sep 17 00:00:00 2001 From: Joe <79627742+ZhouhaoJiang@users.noreply.github.com> Date: Fri, 13 Sep 2024 13:44:19 +0800 Subject: [PATCH 20/25] feat: add langfuse sentry ignore error (#8353) --- api/extensions/ext_sentry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/extensions/ext_sentry.py b/api/extensions/ext_sentry.py index 3b7b0a37f4e8e..86e4720b3ffce 100644 --- a/api/extensions/ext_sentry.py +++ b/api/extensions/ext_sentry.py @@ -1,5 +1,6 @@ import openai import sentry_sdk +from langfuse import parse_error from 
sentry_sdk.integrations.celery import CeleryIntegration from sentry_sdk.integrations.flask import FlaskIntegration from werkzeug.exceptions import HTTPException @@ -10,7 +11,7 @@ def init_app(app): sentry_sdk.init( dsn=app.config.get("SENTRY_DSN"), integrations=[FlaskIntegration(), CeleryIntegration()], - ignore_errors=[HTTPException, ValueError, openai.APIStatusError], + ignore_errors=[HTTPException, ValueError, openai.APIStatusError, parse_error.defaultErrorResponse], traces_sample_rate=app.config.get("SENTRY_TRACES_SAMPLE_RATE", 1.0), profiles_sample_rate=app.config.get("SENTRY_PROFILES_SAMPLE_RATE", 1.0), environment=app.config.get("DEPLOY_ENV"), From 80a322aaa2223871013a0e1aa0d58903d38778b1 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 13 Sep 2024 13:45:13 +0800 Subject: [PATCH 21/25] chore: update version to 0.8.2 in packaging and docker-compose files (#8352) --- api/configs/packaging/__init__.py | 2 +- api/pyproject.toml | 1 - docker-legacy/docker-compose.yaml | 6 +++--- docker/docker-compose.yaml | 6 +++--- web/package.json | 2 +- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py index 103a6c58c3d75..3815a6fca2638 100644 --- a/api/configs/packaging/__init__.py +++ b/api/configs/packaging/__init__.py @@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings): CURRENT_VERSION: str = Field( description="Dify version", - default="0.8.1", + default="0.8.2", ) COMMIT_SHA: str = Field( diff --git a/api/pyproject.toml b/api/pyproject.toml index 823dbb651281b..83aa35c542929 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -61,7 +61,6 @@ ignore = [ "SIM117", # multiple-with-statements "SIM210", # if-expr-with-true-false "SIM300", # yoda-conditions, - "PT004", # pytest-no-assert ] [tool.ruff.lint.per-file-ignores] diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml index 33c7fefcbab09..f8c5700cd9422 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.8.1 + image: langgenius/dify-api:0.8.2 restart: always environment: # Startup mode, 'api' starts the API server. @@ -227,7 +227,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.8.1 + image: langgenius/dify-api:0.8.2 restart: always environment: CONSOLE_WEB_URL: '' @@ -396,7 +396,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.8.1 + image: langgenius/dify-web:0.8.2 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 0f97c90d5f4c5..b8e068fde028a 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -208,7 +208,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.8.1 + image: langgenius/dify-api:0.8.2 restart: always environment: # Use the shared environment variables. @@ -228,7 +228,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.8.1 + image: langgenius/dify-api:0.8.2 restart: always environment: # Use the shared environment variables. @@ -247,7 +247,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:0.8.1 + image: langgenius/dify-web:0.8.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/web/package.json b/web/package.json index 4f640b7ba361f..197a1e7e05de2 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.8.1", + "version": "0.8.2", "private": true, "engines": { "node": ">=18.17.0" From a45ac6ab9812897c69c6ce8392403b6c1cd5b5e0 Mon Sep 17 00:00:00 2001 From: sino Date: Fri, 13 Sep 2024 14:19:24 +0800 Subject: [PATCH 22/25] fix: ark token usage is none (#8351) --- .../model_providers/volcengine_maas/client.py | 6 ++---- .../model_providers/volcengine_maas/llm/llm.py | 12 +++++------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/client.py b/api/core/model_runtime/model_providers/volcengine_maas/client.py index d6f1356651e7b..cfe21e4b9f461 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/client.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/client.py @@ -208,11 +208,9 @@ def stream_chat( presence_penalty=presence_penalty, top_p=top_p, temperature=temperature, + stream_options={"include_usage": True}, ) - for chunk in chunks: - if not chunk.choices: - continue - yield chunk + yield from chunks def embeddings(self, texts: list[str]) -> CreateEmbeddingResponse: return self.ark.embeddings.create(model=self.endpoint_id, input=texts) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py index f8bf8fb821978..dec6c9d789c13 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py @@ -239,16 +239,14 @@ def _generate_v3( def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Generator: for chunk in chunks: - if not chunk.choices: - continue - choice = chunk.choices[0] - yield LLMResultChunk( model=model, prompt_messages=prompt_messages, delta=LLMResultChunkDelta( - index=choice.index, - message=AssistantPromptMessage(content=choice.delta.content, tool_calls=[]), + index=0, + message=AssistantPromptMessage( + content=chunk.choices[0].delta.content if chunk.choices else "", tool_calls=[] + ), usage=self._calc_response_usage( model=model, credentials=credentials, @@ -257,7 +255,7 @@ def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Gene ) if chunk.usage else None, - finish_reason=choice.finish_reason, + finish_reason=chunk.choices[0].finish_reason if chunk.choices else None, ), ) From 08c486452f38a6102860c8a36da15bc50196763e Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 13 Sep 2024 14:24:35 +0800 Subject: [PATCH 23/25] fix: score_threshold handling in vector search methods (#8356) --- api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py | 4 ++-- api/core/rag/datasource/vdb/chroma/chroma_vector.py | 2 +- .../rag/datasource/vdb/elasticsearch/elasticsearch_vector.py | 2 +- api/core/rag/datasource/vdb/milvus/milvus_vector.py | 2 +- api/core/rag/datasource/vdb/myscale/myscale_vector.py | 2 +- api/core/rag/datasource/vdb/opensearch/opensearch_vector.py | 2 +- api/core/rag/datasource/vdb/oracle/oraclevector.py | 4 ++-- api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py | 2 +- api/core/rag/datasource/vdb/pgvector/pgvector.py | 2 +- api/core/rag/datasource/vdb/qdrant/qdrant_vector.py | 4 ++-- api/core/rag/datasource/vdb/relyt/relyt_vector.py | 
2 +- api/core/rag/datasource/vdb/tencent/tencent_vector.py | 2 +- api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py | 2 +- api/core/rag/datasource/vdb/weaviate/weaviate_vector.py | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py index ef48d22963947..612542dab1df1 100644 --- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py +++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py @@ -239,7 +239,7 @@ def delete_by_metadata_field(self, key: str, value: str) -> None: def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: from alibabacloud_gpdb20160503 import models as gpdb_20160503_models - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = kwargs.get("score_threshold") or 0.0 request = gpdb_20160503_models.QueryCollectionDataRequest( dbinstance_id=self.config.instance_id, region_id=self.config.region_id, @@ -267,7 +267,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: from alibabacloud_gpdb20160503 import models as gpdb_20160503_models - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) request = gpdb_20160503_models.QueryCollectionDataRequest( dbinstance_id=self.config.instance_id, region_id=self.config.region_id, diff --git a/api/core/rag/datasource/vdb/chroma/chroma_vector.py b/api/core/rag/datasource/vdb/chroma/chroma_vector.py index 41f749279dc14..610aa498ab462 100644 --- a/api/core/rag/datasource/vdb/chroma/chroma_vector.py +++ b/api/core/rag/datasource/vdb/chroma/chroma_vector.py @@ -92,7 +92,7 @@ def text_exists(self, id: str) -> bool: def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: collection = self._client.get_or_create_collection(self._collection_name) results: QueryResult = collection.query(query_embeddings=query_vector, n_results=kwargs.get("top_k", 4)) - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) ids: list[str] = results["ids"][0] documents: list[str] = results["documents"][0] diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index c52ca5f488ccf..f8300cc2715a6 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -131,7 +131,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc docs = [] for doc, score in docs_and_scores: - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if score > score_threshold: doc.metadata["score"] = score docs.append(doc) diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py index 7d78f542567a8..bdca59f869ca5 100644 --- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py +++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py @@ -141,7 +141,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc for result in results[0]: metadata = result["entity"].get(Field.METADATA_KEY.value) metadata["score"] = result["distance"] - score_threshold = 
kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if result["distance"] > score_threshold: doc = Document(page_content=result["entity"].get(Field.CONTENT_KEY.value), metadata=metadata) docs.append(doc) diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py index fbd7864eddce7..1bd5bcd3e456a 100644 --- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py +++ b/api/core/rag/datasource/vdb/myscale/myscale_vector.py @@ -122,7 +122,7 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: def _search(self, dist: str, order: SortOrder, **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 5) - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) where_str = ( f"WHERE dist < {1 - score_threshold}" if self._metric.upper() == "COSINE" and order == SortOrder.ASC and score_threshold > 0.0 diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py index a6f70c873342f..8d2e0a86ab9e5 100644 --- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py +++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py @@ -170,7 +170,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc metadata = {} metadata["score"] = hit["_score"] - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if hit["_score"] > score_threshold: doc = Document(page_content=hit["_source"].get(Field.CONTENT_KEY.value), metadata=metadata) docs.append(doc) diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py index 16365823620ab..b974fa80a4a21 100644 --- a/api/core/rag/datasource/vdb/oracle/oraclevector.py +++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py @@ -200,7 +200,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc [numpy.array(query_vector)], ) docs = [] - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) for record in cur: metadata, text, distance = record score = 1 - distance @@ -212,7 +212,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 5) # just not implement fetch by score_threshold now, may be later - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if len(query) > 0: # Check which language the query is in zh_pattern = re.compile("[\u4e00-\u9fa5]+") diff --git a/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py b/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py index 765436006b3b0..de2d65b2233c6 100644 --- a/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py +++ b/api/core/rag/datasource/vdb/pgvecto_rs/pgvecto_rs.py @@ -198,7 +198,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc metadata = record.meta score = 1 - dis metadata["score"] = score - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if score > score_threshold: doc = Document(page_content=record.text, metadata=metadata) docs.append(doc) diff --git 
a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index c35464b464c82..79879d4f6394b 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -144,7 +144,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc (json.dumps(query_vector),), ) docs = [] - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) for record in cur: metadata, text, distance = record score = 1 - distance diff --git a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py index 2bf417e99313d..f418e3ca0514a 100644 --- a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py +++ b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py @@ -333,13 +333,13 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc limit=kwargs.get("top_k", 4), with_payload=True, with_vectors=True, - score_threshold=kwargs.get("score_threshold", 0.0), + score_threshold=float(kwargs.get("score_threshold") or 0.0), ) docs = [] for result in results: metadata = result.payload.get(Field.METADATA_KEY.value) or {} # duplicate check score threshold - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if result.score > score_threshold: metadata["score"] = result.score doc = Document( diff --git a/api/core/rag/datasource/vdb/relyt/relyt_vector.py b/api/core/rag/datasource/vdb/relyt/relyt_vector.py index 76f214ffa4dd7..76da14496591a 100644 --- a/api/core/rag/datasource/vdb/relyt/relyt_vector.py +++ b/api/core/rag/datasource/vdb/relyt/relyt_vector.py @@ -230,7 +230,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc # Organize results. 
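# A quick illustration of the bug this patch fixes: when a caller passes
# score_threshold=None explicitly, kwargs.get("score_threshold", 0.0) returns that
# None (the default only applies to a missing key), while
# float(kwargs.get("score_threshold") or 0.0) falls back to 0.0 for both the
# missing-key and explicit-None cases, which is why every hunk switches to the `or` form.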
docs = [] for document, score in results: - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) if 1 - score > score_threshold: docs.append(document) return docs diff --git a/api/core/rag/datasource/vdb/tencent/tencent_vector.py b/api/core/rag/datasource/vdb/tencent/tencent_vector.py index b550743f3959c..faa373017becf 100644 --- a/api/core/rag/datasource/vdb/tencent/tencent_vector.py +++ b/api/core/rag/datasource/vdb/tencent/tencent_vector.py @@ -153,7 +153,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc limit=kwargs.get("top_k", 4), timeout=self._client_config.timeout, ) - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) return self._get_search_res(res, score_threshold) def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: diff --git a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py index a2eff6ff043c0..20490d32154a4 100644 --- a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py +++ b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py @@ -185,7 +185,7 @@ def delete_by_metadata_field(self, key: str, value: str) -> None: def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 5) - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) filter = kwargs.get("filter") distance = 1 - score_threshold diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py index 64e92c5b825ad..6eee344b9bc18 100644 --- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py +++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py @@ -205,7 +205,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc docs = [] for doc, score in docs_and_scores: - score_threshold = kwargs.get("score_threshold", 0.0) + score_threshold = float(kwargs.get("score_threshold") or 0.0) # check score threshold if score > score_threshold: doc.metadata["score"] = score From 6613b8f2e015789cad41299b969f47c848825273 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Fri, 13 Sep 2024 14:24:49 +0800 Subject: [PATCH 24/25] chore: fix unnecessary string concatenation in single line (#8311) --- api/commands.py | 9 ++++----- api/configs/feature/__init__.py | 6 +++--- api/controllers/console/app/workflow.py | 2 +- api/controllers/console/datasets/datasets.py | 2 +- api/controllers/console/error.py | 4 +--- api/controllers/console/workspace/model_providers.py | 2 +- .../easy_ui_based_app/prompt_template/manager.py | 2 +- .../app_config/easy_ui_based_app/variables/manager.py | 2 +- api/core/app/apps/base_app_runner.py | 2 +- api/core/app/apps/workflow_logging_callback.py | 6 +++--- .../model_runtime/model_providers/__base/ai_model.py | 2 +- api/core/model_runtime/model_providers/cohere/llm/llm.py | 2 +- .../model_providers/huggingface_hub/llm/llm.py | 4 ++-- .../huggingface_hub/text_embedding/text_embedding.py | 2 +- api/core/model_runtime/model_providers/ollama/llm/llm.py | 4 ++-- .../model_runtime/model_providers/replicate/llm/llm.py | 2 +- api/core/model_runtime/model_providers/tongyi/llm/llm.py | 2 +- api/core/rag/datasource/vdb/relyt/relyt_vector.py | 2 +- api/core/rag/docstore/dataset_docstore.py | 2 +- api/core/rag/extractor/notion_extractor.py | 2 +-
api/core/rag/splitter/text_splitter.py | 6 +++--- api/core/tools/provider/builtin/gaode/gaode.py | 2 +- .../tools/provider/builtin/gaode/tools/gaode_weather.py | 2 +- .../provider/builtin/github/tools/github_repositories.py | 2 +- .../tools/provider/builtin/pubmed/tools/pubmed_search.py | 4 ++-- .../tools/provider/builtin/twilio/tools/send_message.py | 2 +- api/libs/json_in_md_parser.py | 2 +- api/services/app_dsl_service.py | 6 +++--- api/services/dataset_service.py | 6 +++--- .../core/prompt/test_advanced_prompt_transform.py | 2 +- 30 files changed, 46 insertions(+), 49 deletions(-) diff --git a/api/commands.py b/api/commands.py index db96fbae461f4..887270b43e677 100644 --- a/api/commands.py +++ b/api/commands.py @@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm): ) @click.confirmation_option( prompt=click.style( - "Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red" + "Are you sure you want to reset encrypt key pair? this operation cannot be rolled back!", fg="red" ) ) def reset_encrypt_key_pair(): @@ -131,7 +131,7 @@ def reset_encrypt_key_pair(): click.echo( click.style( - "Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id), + "Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id), fg="green", ) ) @@ -275,8 +275,7 @@ def migrate_knowledge_vector_database(): for dataset in datasets: total_count = total_count + 1 click.echo( - f"Processing the {total_count} dataset {dataset.id}. " - + f"{create_count} created, {skipped_count} skipped." + f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped." ) try: click.echo("Create dataset vdb index: {}".format(dataset.id)) @@ -594,7 +593,7 @@ def create_tenant(email: str, language: Optional[str] = None, name: Optional[str click.echo( click.style( - "Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password), + "Congratulations! Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password), fg="green", ) ) diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index 12e8e6659395b..f794552c36d26 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -129,12 +129,12 @@ class EndpointConfig(BaseSettings): ) SERVICE_API_URL: str = Field( - description="Service API Url prefix." "used to display Service API Base Url to the front-end.", + description="Service API Url prefix. used to display Service API Base Url to the front-end.", default="", ) APP_WEB_URL: str = Field( - description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.", + description="WebApp Url prefix. used to display WebAPP API Base Url to the front-end.", default="", ) @@ -272,7 +272,7 @@ class LoggingConfig(BaseSettings): """ LOG_LEVEL: str = Field( - description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.", + description="Log output level, default to INFO. 
It is recommended to set it to ERROR for production.", default="INFO", ) diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index e44820f6345c4..b488deb89d90c 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -465,6 +465,6 @@ def post(self, app_model: App): api.add_resource(PublishedWorkflowApi, "/apps//workflows/publish") api.add_resource(DefaultBlockConfigsApi, "/apps//workflows/default-workflow-block-configs") api.add_resource( - DefaultBlockConfigApi, "/apps//workflows/default-workflow-block-configs" "/" + DefaultBlockConfigApi, "/apps//workflows/default-workflow-block-configs/" ) api.add_resource(ConvertToWorkflowApi, "/apps//convert-to-workflow") diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 08603bfc218d3..2c4e5ac60789e 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -399,7 +399,7 @@ def post(self): ) except LLMBadRequestError: raise ProviderNotInitializeError( - "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider." + "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider." ) except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) diff --git a/api/controllers/console/error.py b/api/controllers/console/error.py index 1c70ea6c591ec..870e547728ae9 100644 --- a/api/controllers/console/error.py +++ b/api/controllers/console/error.py @@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException): class NotInitValidateError(BaseHTTPException): error_code = "not_init_validated" - description = ( - "Init validation has not been completed yet. " "Please proceed with the init validation process first." - ) + description = "Init validation has not been completed yet. Please proceed with the init validation process first." 
code = 401 diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index 8c384202260a0..fe0bcf73384ed 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -218,7 +218,7 @@ def get(self, provider: str): api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers//credentials/validate") api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/") api.add_resource( - ModelProviderIconApi, "/workspaces/current/model-providers//" "/" + ModelProviderIconApi, "/workspaces/current/model-providers///" ) api.add_resource( diff --git a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py index de91c9a0657bd..82a0e56ce840e 100644 --- a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py @@ -86,7 +86,7 @@ def validate_and_set_defaults(cls, app_mode: AppMode, config: dict) -> tuple[dic if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value: if not config["chat_prompt_config"] and not config["completion_prompt_config"]: raise ValueError( - "chat_prompt_config or completion_prompt_config is required " "when prompt_type is advanced" + "chat_prompt_config or completion_prompt_config is required when prompt_type is advanced" ) model_mode_vals = [mode.value for mode in ModelMode] diff --git a/api/core/app/app_config/easy_ui_based_app/variables/manager.py b/api/core/app/app_config/easy_ui_based_app/variables/manager.py index 2c0232c743096..e70522f21de95 100644 --- a/api/core/app/app_config/easy_ui_based_app/variables/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/variables/manager.py @@ -115,7 +115,7 @@ def validate_variables_and_set_defaults(cls, config: dict) -> tuple[dict, list[s pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$") if pattern.match(form_item["variable"]) is None: - raise ValueError("variable in user_input_form must be a string, " "and cannot start with a number") + raise ValueError("variable in user_input_form must be a string, and cannot start with a number") variables.append(form_item["variable"]) diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index bd2be18bfd2d5..1b412b86396c2 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -379,7 +379,7 @@ def check_hosting_moderation( queue_manager=queue_manager, app_generate_entity=application_generate_entity, prompt_messages=prompt_messages, - text="I apologize for any confusion, " "but I'm an AI assistant to be helpful, harmless, and honest.", + text="I apologize for any confusion, but I'm an AI assistant to be helpful, harmless, and honest.", stream=application_generate_entity.stream, ) diff --git a/api/core/app/apps/workflow_logging_callback.py b/api/core/app/apps/workflow_logging_callback.py index 388cb83180227..60683b0f21baf 100644 --- a/api/core/app/apps/workflow_logging_callback.py +++ b/api/core/app/apps/workflow_logging_callback.py @@ -84,7 +84,7 @@ def on_workflow_node_execute_succeeded(self, event: NodeRunSucceededEvent) -> No if route_node_state.node_run_result: node_run_result = route_node_state.node_run_result self.print_text( - f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else 
''}", + f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="green", ) self.print_text( @@ -116,7 +116,7 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None: node_run_result = route_node_state.node_run_result self.print_text(f"Error: {node_run_result.error}", color="red") self.print_text( - f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red", ) self.print_text( @@ -125,7 +125,7 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None: color="red", ) self.print_text( - f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", + f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", color="red", ) diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py index e7e343f00ddeb..79a1d28ebe637 100644 --- a/api/core/model_runtime/model_providers/__base/ai_model.py +++ b/api/core/model_runtime/model_providers/__base/ai_model.py @@ -200,7 +200,7 @@ def predefined_models(self) -> list[AIModelEntity]: except Exception as e: model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml") raise Exception( - f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}:" f" {str(e)}" + f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}" ) # cache model schema diff --git a/api/core/model_runtime/model_providers/cohere/llm/llm.py b/api/core/model_runtime/model_providers/cohere/llm/llm.py index 203ca9c4a00f6..3863ad3308196 100644 --- a/api/core/model_runtime/model_providers/cohere/llm/llm.py +++ b/api/core/model_runtime/model_providers/cohere/llm/llm.py @@ -621,7 +621,7 @@ def _convert_tools(self, tools: list[PromptMessageTool]) -> list[Tool]: desc = p_val["description"] if "enum" in p_val: - desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]" + desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]" parameter_definitions[p_key] = ToolParameterDefinitionsValue( description=desc, type=p_val["type"], required=required diff --git a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py b/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py index 10c6d553f3d63..48ab477c50ca8 100644 --- a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py +++ b/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py @@ -96,7 +96,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: if credentials["task_type"] not in ("text2text-generation", "text-generation"): raise CredentialsValidateFailedError( - "Huggingface Hub Task Type must be one of text2text-generation, " "text-generation." + "Huggingface Hub Task Type must be one of text2text-generation, text-generation." 
) client = InferenceClient(token=credentials["huggingfacehub_api_token"]) @@ -282,7 +282,7 @@ def _get_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str): valid_tasks = ("text2text-generation", "text-generation") if model_info.pipeline_tag not in valid_tasks: - raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.") + raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.") except Exception as e: raise CredentialsValidateFailedError(f"{str(e)}") diff --git a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py index cb7a30bbe5134..4ad96c4233fb1 100644 --- a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py @@ -121,7 +121,7 @@ def _check_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str valid_tasks = "feature-extraction" if model_info.pipeline_tag not in valid_tasks: - raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.") + raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.") except Exception as e: raise CredentialsValidateFailedError(f"{str(e)}") diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index 3f32f454e499d..1ed77a2ee8ed4 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -572,7 +572,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US="Size of context window"), type=ParameterType.INT, help=I18nObject( - en_US="Sets the size of the context window used to generate the next token. " "(Default: 2048)" + en_US="Sets the size of the context window used to generate the next token. (Default: 2048)" ), default=2048, min=1, @@ -650,7 +650,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US="Format"), type=ParameterType.STRING, help=I18nObject( - en_US="the format to return a response in." " Currently the only accepted value is json." + en_US="the format to return a response in. Currently the only accepted value is json." 
), options=["json"], ), diff --git a/api/core/model_runtime/model_providers/replicate/llm/llm.py b/api/core/model_runtime/model_providers/replicate/llm/llm.py index e77372cae23b6..daef8949fb87a 100644 --- a/api/core/model_runtime/model_providers/replicate/llm/llm.py +++ b/api/core/model_runtime/model_providers/replicate/llm/llm.py @@ -86,7 +86,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: if model.count("/") != 1: raise CredentialsValidateFailedError( - "Replicate Model Name must be provided, " "format: {user_name}/{model_name}" + "Replicate Model Name must be provided, format: {user_name}/{model_name}" ) try: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py index ca708d931c09f..cd7718361ff03 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py +++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py @@ -472,7 +472,7 @@ def _convert_tools(self, tools: list[PromptMessageTool]) -> list[dict]: for p_key, p_val in properties.items(): desc = p_val["description"] if "enum" in p_val: - desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]" + desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]" properties_definitions[p_key] = { "description": desc, diff --git a/api/core/rag/datasource/vdb/relyt/relyt_vector.py b/api/core/rag/datasource/vdb/relyt/relyt_vector.py index 76da14496591a..f47f75718a744 100644 --- a/api/core/rag/datasource/vdb/relyt/relyt_vector.py +++ b/api/core/rag/datasource/vdb/relyt/relyt_vector.py @@ -245,7 +245,7 @@ def similarity_search_with_score_by_vector( try: from sqlalchemy.engine import Row except ImportError: - raise ImportError("Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'.") + raise ImportError("Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'.") filter_condition = "" if filter is not None: diff --git a/api/core/rag/docstore/dataset_docstore.py b/api/core/rag/docstore/dataset_docstore.py index 0d4dff5b89027..319a2612c7ecb 100644 --- a/api/core/rag/docstore/dataset_docstore.py +++ b/api/core/rag/docstore/dataset_docstore.py @@ -88,7 +88,7 @@ def add_documents(self, docs: Sequence[Document], allow_update: bool = True) -> # NOTE: doc could already exist in the store, but we overwrite it if not allow_update and segment_document: raise ValueError( - f"doc_id {doc.metadata['doc_id']} already exists. " "Set allow_update to True to overwrite." + f"doc_id {doc.metadata['doc_id']} already exists. Set allow_update to True to overwrite." ) # calc embedding use tokens diff --git a/api/core/rag/extractor/notion_extractor.py b/api/core/rag/extractor/notion_extractor.py index b02e30de623b7..0ee24983a4264 100644 --- a/api/core/rag/extractor/notion_extractor.py +++ b/api/core/rag/extractor/notion_extractor.py @@ -50,7 +50,7 @@ def __init__( integration_token = dify_config.NOTION_INTEGRATION_TOKEN if integration_token is None: raise ValueError( - "Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`." + "Must specify `integration_token` or set environment variable `NOTION_INTEGRATION_TOKEN`." 
) self._notion_access_token = integration_token diff --git a/api/core/rag/splitter/text_splitter.py b/api/core/rag/splitter/text_splitter.py index 97d0721304405..161c36607da91 100644 --- a/api/core/rag/splitter/text_splitter.py +++ b/api/core/rag/splitter/text_splitter.py @@ -60,7 +60,7 @@ def __init__( """ if chunk_overlap > chunk_size: raise ValueError( - f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " f"({chunk_size}), should be smaller." + f"Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller." ) self._chunk_size = chunk_size self._chunk_overlap = chunk_overlap @@ -117,7 +117,7 @@ def _merge_splits(self, splits: Iterable[str], separator: str, lengths: list[int if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size: if total > self._chunk_size: logger.warning( - f"Created a chunk of size {total}, " f"which is longer than the specified {self._chunk_size}" + f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}" ) if len(current_doc) > 0: doc = self._join_docs(current_doc, separator) @@ -153,7 +153,7 @@ def _huggingface_tokenizer_length(text: str) -> int: except ImportError: raise ValueError( - "Could not import transformers python package. " "Please install it with `pip install transformers`." + "Could not import transformers python package. Please install it with `pip install transformers`." ) return cls(length_function=_huggingface_tokenizer_length, **kwargs) diff --git a/api/core/tools/provider/builtin/gaode/gaode.py b/api/core/tools/provider/builtin/gaode/gaode.py index a3e50da0012ef..49a8e537fb907 100644 --- a/api/core/tools/provider/builtin/gaode/gaode.py +++ b/api/core/tools/provider/builtin/gaode/gaode.py @@ -14,7 +14,7 @@ def _validate_credentials(self, credentials: dict) -> None: try: response = requests.get( - url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}" "".format( + url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}".format( address=urllib.parse.quote("广东省广州市天河区广州塔"), apikey=credentials.get("api_key") ) ) diff --git a/api/core/tools/provider/builtin/gaode/tools/gaode_weather.py b/api/core/tools/provider/builtin/gaode/tools/gaode_weather.py index 843504eefd962..ea06e2ce611cb 100644 --- a/api/core/tools/provider/builtin/gaode/tools/gaode_weather.py +++ b/api/core/tools/provider/builtin/gaode/tools/gaode_weather.py @@ -27,7 +27,7 @@ def _invoke( city_response = s.request( method="GET", headers={"Content-Type": "application/json; charset=utf-8"}, - url="{url}/config/district?keywords={keywords}" "&subdistrict=0&extensions=base&key={apikey}" "".format( + url="{url}/config/district?keywords={keywords}&subdistrict=0&extensions=base&key={apikey}".format( url=api_domain, keywords=city, apikey=self.runtime.credentials.get("api_key") ), ) diff --git a/api/core/tools/provider/builtin/github/tools/github_repositories.py b/api/core/tools/provider/builtin/github/tools/github_repositories.py index 3eab8bf8dc64a..32f9922e65178 100644 --- a/api/core/tools/provider/builtin/github/tools/github_repositories.py +++ b/api/core/tools/provider/builtin/github/tools/github_repositories.py @@ -39,7 +39,7 @@ def _invoke( response = s.request( method="GET", headers=headers, - url=f"{api_domain}/search/repositories?" 
f"q={quote(query)}&sort=stars&per_page={top_n}&order=desc", + url=f"{api_domain}/search/repositories?q={quote(query)}&sort=stars&per_page={top_n}&order=desc", ) response_data = response.json() if response.status_code == 200 and isinstance(response_data.get("items"), list): diff --git a/api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py b/api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py index fedfdbd859b4f..3a4f374ea0b0b 100644 --- a/api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py +++ b/api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py @@ -51,7 +51,7 @@ def run(self, query: str) -> str: try: # Retrieve the top-k results for the query docs = [ - f"Published: {result['pub_date']}\nTitle: {result['title']}\n" f"Summary: {result['summary']}" + f"Published: {result['pub_date']}\nTitle: {result['title']}\nSummary: {result['summary']}" for result in self.load(query[: self.ARXIV_MAX_QUERY_LENGTH]) ] @@ -97,7 +97,7 @@ def retrieve_article(self, uid: str, webenv: str) -> dict: if e.code == 429 and retry < self.max_retry: # Too Many Requests error # wait for an exponentially increasing amount of time - print(f"Too Many Requests, " f"waiting for {self.sleep_time:.2f} seconds...") + print(f"Too Many Requests, waiting for {self.sleep_time:.2f} seconds...") time.sleep(self.sleep_time) self.sleep_time *= 2 retry += 1 diff --git a/api/core/tools/provider/builtin/twilio/tools/send_message.py b/api/core/tools/provider/builtin/twilio/tools/send_message.py index 156249bc960c8..5ee839baa56f0 100644 --- a/api/core/tools/provider/builtin/twilio/tools/send_message.py +++ b/api/core/tools/provider/builtin/twilio/tools/send_message.py @@ -39,7 +39,7 @@ def set_validator(cls, values: dict) -> dict: try: from twilio.rest import Client except ImportError: - raise ImportError("Could not import twilio python package. " "Please install it with `pip install twilio`.") + raise ImportError("Could not import twilio python package. Please install it with `pip install twilio`.") account_sid = values.get("account_sid") auth_token = values.get("auth_token") values["from_number"] = values.get("from_number") diff --git a/api/libs/json_in_md_parser.py b/api/libs/json_in_md_parser.py index 39c17534e747b..185ff3f95eba3 100644 --- a/api/libs/json_in_md_parser.py +++ b/api/libs/json_in_md_parser.py @@ -37,6 +37,6 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict: for key in expected_keys: if key not in json_obj: raise OutputParserError( - f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" + f"Got invalid return object. 
Expected key `{key}` to be present, but got {json_obj}" ) return json_obj diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 8dcefbadc084d..2fe39b522491a 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -238,7 +238,7 @@ def _import_and_create_new_workflow_based_app( :param use_icon_as_answer_icon: use app icon as answer icon """ if not workflow_data: - raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow") + raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow") app = cls._create_app( tenant_id=tenant_id, @@ -283,7 +283,7 @@ def _import_and_overwrite_workflow_based_app( :param account: Account instance """ if not workflow_data: - raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow") + raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow") # fetch draft workflow by app_model workflow_service = WorkflowService() @@ -337,7 +337,7 @@ def _import_and_create_new_model_config_based_app( :param icon_background: app icon background """ if not model_config_data: - raise ValueError("Missing model_config in data argument " "when app mode is chat, agent-chat or completion") + raise ValueError("Missing model_config in data argument when app mode is chat, agent-chat or completion") app = cls._create_app( tenant_id=tenant_id, diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index a52b5b7e7d785..fa017bfa42a7c 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -181,7 +181,7 @@ def check_dataset_model_setting(dataset): "in the Settings -> Model Provider." ) except ProviderTokenNotInitError as ex: - raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}") + raise ValueError(f"The dataset in unavailable, due to: {ex.description}") @staticmethod def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str): @@ -195,10 +195,10 @@ def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, ) except LLMBadRequestError: raise ValueError( - "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider." + "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider." 
) except ProviderTokenNotInitError as ex: - raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}") + raise ValueError(f"The dataset in unavailable, due to: {ex.description}") @staticmethod def update_dataset(dataset_id, data, user): diff --git a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py index 24bbde6d4ebb6..24b338601df7d 100644 --- a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py @@ -53,7 +53,7 @@ def test__get_completion_model_prompt_messages(): "#context#": context, "#histories#": "\n".join( [ - f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: " f"{prompt.content}" + f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: {prompt.content}" for prompt in history_prompt_messages ] ), From 258d39d4fd7fd1db069d0d54c7c6514f91fe88b3 Mon Sep 17 00:00:00 2001 From: Joel Date: Fri, 13 Sep 2024 16:00:42 +0800 Subject: [PATCH 25/25] fix: do not mask empty values with the hidden placeholder in edit mode --- .../model-modal/model-load-balancing-entry-modal.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/header/account-setting/model-provider-page/model-modal/model-load-balancing-entry-modal.tsx b/web/app/components/header/account-setting/model-provider-page/model-modal/model-load-balancing-entry-modal.tsx index 86857f1ab24c3..e18b49084485d 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-modal/model-load-balancing-entry-modal.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-modal/model-load-balancing-entry-modal.tsx @@ -192,12 +192,12 @@ const ModelLoadBalancingEntryModal: FC = ({ }) const getSecretValues = useCallback((v: FormValue) => { return secretFormSchemas.reduce((prev, next) => { - if (v[next.variable] === initialFormSchemasValue[next.variable]) + if (isEditMode && v[next.variable] && v[next.variable] === initialFormSchemasValue[next.variable]) prev[next.variable] = '[__HIDDEN__]' return prev }, {} as Record) - }, [initialFormSchemasValue, secretFormSchemas]) + }, [initialFormSchemasValue, isEditMode, secretFormSchemas]) // const handleValueChange = ({ __model_type, __model_name, ...v }: FormValue) => { const handleValueChange = (v: FormValue) => {
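// getSecretValues above now masks a credential with '[__HIDDEN__]' only in edit mode
// when the field is non-empty and unchanged from its initial value; a value the user
// cleared is sent as-is, letting the backend distinguish "untouched" from "emptied".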