Commit 8a3b9aa

Revert "Feat: add recall strategy (#414)"
This reverts commit a375911.
Parent: a375911

File tree

18 files changed: +41 additions, -1393 deletions

poetry.lock

Lines changed: 3 additions & 47 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 1 addition & 3 deletions
@@ -107,9 +107,7 @@ all = [
     "markitdown[docx,pdf,pptx,xls,xlsx] (>=0.1.1,<0.2.0)",
     "pymilvus (>=2.6.1,<3.0.0)",
     "datasketch (>=1.6.5,<2.0.0)",
-    "jieba (>=0.38.1,<0.42.1)",
-    "rank-bm25 (>=0.2.2)",
-    "cachetools (>=6.0.0)",
+
     # NOT exist in the above optional groups
     # Because they are either huge-size dependencies or infrequently used dependencies.
     # We kindof don't want users to install them.
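
Note on the three dependencies dropped above: jieba (Chinese word segmentation), rank-bm25 (lexical BM25 scoring), and cachetools (in-memory caching) existed to support the reverted recall strategy. As a rough illustration of how such a stack is typically wired together, here is a minimal sketch of the general pattern; it is not code from the reverted feature, and the corpus and function names are invented:

    # Minimal BM25 recall sketch using the libraries removed from the "all" extra.
    # Illustrative only: corpus/recall are hypothetical names, not MemOS code.
    import jieba
    from cachetools import TTLCache, cached
    from rank_bm25 import BM25Okapi

    corpus = ["memory operating systems manage long-term memory", "BM25 是基于词项重叠的排序函数"]
    tokenized_corpus = [jieba.lcut(doc) for doc in corpus]  # jieba segments Chinese text into words
    bm25 = BM25Okapi(tokenized_corpus)

    @cached(cache=TTLCache(maxsize=1024, ttl=300))  # cachetools memoizes repeated queries for 5 minutes
    def recall(query: str) -> list[float]:
        return list(bm25.get_scores(jieba.lcut(query)))

    print(recall("long-term memory"))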

src/memos/api/config.py

Lines changed: 3 additions & 31 deletions
@@ -419,31 +419,17 @@ def get_embedder_config() -> dict[str, Any]:
             },
         }
 
-    @staticmethod
-    def get_reader_config() -> dict[str, Any]:
-        """Get reader configuration."""
-        return {
-            "backend": os.getenv("MEM_READER_BACKEND", "simple_struct"),
-            "config": {
-                "chunk_type": os.getenv("MEM_READER_CHAT_CHUNK_TYPE", "default"),
-                "chunk_length": int(os.getenv("MEM_READER_CHAT_CHUNK_TOKEN_SIZE", 1600)),
-                "chunk_session": int(os.getenv("MEM_READER_CHAT_CHUNK_SESS_SIZE", 20)),
-                "chunk_overlap": int(os.getenv("MEM_READER_CHAT_CHUNK_OVERLAP", 2)),
-            },
-        }
-
     @staticmethod
     def get_internet_config() -> dict[str, Any]:
         """Get embedder configuration."""
-        reader_config = APIConfig.get_reader_config()
         return {
             "backend": "bocha",
             "config": {
                 "api_key": os.getenv("BOCHA_API_KEY"),
                 "max_results": 15,
                 "num_per_request": 10,
                 "reader": {
-                    "backend": reader_config["backend"],
+                    "backend": "simple_struct",
                     "config": {
                         "llm": {
                             "backend": "openai",
@@ -469,7 +455,6 @@ def get_internet_config() -> dict[str, Any]:
                             "min_sentences_per_chunk": 1,
                         },
                     },
-                    "chat_chunker": reader_config,
                 },
             },
         },
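
For readers skimming the revert: the helper removed in the first hunk above (get_reader_config) simply assembled an env-driven chunking config, and after this commit the reader backend is hardcoded to "simple_struct" again. A standalone sketch of that env-reading shape, kept only to clarify what is being dropped; the function name below is ours, and the helper no longer exists in APIConfig:

    import os
    from typing import Any

    def reader_config_from_env() -> dict[str, Any]:
        # Mirrors the shape of the removed APIConfig.get_reader_config (hypothetical helper name).
        return {
            "backend": os.getenv("MEM_READER_BACKEND", "simple_struct"),
            "config": {
                "chunk_type": os.getenv("MEM_READER_CHAT_CHUNK_TYPE", "default"),
                "chunk_length": int(os.getenv("MEM_READER_CHAT_CHUNK_TOKEN_SIZE", 1600)),
                "chunk_session": int(os.getenv("MEM_READER_CHAT_CHUNK_SESS_SIZE", 20)),
                "chunk_overlap": int(os.getenv("MEM_READER_CHAT_CHUNK_OVERLAP", 2)),
            },
        }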
@@ -671,8 +656,6 @@ def get_product_default_config() -> dict[str, Any]:
         openai_config = APIConfig.get_openai_config()
         qwen_config = APIConfig.qwen_config()
         vllm_config = APIConfig.vllm_config()
-        reader_config = APIConfig.get_reader_config()
-
         backend_model = {
             "openai": openai_config,
             "huggingface": qwen_config,
@@ -684,7 +667,7 @@ def get_product_default_config() -> dict[str, Any]:
             "user_id": os.getenv("MOS_USER_ID", "root"),
             "chat_model": {"backend": backend, "config": backend_model[backend]},
             "mem_reader": {
-                "backend": reader_config["backend"],
+                "backend": "simple_struct",
                 "config": {
                     "llm": APIConfig.get_memreader_config(),
                     "embedder": APIConfig.get_embedder_config(),
@@ -697,7 +680,6 @@ def get_product_default_config() -> dict[str, Any]:
                         "min_sentences_per_chunk": 1,
                     },
                 },
-                "chat_chunker": reader_config,
             },
         },
         "enable_textual_memory": True,
@@ -768,7 +750,6 @@ def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, General
         qwen_config = APIConfig.qwen_config()
         vllm_config = APIConfig.vllm_config()
         mysql_config = APIConfig.get_mysql_config()
-        reader_config = APIConfig.get_reader_config()
         backend = os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai")
         backend_model = {
             "openai": openai_config,
@@ -783,7 +764,7 @@ def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, General
                 "config": backend_model[backend],
             },
             "mem_reader": {
-                "backend": reader_config["backend"],
+                "backend": "simple_struct",
                 "config": {
                     "llm": APIConfig.get_memreader_config(),
                     "embedder": APIConfig.get_embedder_config(),
@@ -796,7 +777,6 @@ def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, General
                         "min_sentences_per_chunk": 1,
                     },
                 },
-                "chat_chunker": reader_config,
             },
         },
         "enable_textual_memory": True,
@@ -865,10 +845,6 @@ def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, General
                     "LongTermMemory": os.getenv("NEBULAR_LONGTERM_MEMORY", 1e6),
                     "UserMemory": os.getenv("NEBULAR_USER_MEMORY", 1e6),
                 },
-                "search_strategy": {
-                    "bm25": bool(os.getenv("BM25_CALL", "false") == "true"),
-                    "cot": bool(os.getenv("VEC_COT_CALL", "false") == "true"),
-                },
             },
         },
         "act_mem": {}
@@ -936,10 +912,6 @@ def get_default_cube_config() -> GeneralMemCubeConfig | None:
                     "LongTermMemory": os.getenv("NEBULAR_LONGTERM_MEMORY", 1e6),
                     "UserMemory": os.getenv("NEBULAR_USER_MEMORY", 1e6),
                 },
-                "search_strategy": {
-                    "bm25": bool(os.getenv("BM25_CALL", "false") == "true"),
-                    "cot": bool(os.getenv("VEC_COT_CALL", "false") == "true"),
-                },
                 "mode": os.getenv("ASYNC_MODE", "sync"),
             },
         },
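
The two hunks above drop the search_strategy block, which turned the BM25_CALL and VEC_COT_CALL environment variables into booleans. Incidentally, the bool(...) wrapper in the removed lines is redundant, since == already yields a bool. A small sketch of equivalent flag parsing, with a helper name of our own choosing:

    import os

    def env_flag(name: str, default: str = "false") -> bool:
        # Equivalent to the removed bool(os.getenv(NAME, "false") == "true"),
        # but also accepts "1"/"yes"/"True" style values.
        return os.getenv(name, default).strip().lower() in {"1", "true", "yes"}

    search_strategy = {
        "bm25": env_flag("BM25_CALL"),
        "cot": env_flag("VEC_COT_CALL"),
    }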

src/memos/configs/mem_reader.py

Lines changed: 0 additions & 9 deletions
@@ -36,19 +36,11 @@ def parse_datetime(cls, value):
         description="whether remove example in memory extraction prompt to save token",
     )
 
-    chat_chunker: dict[str, Any] = Field(
-        default=None, description="Configuration for the MemReader chat chunk strategy"
-    )
-
 
 class SimpleStructMemReaderConfig(BaseMemReaderConfig):
     """SimpleStruct MemReader configuration class."""
 
 
-class StrategyStructMemReaderConfig(BaseMemReaderConfig):
-    """StrategyStruct MemReader configuration class."""
-
-
 class MemReaderConfigFactory(BaseConfig):
     """Factory class for creating MemReader configurations."""
 
@@ -57,7 +49,6 @@ class MemReaderConfigFactory(BaseConfig):
 
     backend_to_class: ClassVar[dict[str, Any]] = {
         "simple_struct": SimpleStructMemReaderConfig,
-        "strategy_struct": StrategyStructMemReaderConfig,
     }
 
     @field_validator("backend")
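
The two hunks above shrink the backend registry that MemReaderConfigFactory validates against: a ClassVar mapping from backend string to config class, guarded by a field_validator that rejects unknown backends. A condensed, self-contained sketch of that pattern; the field names and validator body are simplified assumptions, not the exact MemOS implementation:

    from typing import Any, ClassVar

    from pydantic import BaseModel, field_validator

    class SimpleStructMemReaderConfig(BaseModel):
        remove_prompt_example: bool = False  # stand-in field for the sketch

    class MemReaderConfigFactory(BaseModel):
        backend: str
        config: dict[str, Any] = {}

        backend_to_class: ClassVar[dict[str, Any]] = {
            "simple_struct": SimpleStructMemReaderConfig,
            # "strategy_struct" is the entry this revert removes
        }

        @field_validator("backend")
        @classmethod
        def validate_backend(cls, value: str) -> str:
            if value not in cls.backend_to_class:
                raise ValueError(f"Unsupported mem_reader backend: {value}")
            return value

    MemReaderConfigFactory(backend="simple_struct")      # accepted
    # MemReaderConfigFactory(backend="strategy_struct")  # would now raise a validation error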

src/memos/configs/memory.py

Lines changed: 0 additions & 7 deletions
@@ -184,13 +184,6 @@ class TreeTextMemoryConfig(BaseTextMemoryConfig):
         ),
     )
 
-    search_strategy: dict[str, bool] | None = Field(
-        default=None,
-        description=(
-            'Set search strategy for this memory configuration.{"bm25": true, "cot": false}'
-        ),
-    )
-
     mode: str | None = Field(
         default="sync",
         description=("whether use asynchronous mode in memory add"),
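
To make the removed field concrete: before this revert, TreeTextMemoryConfig carried an optional per-memory search_strategy toggle. A self-contained sketch of that declaration, using a stand-in class name (TreeTextMemoryConfigSketch) so it runs without the rest of the configs module:

    from pydantic import BaseModel, Field

    class TreeTextMemoryConfigSketch(BaseModel):
        # Mirrors the field removed above; None means "use the default search path".
        search_strategy: dict[str, bool] | None = Field(
            default=None,
            description='Set search strategy for this memory configuration.{"bm25": true, "cot": false}',
        )
        mode: str | None = Field(default="sync", description="whether use asynchronous mode in memory add")

    print(TreeTextMemoryConfigSketch(search_strategy={"bm25": True, "cot": False}))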

src/memos/llms/openai.py

Lines changed: 5 additions & 8 deletions
@@ -58,18 +58,15 @@ def clear_cache(cls):
         logger.info("OpenAI LLM instance cache cleared")
 
     @timed(log=True, log_prefix="OpenAI LLM")
-    def generate(self, messages: MessageList, **kwargs) -> str:
-        """Generate a response from OpenAI LLM, optionally overriding generation params."""
-        temperature = kwargs.get("temperature", self.config.temperature)
-        max_tokens = kwargs.get("max_tokens", self.config.max_tokens)
-        top_p = kwargs.get("top_p", self.config.top_p)
+    def generate(self, messages: MessageList) -> str:
+        """Generate a response from OpenAI LLM."""
         response = self.client.chat.completions.create(
             model=self.config.model_name_or_path,
             messages=messages,
             extra_body=self.config.extra_body,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p,
+            temperature=self.config.temperature,
+            max_tokens=self.config.max_tokens,
+            top_p=self.config.top_p,
         )
         logger.info(f"Response from OpenAI: {response.model_dump_json()}")
         response_content = response.choices[0].message.content
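
The practical effect of the hunk above: generate() no longer takes per-call overrides, so temperature, max_tokens, and top_p come only from the instance config. A toy sketch of the reverted behaviour with stand-in classes (not the real OpenAI LLM wrapper), plus the calling difference:

    from dataclasses import dataclass

    @dataclass
    class SamplingConfig:  # stand-in for the real LLM config object
        temperature: float = 0.7
        max_tokens: int = 512
        top_p: float = 0.9

    class LLMSketch:
        # After the revert: sampling parameters are read from config only,
        # with no **kwargs override path in generate().
        def __init__(self, config: SamplingConfig) -> None:
            self.config = config

        def generate(self, messages: list[dict]) -> dict:
            return {
                "messages": messages,
                "temperature": self.config.temperature,
                "max_tokens": self.config.max_tokens,
                "top_p": self.config.top_p,
            }

    llm = LLMSketch(SamplingConfig(temperature=0.2))
    print(llm.generate([{"role": "user", "content": "hi"}]))
    # Per-call overrides such as generate(messages, temperature=0.1) are gone;
    # changing sampling now means changing the config (or building a new instance).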

src/memos/mem_reader/factory.py

Lines changed: 0 additions & 2 deletions
@@ -3,7 +3,6 @@
 from memos.configs.mem_reader import MemReaderConfigFactory
 from memos.mem_reader.base import BaseMemReader
 from memos.mem_reader.simple_struct import SimpleStructMemReader
-from memos.mem_reader.strategy_struct import StrategyStructMemReader
 from memos.memos_tools.singleton import singleton_factory
 
 
@@ -12,7 +11,6 @@ class MemReaderFactory(BaseMemReader):
 
     backend_to_class: ClassVar[dict[str, Any]] = {
         "simple_struct": SimpleStructMemReader,
-        "strategy_struct": StrategyStructMemReader,
     }
 
     @classmethod
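
With the registry entry above removed, MemReaderFactory can only dispatch to SimpleStructMemReader. A condensed sketch of that backend_to_class dispatch pattern; the from_config signature and config shape are our assumptions for illustration, not verified against the MemOS API:

    from typing import Any, ClassVar

    class SimpleStructMemReader:  # stand-in for memos.mem_reader.simple_struct.SimpleStructMemReader
        def __init__(self, config: dict[str, Any]) -> None:
            self.config = config

    class MemReaderFactorySketch:
        # Registry-style dispatch, as in MemReaderFactory.backend_to_class.
        backend_to_class: ClassVar[dict[str, Any]] = {
            "simple_struct": SimpleStructMemReader,
            # "strategy_struct": StrategyStructMemReader,  # removed by this revert
        }

        @classmethod
        def from_config(cls, backend: str, config: dict[str, Any]) -> Any:
            try:
                reader_cls = cls.backend_to_class[backend]
            except KeyError as exc:
                raise ValueError(f"Unknown mem_reader backend: {backend}") from exc
            return reader_cls(config)

    reader = MemReaderFactorySketch.from_config("simple_struct", {"chunk_type": "default"})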
