Test

2026-01-28 18:47:02 +09:00
parent 0499c89cdf
commit 95c5fb7867
3 changed files with 58 additions and 36 deletions

@@ -100,19 +100,31 @@ def get_qwen_model():
     return _model, _tokenizer
 
-def query_select(sessionId: str, query: str) :
+def query_select(sessionId: str, query: str, limit_unlock: bool) :
     keywords = extract_keywords_simple(query)
     print(keywords)
-    filter_list = [{"sessionId": sessionId}]
+    filter_conditions = [{"sessionId": sessionId}]
     if keywords['dept'] != '' :
-        filter_list.append({"deptCd": keywords['dept']})
+        filter_conditions.append({"deptCd": keywords['dept']})
     if keywords['rank'] != '' :
-        filter_list.append({"gradeCd": keywords['rank']})
-    print(filter_list)
-    results = collection.query(
-        query_texts=[keywords['keyword']],
-        where={"$and": filter_list},
-    )
+        filter_conditions.append({"gradeCd": keywords['rank']})
+    # Wrap in $and only when there is more than one condition
+    if len(filter_conditions) > 1:
+        where_clause = {"$and": filter_conditions}
+    else:
+        where_clause = filter_conditions[0]
+    print(where_clause)
+    if limit_unlock :
+        results = collection.get(
+            where=where_clause
+        )
+    else :
+        results = collection.query(
+            query_texts=[keywords['keyword']],
+            where=where_clause
+        )
     return results, keywords['keyword']
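Side note on the new where_clause logic: Chroma's $and operator expects at least two operands and rejects a list with a single clause, which is why a lone condition must be passed as a bare dict. A minimal sketch of the same pattern (the build_where_clause helper is hypothetical, not part of the commit):

    def build_where_clause(conditions: list[dict]) -> dict:
        """Wrap filters in $and only when there are two or more of them."""
        if len(conditions) > 1:
            return {"$and": conditions}
        return conditions[0]

    # One condition stays bare; two or more get wrapped.
    assert build_where_clause([{"sessionId": "s1"}]) == {"sessionId": "s1"}
    assert build_where_clause([{"sessionId": "s1"}, {"deptCd": "D01"}]) == {
        "$and": [{"sessionId": "s1"}, {"deptCd": "D01"}]
    }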
@@ -125,27 +137,34 @@ def query_select_summarize_stream(results, query, ai, min_similarity: float = 0.
         Generator: a streaming response generator
     """
-    if not results['documents'] or not results['documents'][0]:
-        def generate_empty():
-            yield json.dumps({"kind": "text", "text": "No relevant documents were found."}) + "\n"
-        return generate_empty
+    if ai :
+        if not results['documents'] or not results['documents'][0]:
+            def generate_empty():
+                yield json.dumps({"kind": "text", "text": "No relevant documents were found."}) + "\n"
+            return generate_empty
 
-    # Compute similarities and filter
-    filtered_docs = []
-    if results['distances'] and results['distances'][0]:
-        for doc, dist in zip(results['documents'][0], results['distances'][0]):
-            similarity = 1 - dist
-            if similarity >= min_similarity:
-                filtered_docs.append((doc, similarity))
-                if ai and len(filtered_docs) >= 5:
-                    break
-    print(f"Filtered documents: {filtered_docs}")
+        # Compute similarities and filter
+        filtered_docs = []
+        if results['distances'] and results['distances'][0]:
+            for doc, dist in zip(results['documents'][0], results['distances'][0]):
+                similarity = 1 - dist
+                if similarity >= min_similarity:
+                    filtered_docs.append((doc, similarity))
+                    if ai and len(filtered_docs) >= 5:
+                        break
+        print(f"Filtered documents: {filtered_docs}")
 
-    # Build the context
-    context_parts = []
-    for i, (doc, sim) in enumerate(filtered_docs):
-        context_parts.append(f"[chunk {i+1} | similarity: {sim:.3f}]\n{doc}")
-    context = "\n\n".join(context_parts)
+        # Build the context
+        context_parts = []
+        for i, (doc, sim) in enumerate(filtered_docs):
+            context_parts.append(f"[similarity: {sim:.3f}]\n{doc}")
+        context = "\n\n".join(context_parts)
+    else :
+        print('plain lookup')
+        context_parts = []
+        for doc in results.get('documents') :
+            context_parts.append(doc)
+        context = "\n".join(context_parts)
 
     # Load the model
     model, tokenizer = get_qwen_model()
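The two branches index the results differently because of how Chroma shapes them: collection.query returns nested lists (one inner list per query text), while collection.get returns flat lists. A sketch with hypothetical data illustrating both shapes:

    # Shape returned by collection.query (nested, one list per query text):
    query_results = {"documents": [["doc a", "doc b"]], "distances": [[0.12, 0.34]]}
    # Shape returned by collection.get (flat lists):
    get_results = {"documents": ["doc a", "doc b"], "ids": ["id1", "id2"]}

    # AI path: index [0] first, then pair documents with distances.
    for doc, dist in zip(query_results["documents"][0], query_results["distances"][0]):
        print(f"[similarity: {1 - dist:.3f}]\n{doc}")

    # Plain path: the flat document list is joined directly.
    print("\n".join(get_results["documents"]))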
@@ -183,7 +202,7 @@ def query_select_summarize_stream(results, query, ai, min_similarity: float = 0.
     generation_kwargs = dict(
         **model_inputs,
         streamer=streamer,
-        max_new_tokens=150,
+        max_new_tokens=3000,
         do_sample=True,
         temperature=0.3,
         top_p=0.9,
@@ -196,6 +215,7 @@ def query_select_summarize_stream(results, query, ai, min_similarity: float = 0.
     # Define the generator function
     def generate():
         for new_text in streamer:
-            yield json.dumps({"kind": "text", "text": new_text}) + "\n"
+            if new_text:
+                yield json.dumps({"kind": "text", "text": new_text}) + "\n"
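For reference, the generate() loop above relies on the standard transformers streaming pattern: model.generate blocks until done, so it runs on a background thread while the caller drains a TextIteratorStreamer; the streamer can emit empty strings, which the newly added guard skips. A minimal sketch (the model name is an assumption for illustration, not taken from the commit):

    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    name = "Qwen/Qwen2.5-0.5B-Instruct"  # assumed model, illustration only
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    model_inputs = tokenizer("Summarize this document.", return_tensors="pt")

    # generate() blocks, so it runs on a worker thread while we consume chunks.
    Thread(target=model.generate,
           kwargs=dict(**model_inputs, streamer=streamer, max_new_tokens=3000)).start()

    for new_text in streamer:
        if new_text:  # skip the occasional empty chunk
            print(new_text, end="", flush=True)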
@@ -235,8 +255,6 @@ def query_summarize_simple(query: str) :
         }
     ]
-    print(f'Messages: {messages}')
-
     # Tokenize
     text = tokenizer.apply_chat_template(
         messages,
@@ -320,12 +338,12 @@ def question(sessionId: str, query: str):
     """
     type = query_summarize_simple(query=query)
     if(type == '0') :
-        results, keyword = query_select(sessionId, query)
-        print('simple question: AI path')
+        results, keyword = query_select(sessionId, query, False)
+        print(f"simple question: AI path, {len(results['documents'][0])} documents")
         generate = query_select_summarize_stream(results, query=keyword, ai=True)
     else :
-        results, keyword = query_select(sessionId, query)
-        print('simple question: database lookup')
+        results, keyword = query_select(sessionId, query, True)
+        print(f"count via database lookup: {len(results.get('ids'))} records")
         generate = query_select_summarize_stream(results, query=keyword, ai=False)
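Reading the routed calls together: per the diff, type '0' takes the AI path (limit_unlock=False, a ranked semantic query followed by streamed summarization), while anything else takes the lookup path (limit_unlock=True, an unranked collection.get of every matching record). A hypothetical call for each path (session id and query strings invented for illustration):

    # AI path: ranked semantic search, summarized by the model.
    results, keyword = query_select("sess-1", "summarize the vacation policy", False)
    stream = query_select_summarize_stream(results, query=keyword, ai=True)

    # Lookup path: fetch all matching records, listed without ranking.
    results, keyword = query_select("sess-1", "how many people are in sales", True)
    stream = query_select_summarize_stream(results, query=keyword, ai=False)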