I'm experimenting with LLM development.
Here is my code:
import langchain, pydantic, transformers
from langchain import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables.base import RunnableSequence, RunnableMap, RunnableLambda
from langchain.callbacks import get_openai_callback
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser
from transformers import pipeline
class MedicalSpecialty(BaseModel):
    medical_specialty: str = Field(description="medical specialty the patient should go to")
    urgent: bool = Field(description="the patient should go to the hospital immediately")
parser = PydanticOutputParser(pydantic_object=MedicalSpecialty)
queries = ["i have ache in my chest and in my left arm. Which medical specialty should I go to?"]
template = """
Question: {question}
"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = HuggingFacePipeline.from_model_id(
    model_id="bigscience/bloom-1b7",
    task="text-generation",
    model_kwargs={"max_length": 1024},
    device=-1,  # Ensure it runs on CPU for macOS M1
)
# Wrap the prompt in a RunnableLambda to make it a Runnable
prompt_runnable = RunnableLambda(lambda x: prompt.format(**x))
# Define the sequence that includes the prompt and LLM
sequence = RunnableSequence([
    prompt_runnable,
    llm
])
with get_openai_callback() as CB:
    for query in queries:
        result = sequence.invoke({"question": query})
        print(query)
        print(result)
        print("===================================")

# Print the costs of the requests
print(CB)
Unfortunately, after several iterations I keep getting this error:
TypeError Traceback (most recent call last)
Cell In[6], line 19
16 prompt_runnable = RunnableLambda(lambda x: prompt.format(**x))
18 # Define the sequence that includes the prompt and LLM
---> 19 sequence = RunnableSequence([
20 prompt_runnable,
21 llm
22 ])
24 with get_openai_callback() as CB:
25 for query in queries:
File /opt/anaconda3/envs/LLM/lib/python3.11/site-packages/langchain_core/runnables/base.py:2632, in RunnableSequence.__init__(self, name, first, middle, last, *steps)
2630 steps_flat.extend(step.steps)
2631 else:
-> 2632 steps_flat.append(coerce_to_runnable(step))
2633 if len(steps_flat) < 2:
2634 raise ValueError(
2635 f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
2636 )
File /opt/anaconda3/envs/LLM/lib/python3.11/site-packages/langchain_core/runnables/base.py:5554, in coerce_to_runnable(thing)
5552 return cast(Runnable[Input, Output], RunnableParallel(thing))
5553 else:
-> 5554 raise TypeError(
5555 f"Expected a Runnable, callable or dict."
5556 f"Instead got an unsupported type: {type(thing)}"
5557 )
TypeError: Expected a Runnable, callable or dict.Instead got an unsupported type: <class 'list'>
Please, can someone help?
There is no need to use a lambda for this simple single-input prompt.
Try the code below and report back what happens.
prompt_runnable = PromptRunnable(prompt)
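For reference, and assuming a reasonably recent langchain_core: a PromptTemplate is already a Runnable, so it can be chained directly, and RunnableSequence expects its steps as separate positional arguments rather than a single list, which is what triggers the TypeError in your traceback. A minimal sketch reusing the prompt and llm objects from the question:
from langchain_core.runnables import RunnableSequence

# Steps are passed positionally, not wrapped in a list.
sequence = RunnableSequence(prompt, llm)

# Equivalent, and the more common idiom: build the sequence with the pipe operator.
sequence = prompt | llm

print(sequence.invoke({"question": queries[0]}))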
Great tip! It pointed me in the right direction. I wrote two extra lines of code using RunnableSequence and RunnableLambda, plus did a bit of work on the data structure. Thanks!!
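A sketch of what such a chain can look like (an assumption, not necessarily the exact lines the author wrote), reusing the prompt, llm, and queries objects defined in the question:
from langchain_core.runnables import RunnableLambda, RunnableSequence

# Reshape a plain query string into the dict the prompt expects,
# then run prompt -> llm as one sequence.
to_prompt_input = RunnableLambda(lambda query: {"question": query})
chain = RunnableSequence(to_prompt_input, prompt, llm)

for query in queries:
    print(query)
    print(chain.invoke(query))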