The following piece of code is from https://python.langchain.com/docs/modules/model_io/models/llms/integrations/gpt4all
"""Minimal GPT4All + LangChain example: run a local GGML model through an LLMChain."""
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Prompt that nudges the model into step-by-step reasoning.
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

local_path = (
    "./models/ggml-gpt4all-l13b-snoozy.bin"  # replace with your desired local file path
)

# Callbacks support token-wise streaming
callbacks = [StreamingStdOutCallbackHandler()]

# Instantiate the model ONCE. (The original snippet built GPT4All twice,
# loading the multi-GB weights from disk twice; the first instance was
# immediately discarded.) Verbose is required to pass to the callback manager.
#
# If you want to use a custom model, add the backend parameter, e.g.
# GPT4All(model=..., backend="gptj", ...) — check
# https://docs.gpt4all.io/gpt4all_python.html for supported backends.
# NOTE(review): the "Invalid model file / Unable to instantiate model"
# ValidationError in the accompanying traceback typically means the on-disk
# model format is incompatible with the installed gpt4all package version
# (e.g. an old ggml .bin with a newer gpt4all that expects gguf) — confirm
# the gpt4all version matches the model file.
llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)

llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
# Original line ended with a stray backtick (a paste artifact) — that is a
# SyntaxError and has been removed.
llm_chain.run(question)
Here is the error message:
Found model file at ./models/ggml-gpt4all-l13b-snoozy.bin
Invalid model file
---------------------------------------------------------------------------
ValidationError Traceback (most recent call last)
Cell In[16], line 19
17 callbacks = [StreamingStdOutCallbackHandler()]
18 # Verbose is required to pass to the callback manager
---> 19 llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
20 # If you want to use a custom model add the backend parameter
21 # Check https://docs.gpt4all.io/gpt4all_python.html for supported backends
22 llm = GPT4All(model=local_path, backend="gptj", callbacks=callbacks, verbose=True)
File ~/anaconda3/lib/python3.10/site-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for GPT4All
__root__
Unable to instantiate model (type=value_error)