Contents

# Sample passage (two paragraphs about Dartmouth's Baker-Berry Library) used as
# the raw input for the structured-extraction demo below.
unstructured_text = """The original, historic library building is the Fisher Ames Baker Memorial Library; it opened in 1928 with a collection of 240,000 volumes. The building was designed by Jens Fredrick Larson, modeled after Independence Hall in Philadelphia, and funded by a gift to Dartmouth College by George Fisher Baker in memory of his uncle, Fisher Ames Baker, Dartmouth class of 1859. The facility was expanded in 1941 and 1957–1958 and received its one millionth volume in 1970.

In 1992, John Berry and the Baker family donated US $30 million for the construction of a new facility, the Berry Library designed by architect Robert Venturi, adjoining the Baker Library. The new complex, the Baker-Berry Library, opened in 2000 and was completed in 2002.[6] The Dartmouth College libraries presently hold over 2 million volumes in their collections."""

# Show the source text so the reader can compare it with the extracted timeline.
print(unstructured_text)
The original, historic library building is the Fisher Ames Baker Memorial Library; it opened in 1928 with a collection of 240,000 volumes. The building was designed by Jens Fredrick Larson, modeled after Independence Hall in Philadelphia, and funded by a gift to Dartmouth College by George Fisher Baker in memory of his uncle, Fisher Ames Baker, Dartmouth class of 1859. The facility was expanded in 1941 and 1957–1958 and received its one millionth volume in 1970.

In 1992, John Berry and the Baker family donated US $30 million for the construction of a new facility, the Berry Library designed by architect Robert Venturi, adjoining the Baker Library. The new complex, the Baker-Berry Library, opened in 2000 and was completed in 2002.[6] The Dartmouth College libraries presently hold over 2 million volumes in their collections.
from langchain_dartmouth.llms import ChatDartmouth
from dotenv import find_dotenv, load_dotenv

# Load API credentials (presumably a Dartmouth API key — the exact variable
# names are not visible here) from the nearest .env file on the search path.
# Returns True when a .env file was found and loaded.
load_dotenv(find_dotenv())
True
from dataclasses import dataclass


@dataclass
class Event:
    """A single dated entry in a timeline.

    Attributes:
        year: Calendar year in which the event took place.
        description: Short human-readable summary of the event.
    """

    year: int
    description: str


@dataclass
class Timeline:
    """An ordered collection of Event records.

    Attributes:
        events: The events making up the timeline, in chronological order.
    """

    events: list[Event]
from langchain.agents import create_agent

# ChatDartmouth exposes Dartmouth-hosted models through a LangChain chat
# interface (OpenAI-compatible under the hood).
llm = ChatDartmouth(model_name="openai.gpt-oss-120b")

# response_format=Timeline asks the agent to coerce the model output into the
# Timeline dataclass; the parsed object is surfaced under "structured_response".
agent = create_agent(model=llm, response_format=Timeline)


prompt = (
    "Extract a succinct timeline of events directly related to the Library from the following text: \n\n"
    + unstructured_text
)
# The agent's "messages" state is documented as a *list* of messages; each
# element may be a (role, content) tuple. Passing the tuple bare (not wrapped
# in a list) relied on an implicit coercion — wrap it explicitly.
# NOTE(review): the recorded traceback (302 redirect to outage.dartmouth.edu)
# indicates a service outage, not a client-side bug — retrying when the API is
# up should succeed.
response = agent.invoke({"messages": [("human", prompt)]})

timeline = response["structured_response"]
---------------------------------------------------------------------------
APIStatusError                            Traceback (most recent call last)
Cell In[5], line 1
----> 1 response = agent.invoke({"messages": ("human", prompt)})
      3 timeline = response["structured_response"]

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/pregel/main.py:3071, in Pregel.invoke(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, **kwargs)
   3068 chunks: list[dict[str, Any] | Any] = []
   3069 interrupts: list[Interrupt] = []
-> 3071 for chunk in self.stream(
   3072     input,
   3073     config,
   3074     context=context,
   3075     stream_mode=["updates", "values"]
   3076     if stream_mode == "values"
   3077     else stream_mode,
   3078     print_mode=print_mode,
   3079     output_keys=output_keys,
   3080     interrupt_before=interrupt_before,
   3081     interrupt_after=interrupt_after,
   3082     durability=durability,
   3083     **kwargs,
   3084 ):
   3085     if stream_mode == "values":
   3086         if len(chunk) == 2:

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/pregel/main.py:2646, in Pregel.stream(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, subgraphs, debug, **kwargs)
   2644 for task in loop.match_cached_writes():
   2645     loop.output_writes(task.id, task.writes, cached=True)
-> 2646 for _ in runner.tick(
   2647     [t for t in loop.tasks.values() if not t.writes],
   2648     timeout=self.step_timeout,
   2649     get_waiter=get_waiter,
   2650     schedule_task=loop.accept_push,
   2651 ):
   2652     # emit output
   2653     yield from _output(
   2654         stream_mode, print_mode, subgraphs, stream.get, queue.Empty
   2655     )
   2656 loop.after_tick()

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/pregel/_runner.py:167, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter, schedule_task)
    165 t = tasks[0]
    166 try:
--> 167     run_with_retry(
    168         t,
    169         retry_policy,
    170         configurable={
    171             CONFIG_KEY_CALL: partial(
    172                 _call,
    173                 weakref.ref(t),
    174                 retry_policy=retry_policy,
    175                 futures=weakref.ref(futures),
    176                 schedule_task=schedule_task,
    177                 submit=self.submit,
    178             ),
    179         },
    180     )
    181     self.commit(t, None)
    182 except Exception as exc:

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/pregel/_retry.py:42, in run_with_retry(task, retry_policy, configurable)
     40     task.writes.clear()
     41     # run the task
---> 42     return task.proc.invoke(task.input, config)
     43 except ParentCommand as exc:
     44     ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/_internal/_runnable.py:656, in RunnableSeq.invoke(self, input, config, **kwargs)
    654     # run in context
    655     with set_config_context(config, run) as context:
--> 656         input = context.run(step.invoke, input, config, **kwargs)
    657 else:
    658     input = step.invoke(input, config)

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langgraph/_internal/_runnable.py:400, in RunnableCallable.invoke(self, input, config, **kwargs)
    398         run_manager.on_chain_end(ret)
    399 else:
--> 400     ret = self.func(*args, **kwargs)
    401 if self.recurse and isinstance(ret, Runnable):
    402     return ret.invoke(input, config)

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain/agents/factory.py:1261, in create_agent.<locals>.model_node(state, runtime)
   1249 request = ModelRequest(
   1250     model=model,
   1251     tools=default_tools,
   (...)   1257     runtime=runtime,
   1258 )
   1260 if wrap_model_call_handler is None:
-> 1261     model_response = _execute_model_sync(request)
   1262     return _build_commands(model_response)
   1264 result = wrap_model_call_handler(request, _execute_model_sync)

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain/agents/factory.py:1233, in create_agent.<locals>._execute_model_sync(request)
   1230 if request.system_message:
   1231     messages = [request.system_message, *messages]
-> 1233 output = model_.invoke(messages)
   1234 if name:
   1235     output.name = name

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_core/runnables/base.py:5695, in RunnableBindingBase.invoke(self, input, config, **kwargs)
   5688 @override
   5689 def invoke(
   5690     self,
   (...)   5693     **kwargs: Any | None,
   5694 ) -> Output:
-> 5695     return self.bound.invoke(
   5696         input,
   5697         self._merge_configs(config),
   5698         **{**self.kwargs, **kwargs},
   5699     )

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_dartmouth/llms.py:537, in ChatDartmouth.invoke(self, *args, **kwargs)
    535     raise InvalidKeyError("API key invalid!")
    536 else:
--> 537     raise e

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_dartmouth/llms.py:526, in ChatDartmouth.invoke(self, *args, **kwargs)
    518 """Invokes the model to get a response to a query.
    519 
    520 See `LangChain's API documentation <https://python.langchain.com/v0.1/docs/expression_language/interface/>`_ for details on how to use this method.
   (...)    523 :rtype: BaseMessage
    524 """
    525 try:
--> 526     response = super().invoke(*args, **kwargs)
    527     return add_response_cost_to_usage_metadata(response)
    528 except APIStatusError as e:

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:402, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    388 @override
    389 def invoke(
    390     self,
   (...)    395     **kwargs: Any,
    396 ) -> AIMessage:
    397     config = ensure_config(config)
    398     return cast(
    399         "AIMessage",
    400         cast(
    401             "ChatGeneration",
--> 402             self.generate_prompt(
    403                 [self._convert_input(input)],
    404                 stop=stop,
    405                 callbacks=config.get("callbacks"),
    406                 tags=config.get("tags"),
    407                 metadata=config.get("metadata"),
    408                 run_name=config.get("run_name"),
    409                 run_id=config.pop("run_id", None),
    410                 **kwargs,
    411             ).generations[0][0],
    412         ).message,
    413     )

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:1121, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
   1112 @override
   1113 def generate_prompt(
   1114     self,
   (...)   1118     **kwargs: Any,
   1119 ) -> LLMResult:
   1120     prompt_messages = [p.to_messages() for p in prompts]
-> 1121     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_dartmouth/llms.py:587, in ChatDartmouth.generate(self, *args, **kwargs)
    585 def generate(self, *args, **kwargs) -> LLMResult:
    586     try:
--> 587         return super().generate(*args, **kwargs)
    588     except BadRequestError as e:
    589         if "model not found" in str(e).lower():

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:931, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    928 for i, m in enumerate(input_messages):
    929     try:
    930         results.append(
--> 931             self._generate_with_cache(
    932                 m,
    933                 stop=stop,
    934                 run_manager=run_managers[i] if run_managers else None,
    935                 **kwargs,
    936             )
    937         )
    938     except BaseException as e:
    939         if run_managers:

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:1233, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
   1231     result = generate_from_stream(iter(chunks))
   1232 elif inspect.signature(self._generate).parameters.get("run_manager"):
-> 1233     result = self._generate(
   1234         messages, stop=stop, run_manager=run_manager, **kwargs
   1235     )
   1236 else:
   1237     result = self._generate(messages, stop=stop, **kwargs)

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_openai/chat_models/base.py:1473, in BaseChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
   1471     _handle_openai_bad_request(e)
   1472 except openai.APIError as e:
-> 1473     _handle_openai_api_error(e)
   1474 except Exception as e:
   1475     if raw_response is not None and hasattr(raw_response, "http_response"):

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/langchain_openai/chat_models/base.py:1445, in BaseChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
   1442 if "response_format" in payload:
   1443     payload.pop("stream")
   1444     raw_response = (
-> 1445         self.root_client.chat.completions.with_raw_response.parse(**payload)
   1446     )
   1447     response = raw_response.parse()
   1448 elif self._use_responses_api(payload):

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/openai/_legacy_response.py:364, in to_raw_response_wrapper.<locals>.wrapped(*args, **kwargs)
    360 extra_headers[RAW_RESPONSE_HEADER] = "true"
    362 kwargs["extra_headers"] = extra_headers
--> 364 return cast(LegacyAPIResponse[R], func(*args, **kwargs))

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py:184, in Completions.parse(self, messages, model, audio, response_format, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, prompt_cache_key, prompt_cache_retention, reasoning_effort, safety_identifier, seed, service_tier, stop, store, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, verbosity, web_search_options, extra_headers, extra_query, extra_body, timeout)
    177 def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
    178     return _parse_chat_completion(
    179         response_format=response_format,
    180         chat_completion=raw_completion,
    181         input_tools=chat_completion_tools,
    182     )
--> 184 return self._post(
    185     "/chat/completions",
    186     body=maybe_transform(
    187         {
    188             "messages": messages,
    189             "model": model,
    190             "audio": audio,
    191             "frequency_penalty": frequency_penalty,
    192             "function_call": function_call,
    193             "functions": functions,
    194             "logit_bias": logit_bias,
    195             "logprobs": logprobs,
    196             "max_completion_tokens": max_completion_tokens,
    197             "max_tokens": max_tokens,
    198             "metadata": metadata,
    199             "modalities": modalities,
    200             "n": n,
    201             "parallel_tool_calls": parallel_tool_calls,
    202             "prediction": prediction,
    203             "presence_penalty": presence_penalty,
    204             "prompt_cache_key": prompt_cache_key,
    205             "prompt_cache_retention": prompt_cache_retention,
    206             "reasoning_effort": reasoning_effort,
    207             "response_format": _type_to_response_format(response_format),
    208             "safety_identifier": safety_identifier,
    209             "seed": seed,
    210             "service_tier": service_tier,
    211             "stop": stop,
    212             "store": store,
    213             "stream": False,
    214             "stream_options": stream_options,
    215             "temperature": temperature,
    216             "tool_choice": tool_choice,
    217             "tools": tools,
    218             "top_logprobs": top_logprobs,
    219             "top_p": top_p,
    220             "user": user,
    221             "verbosity": verbosity,
    222             "web_search_options": web_search_options,
    223         },
    224         completion_create_params.CompletionCreateParams,
    225     ),
    226     options=make_request_options(
    227         extra_headers=extra_headers,
    228         extra_query=extra_query,
    229         extra_body=extra_body,
    230         timeout=timeout,
    231         post_parser=parser,
    232     ),
    233     # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
    234     # in the `parser` function above
    235     cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
    236     stream=False,
    237 )

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/openai/_base_client.py:1297, in SyncAPIClient.post(self, path, cast_to, body, content, options, files, stream, stream_cls)
   1288     warnings.warn(
   1289         "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
   1290         "Please pass raw bytes via the `content` parameter instead.",
   1291         DeprecationWarning,
   1292         stacklevel=2,
   1293     )
   1294 opts = FinalRequestOptions.construct(
   1295     method="post", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
   1296 )
-> 1297 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File /opt/hostedtoolcache/Python/3.11.14/x64/lib/python3.11/site-packages/openai/_base_client.py:1070, in SyncAPIClient.request(self, cast_to, options, stream, stream_cls)
   1067             err.response.read()
   1069         log.debug("Re-raising status error")
-> 1070         raise self._make_status_error_from_response(err.response) from None
   1072     break
   1074 assert response is not None, "could not resolve response (should never happen)"

APIStatusError: !DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>302 Moved Temporarily</title>
</head><body>
<h1>Moved </h1>
<p>The document has moved <a href="http://outage.dartmouth.edu">here</a>.</p>
</body></html>
During task with name 'model' and id 'f214eecc-cf5a-c8f8-4e0f-cd15a68d8505'
# Render the extracted timeline, one "year:<TAB>description" line per event.
for ev in timeline.events:
    print(f"{ev.year}:\t{ev.description}")