import asyncio

from agenticraft import Agent


async def main():
    """Create a streaming agent and print its reply as it arrives."""
    bot = Agent(name="StreamBot", model="gpt-4o-mini")
    # Flush each chunk immediately so the user sees text in real time.
    async for piece in bot.stream("Tell me a joke"):
        print(piece.content, end="", flush=True)


asyncio.run(main())
info=agent.get_provider_info()ifinfo['supports_streaming']:# Provider supports streamingasyncforchunkinagent.stream(prompt):...else:# Fall back to regular completionresponse=awaitagent.arun(prompt)
# Supports: Claude 3.5, Claude 3, Claude 2.1# Features: Event-based streaming, thinking tracesagent=Agent(provider="anthropic",model="claude-3-5-sonnet-latest")asyncforchunkinagent.stream(prompt):ifchunk.metadata.get('event_type')=='content_block_delta':print(chunk.content,end="")
# Supports: All Ollama models# Features: Low latency, local inferenceagent=Agent(provider="ollama",model="llama3.2")asyncforchunkinagent.stream(prompt):print(chunk.content,end="")
word_count=0asyncforchunkinagent.stream(long_prompt):words=chunk.content.split()word_count+=len(words)# Process and discard chunkprint(f"Total words: {word_count}")