When you need to extract the same kind of data from multiple URLs, run tasks in parallel. Each task gets its own browser session — they don’t interfere with each other.
import asyncio
from browser_use_sdk import AsyncBrowserUse
from pydantic import BaseModel
class Site(BaseModel):
    """Structured record extracted from one news site."""

    # Site's name as shown on the page (e.g. "Hacker News").
    name: str
    # One-sentence description of the site.
    description: str
    # The headline currently at the top of the page.
    top_headline: str
# Pages to scrape concurrently — one independent task (and browser
# session) per URL.
urls = [
    "https://news.ycombinator.com",
    "https://reddit.com/r/technology",
    "https://techcrunch.com",
]
async def extract(client, url):
    """Extract a structured ``Site`` record from *url*.

    Each ``run()`` call auto-creates its own browser session on the
    service side, so concurrent calls do not interfere with one another.
    """
    prompt = (
        "Get the name of the site, a one-sentence description, "
        f"and the top headline from {url}"
    )
    return await client.run(prompt, output_schema=Site)
async def main():
    """Fan out one extraction task per URL and print each structured result."""
    client = AsyncBrowserUse()
    # One coroutine per URL; gather runs them concurrently and preserves order.
    tasks = [extract(client, site_url) for site_url in urls]
    for result in await asyncio.gather(*tasks):
        print(result.output)
# Guard the entry point: without this, merely importing the module would
# kick off live browser tasks as a side effect.
if __name__ == "__main__":
    asyncio.run(main())
Each run() call auto-creates its own session. No need to manage sessions manually for independent tasks.