Help! Crawl4AI Beginner Issue
Hi All, I am new to Crawl4AI and trying to cycle through some URLs for a client project. I am sure it is really simple, but for some reason the browser keeps closing and reopening a new browser after I log in to the site. I would appreciate it if you could help me out. All I am really looking for is to log in and then go to each of those links....import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode from playwright.async_api import BrowserContext, Page # ✅ Multiple target URLs urls = [ "https://clublocker.com/clubs/188", "https://clublocker.com/organizations/200/home" ] # ✅ Browser config browser_config = BrowserConfig( headless=False, # Show browser window use_managed_browser=False, # Use system-installed browser verbose=True ) # ✅ Crawler config crawler_config = CrawlerRunConfig( wait_for_timeout=20000, # Wait for page elements delay_before_return_html=6.0, # Small delay before snapshot cache_mode=CacheMode.BYPASS, # Always fetch fresh verbose=True, target_elements=["usq-club-card"] # Only extract this tag ) async def main(): # ✅ Initialize crawler and keep browser open crawler = AsyncWebCrawler(config=browser_config) await crawler.start() # ✅ Get a persistent context (single browser session) context: BrowserContext = await crawler.get_browser_context() page: Page = await context.new_page() # ✅ Login once print("[🔐] Logging into ClubLocker...") await page.goto("https://clublocker.com/login") await page.fill('input[name="username"]', "myusername") await page.fill('input[name="password"]', "mypassword") await page.click('button[type="submit"]') await page.wait_for_load_state("networkidle") print("[✅] Login complete. Browser stays open.") # ✅ Reuse browser for all URLs for url in urls: print(f"\n🌍 Crawling: {url}") try: result = await crawler.arun(