Our API is designed to let you run multiple concurrent scraping operations. That means you can scrape hundreds, thousands, or even millions of pages per day, depending on your plan.
The higher your concurrent request limit, the more calls you can have active in parallel — and the faster you can scrape.
using System;
using System.IO;
using System.Net;
using System.Web;
using System.Threading;
namespace test {
    // Demonstrates concurrent scraping through the ScrapingBee API:
    // each page is fetched on its own thread and saved to a local HTML file.
    class test{
        // Base endpoint of the ScrapingBee API; the trailing '?' lets the
        // encoded query string be appended directly.
        private const string BASE_URL = "https://app.scrapingbee.com/api/v1/?";
        // Replace with your personal ScrapingBee API key.
        private const string API_KEY = "YOUR-API-KEY";

        // Performs a blocking HTTP GET against `uri` and returns the response
        // body as a string. Gzip/deflate-compressed responses are decompressed
        // transparently. Throws WebException on network failure.
        public static string Get(string uri)
        {
            HttpWebRequest request = (HttpWebRequest)WebRequest.Create(uri);
            request.AutomaticDecompression = DecompressionMethods.GZip | DecompressionMethods.Deflate;
            using(HttpWebResponse response = (HttpWebResponse)request.GetResponse())
            using(Stream stream = response.GetResponseStream())
            using(StreamReader reader = new StreamReader(stream))
            {
                return reader.ReadToEnd();
            }
        }

        // Scrapes `uri` through the API and writes the returned HTML to `path`.
        // Returns true on success, false on any network or file-system failure.
        public static bool Scrape(string uri, string path) {
            Console.WriteLine("Scraping " + uri);
            // ParseQueryString on an empty string yields a collection whose
            // ToString() URL-encodes each value for us.
            var query = HttpUtility.ParseQueryString(string.Empty);
            query["api_key"] = API_KEY;
            query["url"] = uri;
            string queryString = query.ToString(); // Transforming the URL queries to string
            try {
                // BUG FIX: the original called Get() outside the try block, so a
                // network failure threw an unhandled exception on the worker
                // thread and crashed the process instead of returning false.
                string output = Get(BASE_URL + queryString); // Make the request
                using (StreamWriter sw = File.CreateText(path))
                {
                    sw.Write(output);
                }
                return true;
            } catch (Exception ex) {
                // Report the failure instead of silently swallowing it.
                Console.Error.WriteLine("Failed to scrape " + uri + ": " + ex.Message);
                return false;
            }
        }

        public static void Main(string[] args) {
            // Run each Scrape call on its own thread so the pages download in parallel.
            Thread thread1 = new Thread(() => Scrape("https://scrapingbee.com/blog", "./scrapingbeeBlog.html"));
            Thread thread2 = new Thread(() => Scrape("https://scrapingbee.com/documentation", "./scrapingbeeDocumentation.html"));
            thread1.Start();
            thread2.Start();
            // Wait for both downloads to finish before exiting, rather than
            // relying on foreground-thread semantics to keep the process alive.
            thread1.Join();
            thread2.Join();
        }
    }
}
Go back to tutorials