JavaScript Rendering

If you want to scrape a page that loads its data only after JavaScript execution, our API can fetch it using a headless browser. To use this feature and render JavaScript, simply pass dynamic=true and our API will fetch the page with a headless Chrome browser. This feature costs 5 credits per request, or 25 credits when used with premium proxies.

By default, this parameter is set to true in our API. If you do not need JavaScript rendering and want to reduce your cost from 5 credits per request to 1 credit per request, simply pass dynamic=false.
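For instance, here is a minimal Python sketch that turns rendering off so the request costs 1 credit instead of 5. It reuses the placeholder API key and test URL from the Usage examples below; substitute your own values.

import requests

# dynamic=false skips headless Chrome rendering, dropping the cost to 1 credit per request.
params = {
    "api_key": "5e5a97e5b1ca5b194f42da86",  # placeholder key from the Usage examples
    "url": "http://httpbin.org/ip",
    "dynamic": "false"
}

response = requests.get("https://api.scrapingdog.com/scrape", params=params)
print(response.text)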

Usage

curl "https://api.scrapingdog.com/scrape?api_key=5e5a97e5b1ca5b194f42da86&url=http://httpbin.org/ip"
import requests

url = "https://api.scrapingdog.com/scrape"

params = {
    "api_key": "5e5a97e5b1ca5b194f42da86",
    "url": "http://httpbin.org/ip"
}

response = requests.get(url, params=params)

print(response.text)

Node.js

const axios = require('axios');

const apiUrl = 'https://api.scrapingdog.com/scrape';
const apiKey = '5e5a97e5b1ca5b194f42da86';
const targetUrl = 'http://httpbin.org/ip';

const params = {
  api_key: apiKey,
  url: targetUrl,
  dynamic: 'true' // enable JavaScript rendering (this is also the default)
};

axios
  .get(apiUrl, { params })
  .then((response) => {
    if (response.status === 200) {
      console.log(response.data);
    } else {
      console.error(`Failed to retrieve data. Status code: ${response.status}`);
    }
  })
  .catch((error) => {
    console.error('An error occurred:', error.message);
  });


PHP

<?php
$apiUrl = 'https://api.scrapingdog.com/scrape';
$apiKey = '5e5a97e5b1ca5b194f42da86';
$targetUrl = 'http://httpbin.org/ip';

$queryParams = [
    'api_key' => $apiKey,
    'url' => $targetUrl,
    'dynamic' => 'true' // enable JavaScript rendering (this is also the default)
];

$queryString = http_build_query($queryParams);

$fullUrl = $apiUrl . '?' . $queryString;

$curl = curl_init();
curl_setopt($curl, CURLOPT_URL, $fullUrl);
curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);

$response = curl_exec($curl);

if ($response === false) {
    echo 'cURL error: ' . curl_error($curl);
} else {
    $httpCode = curl_getinfo($curl, CURLINFO_HTTP_CODE);
    if ($httpCode === 200) {
        echo $response;
    } else {
        echo 'Failed to retrieve data. Status code: ' . $httpCode;
    }
}

curl_close($curl);
?>

Ruby

require 'net/http'
require 'uri'

api_url = 'https://api.scrapingdog.com/scrape'
api_key = '5e5a97e5b1ca5b194f42da86'
target_url = 'http://httpbin.org/ip'

uri = URI.parse(api_url)
params = {
  'api_key' => api_key,
  'url' => target_url,
  'dynamic' => 'true' # enable JavaScript rendering (this is also the default)
}

uri.query = URI.encode_www_form(params)

http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true

request = Net::HTTP::Get.new(uri.request_uri)

response = http.request(request)

if response.code == '200'
  puts response.body
else
  puts "Failed to retrieve data. Status code: #{response.code}"
end

Java

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class ScrapingDogAPITest {
    public static void main(String[] args) {
        try {
            String apiUrl = "https://api.scrapingdog.com/scrape";
            String apiKey = "5e5a97e5b1ca5b194f42da86";
            String targetUrl = "http://httpbin.org/ip";
            boolean dynamic = true; // enable JavaScript rendering (this is also the default)

            // Construct the query parameters
            Map<String, String> params = new HashMap<>();
            params.put("api_key", apiKey);
            params.put("url", targetUrl);
            params.put("dynamic", String.valueOf(dynamic));

            // Build the query URL
            StringBuilder query = new StringBuilder(apiUrl);
            query.append("?");
            for (Map.Entry<String, String> entry : params.entrySet()) {
                // URL-encode values so reserved characters in the target URL are passed safely
                query.append(entry.getKey())
                     .append("=")
                     .append(URLEncoder.encode(entry.getValue(), StandardCharsets.UTF_8))
                     .append("&");
            }
            String queryUrl = query.toString().substring(0, query.length() - 1);

            // Create an HTTP connection and set up the request
            URL url = new URL(queryUrl);
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");

            // Get the response code
            int responseCode = connection.getResponseCode();

            if (responseCode == HttpURLConnection.HTTP_OK) {
                // Read and print the response
                BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
                String inputLine;
                StringBuilder response = new StringBuilder();

                while ((inputLine = in.readLine()) != null) {
                    response.append(inputLine);
                }

                in.close();
                System.out.println(response.toString());
            } else {
                System.out.println("Failed to retrieve data. Status code: " + responseCode);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
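As noted above, combining JavaScript rendering with premium proxies costs 25 credits per request. The short Python sketch below illustrates that combination; the premium parameter name used here is an assumption for illustration only, so check the Premium Residential Proxies page for the exact flag.

import requests

# Sketch only: JS rendering together with premium proxies costs 25 credits per request.
# The "premium" parameter name is an assumption; see the Premium Residential Proxies docs.
params = {
    "api_key": "5e5a97e5b1ca5b194f42da86",  # placeholder key from the Usage examples
    "url": "http://httpbin.org/ip",
    "dynamic": "true",
    "premium": "true"
}

response = requests.get("https://api.scrapingdog.com/scrape", params=params)
print(response.text)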


Last updated 9 months ago