JavaScript Rendering
If you want to scrape a page that loads its data only after JavaScript execution, our API can fetch those pages using headless browsers. To use this feature and render JavaScript, simply pass dynamic=true, and our API will fetch the page using a headless Chrome browser. This feature costs 5 credits per request, or 25 credits when used with premium proxies.
Usage
curl "https://api.scrapingdog.com/scrape?api_key=5e5a97e5b1ca5b194f42da86&url=http://httpbin.org/ip"import requests
url = "https://api.scrapingdog.com/scrape"
params = {
"api_key": "5e5a97e5b1ca5b194f42da86",
"url": "http://httpbin.org/ip"
}
response = requests.get(url, params=params)
print(response.text)const axios = require('axios');
const apiUrl = 'https://api.scrapingdog.com/scrape';
const apiKey = '5e5a97e5b1ca5b194f42da86';
const targetUrl = 'http://httpbin.org/ip';

// Query parameters for the ScrapingDog scrape endpoint.
const params = {
  api_key: apiKey,
  url: targetUrl,
  // Render JavaScript with a headless Chrome browser -- this page
  // documents the dynamic flag, so the example should pass it.
  dynamic: 'true'
};

axios
  .get(apiUrl, { params })
  .then((response) => {
    if (response.status === 200) {
      console.log(response.data);
    } else {
      console.error(`Failed to retrieve data. Status code: ${response.status}`);
    }
  })
  .catch((error) => {
    console.error('An error occurred:', error.message);
  });
<?php
// Fetch the target page through the ScrapingDog scrape API using cURL.
$apiUrl = 'https://api.scrapingdog.com/scrape';
$apiKey = '5e5a97e5b1ca5b194f42da86';
$targetUrl = 'http://httpbin.org/ip';

$queryParams = [
    'api_key' => $apiKey,
    'url' => $targetUrl,
    // Render JavaScript with a headless Chrome browser -- this page
    // documents the dynamic flag, so the example should pass it.
    'dynamic' => 'true'
];

// http_build_query() URL-encodes each value for us.
$queryString = http_build_query($queryParams);
$fullUrl = $apiUrl . '?' . $queryString;

$curl = curl_init();
curl_setopt($curl, CURLOPT_URL, $fullUrl);
// Return the body from curl_exec() instead of printing it directly.
curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);

$response = curl_exec($curl);
if ($response === false) {
    echo 'cURL error: ' . curl_error($curl);
} else {
    $httpCode = curl_getinfo($curl, CURLINFO_HTTP_CODE);
    if ($httpCode === 200) {
        echo $response;
    } else {
        echo 'Failed to retrieve data. Status code: ' . $httpCode;
    }
}
curl_close($curl);
?>
require 'net/http'
require 'uri'

# Fetch the target page through the ScrapingDog scrape API.
api_url = 'https://api.scrapingdog.com/scrape'
api_key = '5e5a97e5b1ca5b194f42da86'
target_url = 'http://httpbin.org/ip'

uri = URI.parse(api_url)
params = {
  'api_key' => api_key,
  'url' => target_url,
  # Render JavaScript with a headless Chrome browser -- this page
  # documents the dynamic flag, so the example should pass it.
  'dynamic' => 'true'
}
# encode_www_form URL-encodes each value for us.
uri.query = URI.encode_www_form(params)

http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true # the API endpoint is https

request = Net::HTTP::Get.new(uri.request_uri)
response = http.request(request)

if response.code == '200'
  puts response.body
else
  puts "Failed to retrieve data. Status code: #{response.code}"
end
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/**
 * Example client: fetches a page through the ScrapingDog scrape API with
 * JavaScript rendering enabled (dynamic=true) and prints the response body.
 */
public class ScrapingDogAPITest {
    public static void main(String[] args) {
        try {
            String apiUrl = "https://api.scrapingdog.com/scrape";
            String apiKey = "5e5a97e5b1ca5b194f42da86";
            String targetUrl = "http://httpbin.org/ip";
            // BUG FIX: the original referenced an undeclared variable
            // `dynamic`, which does not compile. Declare it explicitly.
            boolean dynamic = true; // render JavaScript with headless Chrome

            // Construct the query parameters.
            Map<String, String> params = new HashMap<>();
            params.put("api_key", apiKey);
            params.put("url", targetUrl);
            params.put("dynamic", String.valueOf(dynamic));

            // Build the query URL, URL-encoding each value so characters
            // such as "://" in the target URL survive transport intact.
            StringBuilder query = new StringBuilder(apiUrl).append("?");
            for (Map.Entry<String, String> entry : params.entrySet()) {
                query.append(URLEncoder.encode(entry.getKey(), StandardCharsets.UTF_8))
                     .append("=")
                     .append(URLEncoder.encode(entry.getValue(), StandardCharsets.UTF_8))
                     .append("&");
            }
            String queryUrl = query.substring(0, query.length() - 1); // drop trailing "&"

            // Create an HTTP connection and issue the GET request.
            URL url = new URL(queryUrl);
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");

            int responseCode = connection.getResponseCode();
            if (responseCode == HttpURLConnection.HTTP_OK) {
                // Read and print the response body; try-with-resources
                // closes the reader even if readLine throws.
                StringBuilder response = new StringBuilder();
                try (BufferedReader in = new BufferedReader(
                        new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                    String inputLine;
                    while ((inputLine = in.readLine()) != null) {
                        response.append(inputLine);
                    }
                }
                System.out.println(response.toString());
            } else {
                System.out.println("Failed to retrieve data. Status code: " + responseCode);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Last updated