Datacenter Proxies
Scrapingdog also provides a proxy front end as an alternative way of using the web scraping API: the functionality is identical, only the access method differs.
Any request to this proxy will be forwarded to the web scraping API.
Note: remember to configure your code to skip SSL certificate verification, and to connect to the proxy itself over plain HTTP.
Proxy Example
curl --proxy "http://scrapingdog:652c6647e4921e35dab690bc@proxy.scrapingdog.com:8081" --insecure "https://httpbin.org/ip"
import requests

# Scrapingdog datacenter proxy endpoint, with the API key embedded as
# the password portion of the proxy credentials.
proxy_url = "http://scrapingdog:652c6647e4921e35dab690bc@proxy.scrapingdog.com:8081"

# Target URL to scrape
target_url = "https://httpbin.org/ip"

# Route both plain-HTTP and HTTPS traffic through the proxy
proxies = {
    "http": proxy_url,
    "https": proxy_url,
}

# Make the GET request.
# verify=False is required because the proxy intercepts TLS, so the
# certificate will not match the target host. The timeout prevents the
# script from hanging forever on a stalled connection — requests has
# no default timeout.
response = requests.get(target_url, proxies=proxies, verify=False, timeout=30)

# Print the response content
print(response.text)
const axios = require('axios');

// Fetch the target URL through the Scrapingdog datacenter proxy.
// Credentials are supplied via the proxy `auth` option.
axios
  .get('https://httpbin.org/ip', {
    proxy: {
      host: 'proxy.scrapingdog.com',
      port: 8081,
      auth: {
        username: 'scrapingdog',
        password: '652c6647e4921e35dab690bc',
      },
    },
  })
  .then((response) => {
    // Print the response body
    console.log(response.data);
  })
  .catch((error) => {
    console.error(error);
  });
<?php
$scraping_url = "https://httpbin.org/ip"; // Your target URL

$ch = curl_init();

// Set the target URL
curl_setopt($ch, CURLOPT_URL, $scraping_url);

// Route the request through the Scrapingdog datacenter proxy
curl_setopt($ch, CURLOPT_PROXY, "http://scrapingdog:652c6647e4921e35dab690bc@proxy.scrapingdog.com:8081");

// Skip SSL certificate and host-name checks — the proxy intercepts TLS,
// so the certificate will not match the target host
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
curl_setopt($ch, CURLOPT_SSL_VERIFYHOST, false);

// Return the body from curl_exec() instead of printing it directly.
// Without this option curl_exec() echoes the body itself and returns
// true, so the final `echo $response;` would print "1".
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);

// Execute the cURL request
$response = curl_exec($ch);

// Check for cURL errors
if ($response === false) {
    echo 'cURL error: ' . curl_error($ch);
}

// Close cURL session
curl_close($ch);

// Output the response
echo $response;
?>
require 'httpclient'
require 'openssl'

# Your target URL
scraping_url = 'https://httpbin.org/ip'

client = HTTPClient.new

# Route requests through the Scrapingdog datacenter proxy
client.set_proxy('http://scrapingdog:652c6647e4921e35dab690bc@proxy.scrapingdog.com:8081')

# Disable SSL certificate verification — the proxy intercepts TLS, so the
# certificate presented will not match the target host. Without this the
# request fails with an OpenSSL verification error.
client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE

# Send a GET request
response = client.get(scraping_url)

# Output the response body
puts response.body
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.Authenticator;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.PasswordAuthentication;
import java.net.Proxy;
import java.net.URL;
import java.nio.charset.StandardCharsets;
/**
 * Fetches a target URL through the Scrapingdog datacenter proxy and
 * prints the response body to stdout.
 */
public class CurlToJava {
    public static void main(String[] args) {
        // Target URL to fetch through the proxy
        String scrapingUrl = "https://httpbin.org/ip";

        // Proxy credentials. For HTTPS targets HttpURLConnection tunnels via
        // CONNECT, and a Proxy-Authorization request property set on the
        // connection is NOT forwarded to the proxy — the credentials must be
        // supplied through an Authenticator instead.
        // NOTE: since JDK 8u111 Basic auth for HTTPS tunnels is disabled by
        // default; run with -Djdk.http.auth.tunneling.disabledSchemes= to
        // re-enable it.
        Authenticator.setDefault(new Authenticator() {
            @Override
            protected PasswordAuthentication getPasswordAuthentication() {
                if (getRequestorType() == RequestorType.PROXY) {
                    return new PasswordAuthentication(
                            "scrapingdog", "652c6647e4921e35dab690bc".toCharArray());
                }
                return null;
            }
        });

        // Datacenter proxy endpoint
        Proxy proxy = new Proxy(Proxy.Type.HTTP,
                new InetSocketAddress("proxy.scrapingdog.com", 8081));

        try {
            // Open a connection routed through the proxy
            HttpURLConnection connection =
                    (HttpURLConnection) new URL(scrapingUrl).openConnection(proxy);
            connection.setRequestMethod("GET");

            // Read the response body. try-with-resources guarantees the
            // stream is closed even if reading fails; the charset is pinned
            // to UTF-8 rather than relying on the platform default.
            StringBuilder response = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    response.append(line);
                }
            }

            // Output the response
            System.out.println(response.toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Last updated