Complete developer guide
Transform any website into clean, structured data with our blazing-fast web scraping API. Better than Firecrawl, faster than traditional scrapers.
Transform any website into clean, structured data with just a few API calls. Our service is faster, more reliable, and easier to use than traditional web scraping.
Get structured data from any website in milliseconds with our optimized scraping engine.
Built with security-first approach. All data is encrypted and API keys are securely managed.
Scrape websites from anywhere in the world with our distributed infrastructure.
Simple REST API with comprehensive documentation and multiple response formats.
Sign up for a free account to get started with Neo Crawl API
POST /auth/register
Generate your unique API key for authentication
POST /auth/generate-secret
Make your first API call and get structured data
GET /api/scrapper
{
"username": "your_username",
"password": "your_password"
}curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: your_api_key"{
"message": "Success",
"url": "https://example.com",
"result1": {
"title": "Example Domain",
"headings": ["Welcome to Example"],
"links": [...]
},
"result2": "Clean text content...",
"result3": "# Markdown formatted content"
}
Secure your API access with our robust authentication system. Get your API key in three simple steps.
/auth/register
Register a new user account to get started with Neo Crawl API.
| Name | Type | Required | Description |
|---|---|---|---|
| username | string | Required | Your unique username |
| password | string | Required | Strong password (min 8 characters) |
{
"username": "your_username",
"password": "your_secure_password"
}{
"msg": "user created",
"user_id": "12345"
}
/auth/login
Authenticate and receive an access token for API operations.
| Name | Type | Required | Description |
|---|---|---|---|
| username | string | Required | Your username |
| password | string | Required | Your password |
# Form Data
username=your_username
password=your_secure_password{
"access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
"token_type": "bearer",
"expires_in": 3600
}
/auth/generate-secret
Generate your secret API key for making scraping requests.
Authorization: Bearer YOUR_ACCESS_TOKEN — Access token from login
curl -X POST "https://api.neocrawl.com/auth/generate-secret" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN"{
"secret_token": "ncl_sk_1234567890abcdef...",
"created_at": "2025-08-25T10:00:00Z"
}
Complete reference for all Neo Crawl API endpoints with detailed examples and response formats.
/api/scrapper
Extract structured data from any website URL. Returns JSON, text, and markdown formats.
| Name | Type | Required | Description |
|---|---|---|---|
| url | string | Required | The URL to scrape (must be properly encoded) |
x-api-key: YOUR_API_KEY — Your secret API key
curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: ncl_sk_1234567890abcdef..."{
"message": "Success",
"url": "https://example.com",
"result1": {
"title": "Example Domain",
"meta_description": "This domain is for use in illustrative examples...",
"headings": {
"h1": ["Example Domain"],
"h2": ["About", "Contact"],
"h3": []
},
"links": [
{
"text": "More information...",
"href": "https://www.iana.org/domains/example",
"type": "external"
}
],
"images": [
{
"src": "https://example.com/logo.png",
"alt": "Example Logo",
"width": 200,
"height": 100
}
],
"structured_data": {}
},
"result2": "Example Domain\n\nThis domain is for use in illustrative examples in documents...",
"result3": "# Example Domain\n\nThis domain is for use in illustrative examples..."
}
/usage/dashboard
Get detailed analytics and usage statistics for your API consumption.
Authorization: Bearer YOUR_ACCESS_TOKEN — Access token from login
curl -X GET "https://api.neocrawl.com/usage/dashboard" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN"{
"user_id": "12345",
"plan": 1,
"monthly_limit": 20,
"calls_made": 15,
"calls_remaining": 5,
"reset_date": "2025-09-01T00:00:00Z",
"usage_history": [
{
"date": "2025-08-25",
"calls": 3,
"success_rate": 100
}
]
}
Flexible pricing plans to suit your needs, from hobby projects to enterprise applications.
403 Forbidden error
Ready-to-use code examples in your favorite programming language. Copy, paste, and start scraping!
const neoCrawl = async (url) => {
const response = await fetch(`https://api.neocrawl.com/api/scrapper?url=${encodeURIComponent(url)}`, {
headers: {
'x-api-key': 'your_api_key'
}
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
return await response.json();
};
// Usage
try {
const data = await neoCrawl('https://example.com');
// Process your scraped data here
} catch (error) {
console.error('Error:', error);
}import requests
class NeoCrawl:
def __init__(self, api_key):
self.api_key = api_key
self.base_url = "https://api.neocrawl.com"
def scrape(self, url):
headers = {"x-api-key": self.api_key}
params = {"url": url}
response = requests.get(
f"{self.base_url}/api/scrapper",
headers=headers,
params=params
)
if response.status_code == 200:
return response.json()
else:
response.raise_for_status()
# Usage
crawler = NeoCrawl("your_api_key")
result = crawler.scrape("https://example.com")
print(result["result1"])const axios = require('axios');
class NeoCrawl {
constructor(apiKey) {
this.apiKey = apiKey;
this.baseURL = 'https://api.neocrawl.com';
}
async scrape(url) {
try {
const response = await axios.get(`${this.baseURL}/api/scrapper`, {
params: { url },
headers: { 'x-api-key': this.apiKey }
});
return response.data;
} catch (error) {
throw new Error(`Neo Crawl API Error: ${error.response?.data?.message || error.message}`);
}
}
}
// Usage
const crawler = new NeoCrawl('your_api_key');
crawler.scrape('https://example.com')
.then(data => {
// Process your data here
})
.catch(error => console.error(error));Get up and running with Neo Crawl in your favorite programming environment.
Neo Crawl is a REST API service - no installation needed. Just get your API key and start making requests.
While not required, we provide helpful code examples and wrapper functions for popular languages.
Let's make your first successful API call step by step.
After registration, generate your secret API key from the dashboard.
Start with a simple website like https://example.com
Send a GET request with your API key in the header.
curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: YOUR_API_KEY"
Learn how to securely manage and use your API keys.
# Store your API key securely
export NEO_CRAWL_API_KEY="ncl_sk_1234567890abcdef..."
# Use in your requests
curl -H "x-api-key: $NEO_CRAWL_API_KEY" \
"https://api.neocrawl.com/api/scrapper?url=https://example.com"
Create your Neo Crawl account to start scraping websites.
/auth/registerRegister a new user account with username and password.
| Name | Type | Required | Description |
|---|---|---|---|
| username | string | Required | Unique username (3-50 characters) |
| password | string | Required | Strong password (minimum 8 characters) |
{
"username": "your_username",
"password": "your_secure_password"
}{
"msg": "user created",
"user_id": "12345"
}Authenticate to access protected endpoints and manage your account.
/auth/loginLogin with your credentials to receive an access token.
| Name | Type | Required | Description |
|---|---|---|---|
| username | string | Required | Your registered username |
| password | string | Required | Your account password |
# Form Data
username=your_username
password=your_secure_password{
"access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
"token_type": "bearer",
"expires_in": 3600
}
The main endpoint for extracting structured data from any website URL.
/api/scrapper
Extract structured data from any website URL. Returns JSON, text, and markdown formats.
| Name | Type | Required | Description |
|---|---|---|---|
| url | string | Required | The URL to scrape (must be properly encoded) |
x-api-key: YOUR_API_KEY — Your secret API key
curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: ncl_sk_1234567890abcdef..."{
"message": "Success",
"url": "https://example.com",
"result1": {
"title": "Example Domain",
"meta_description": "This domain is for use in illustrative examples...",
"headings": {
"h1": ["Example Domain"],
"h2": ["About", "Contact"],
"h3": []
},
"links": [
{
"text": "More information...",
"href": "https://www.iana.org/domains/example",
"type": "external"
}
],
"images": [
{
"src": "https://example.com/logo.png",
"alt": "Example Logo",
"width": 200,
"height": 100
}
],
"structured_data": {}
},
"result2": "Example Domain\n\nThis domain is for use in illustrative examples in documents...",
"result3": "# Example Domain\n\nThis domain is for use in illustrative examples..."
}
Understanding rate limits and how to optimize your API usage.
Every API response includes rate limit information in the headers:
X-RateLimit-Limit: 60
X-RateLimit-Remaining: 59
X-RateLimit-Reset: 1640995200
X-RateLimit-Retry-After: 3600
Neo Crawl provides multiple response formats to suit your needs.
Structured data with metadata, links, images, and more.
{
"title": "Page Title",
"meta_description": "...",
"headings": {...},
"links": [...],
"images": [...]
}
Clean, readable text content without HTML tags.
Page Title
This is the main content
of the webpage in clean
text format...
Formatted markdown ready for documentation or blogs.
# Page Title
This is the main **content**
of the webpage in clean
[markdown](link) format...
Choose the plan that fits your scraping needs.
Monitor your API usage and track analytics in real-time.
/usage/dashboard
Retrieve comprehensive usage analytics and account information.
Authorization: Bearer YOUR_ACCESS_TOKEN — Access token from login
curl -X GET "https://api.neocrawl.com/usage/dashboard" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN"{
"user_id": "12345",
"plan": 1,
"monthly_limit": 20,
"calls_made": 15,
"calls_remaining": 5,
"reset_date": "2025-09-01T00:00:00Z",
"usage_history": [
{
"date": "2025-08-25",
"calls": 3,
"success_rate": 100
}
]
}
Gain insights into your scraping patterns and optimize your usage.
Group similar URLs to maximize efficiency
Store frequently accessed data locally
Track usage to avoid hitting rate limits
Ready-to-use cURL commands for testing and automation.
# Simple scrape request
curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: YOUR_API_KEY" \
-H "Content-Type: application/json"# Scrape with error handling and output formatting
curl -X GET "https://api.neocrawl.com/api/scrapper?url=https://example.com" \
-H "x-api-key: YOUR_API_KEY" \
-w "HTTP Status: %{http_code}\nResponse Time: %{time_total}s\n" \
-s -S \
| jq '.'# Process multiple URLs
urls=("https://example.com" "https://google.com" "https://github.com")
for url in "${urls[@]}"; do
echo "Scraping: $url"
curl -X GET "https://api.neocrawl.com/api/scrapper?url=$url" \
-H "x-api-key: YOUR_API_KEY" \
-s | jq '.result1.title'
sleep 1 # Respect rate limits
done
Ready-to-use JavaScript code for browser and Node.js environments.
const neoCrawl = async (url) => {
const response = await fetch(`https://api.neocrawl.com/api/scrapper?url=${encodeURIComponent(url)}`, {
headers: {
'x-api-key': 'your_api_key'
}
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
return await response.json();
};
// Usage
try {
const data = await neoCrawl('https://example.com');
// Process your scraped data here
} catch (error) {
console.error('Error:', error);
}Python class and examples for easy integration.
import requests
class NeoCrawl:
def __init__(self, api_key):
self.api_key = api_key
self.base_url = "https://api.neocrawl.com"
def scrape(self, url):
headers = {"x-api-key": self.api_key}
params = {"url": url}
response = requests.get(
f"{self.base_url}/api/scrapper",
headers=headers,
params=params
)
if response.status_code == 200:
return response.json()
else:
response.raise_for_status()
# Usage
crawler = NeoCrawl("your_api_key")
result = crawler.scrape("https://example.com")
print(result["result1"])Server-side JavaScript implementation with axios.
const axios = require('axios');
class NeoCrawl {
constructor(apiKey) {
this.apiKey = apiKey;
this.baseURL = 'https://api.neocrawl.com';
}
async scrape(url) {
try {
const response = await axios.get(`${this.baseURL}/api/scrapper`, {
params: { url },
headers: { 'x-api-key': this.apiKey }
});
return response.data;
} catch (error) {
throw new Error(`Neo Crawl API Error: ${error.response?.data?.message || error.message}`);
}
}
}
// Usage
const crawler = new NeoCrawl('your_api_key');
crawler.scrape('https://example.com')
.then(data => {
// Process your data here
})
.catch(error => console.error(error));Join thousands of developers who trust Neo Crawl for their web scraping needs. Get started in minutes, not hours.