Merge pull request #1 from mendableai/main
Fix FIRECRAWL_API_URL bug, also various PyLint fixes
mattjoyce authored May 22, 2024
2 parents 106c18d + df0550d commit 9663015
Showing 26 changed files with 1,726 additions and 126 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/ci.yml
@@ -25,6 +25,9 @@ env:
   SUPABASE_SERVICE_TOKEN: ${{ secrets.SUPABASE_SERVICE_TOKEN }}
   SUPABASE_URL: ${{ secrets.SUPABASE_URL }}
   TEST_API_KEY: ${{ secrets.TEST_API_KEY }}
+  HYPERDX_API_KEY: ${{ secrets.HYPERDX_API_KEY }}
+  HDX_NODE_BETA_MODE: 1
+
 
 jobs:
   pre-deploy:
5 changes: 5 additions & 0 deletions apps/api/.env.example
@@ -31,3 +31,8 @@ POSTHOG_HOST= # set if you'd like to send posthog events like job logs
 
 STRIPE_PRICE_ID_STANDARD=
 STRIPE_PRICE_ID_SCALE=
+
+HYPERDX_API_KEY=
+HDX_NODE_BETA_MODE=1
+
+FIRE_ENGINE_BETA_URL= # set if you'd like to use the fire engine closed beta
10 changes: 10 additions & 0 deletions apps/api/openapi.json
@@ -479,6 +479,16 @@
"format": "uri"
}
}
},
"llm_extraction": {
"type": "object",
"description": "Displayed when using LLM Extraction. Extracted data from the page following the schema defined.",
"nullable": true
},
"warning": {
"type": "string",
"nullable": true,
"description": "Can be displayed when using LLM Extraction. Warning message will let you know any issues with the extraction."
}
}
}
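For context, a scrape response carrying the two new fields might look like the sketch below. The surrounding shape and every value are illustrative assumptions, not taken from this commit.

    // Hypothetical response body once LLM Extraction has run (illustrative only).
    const exampleScrapeResponse = {
      success: true,
      data: {
        content: "...",
        markdown: "...",
        metadata: { sourceURL: "https://example.com" },
        // Shaped by whatever schema the caller supplied for LLM Extraction:
        llm_extraction: { company_name: "Example Inc." },
        // Only present when extraction hit an issue, e.g. token trimming:
        warning: "Page was trimmed to fit the maximum token limit..."
      }
    };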
1 change: 1 addition & 0 deletions apps/api/package.json
@@ -48,6 +48,7 @@
"@bull-board/express": "^5.8.0",
"@devil7softwares/pos": "^1.0.2",
"@dqbd/tiktoken": "^1.0.13",
"@hyperdx/node-opentelemetry": "^0.7.0",
"@logtail/node": "^0.4.12",
"@nangohq/node": "^0.36.33",
"@sentry/node": "^7.48.0",
1,508 changes: 1,472 additions & 36 deletions apps/api/pnpm-lock.yaml

Large diffs are not rendered by default.

18 changes: 9 additions & 9 deletions apps/api/src/__tests__/e2e_withAuth/index.test.ts
@@ -81,7 +81,7 @@ describe("E2E Tests for API Routes", () => {
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data).not.toHaveProperty("html");
expect(response.body.data.content).toContain("🔥 FireCrawl");
expect(response.body.data.content).toContain("🔥 Firecrawl");
}, 30000); // 30 seconds timeout

it("should return a successful response with a valid API key and includeHtml set to true", async () => {
@@ -99,8 +99,8 @@ describe("E2E Tests for API Routes", () => {
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("html");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data.content).toContain("🔥 FireCrawl");
expect(response.body.data.markdown).toContain("🔥 FireCrawl");
expect(response.body.data.content).toContain("🔥 Firecrawl");
expect(response.body.data.markdown).toContain("🔥 Firecrawl");
expect(response.body.data.html).toContain("<h1");
}, 30000); // 30 seconds timeout

@@ -266,7 +266,7 @@ describe("E2E Tests for API Routes", () => {
       urls.forEach((url: string) => {
         expect(url.startsWith("https://wwww.mendable.ai/blog/")).toBeFalsy();
       });
-    }, 60000); // 60 seconds
+    }, 90000); // 90 seconds
 
     it("should return a successful response with a valid API key and limit to 3", async () => {
       const crawlResponse = await request(TEST_URL)
@@ -440,8 +440,8 @@ describe("E2E Tests for API Routes", () => {
       // 120 seconds
       expect(completedResponse.body.data[0]).toHaveProperty("html");
       expect(completedResponse.body.data[0]).toHaveProperty("metadata");
-      expect(completedResponse.body.data[0].content).toContain("🔥 FireCrawl");
-      expect(completedResponse.body.data[0].markdown).toContain("FireCrawl");
+      expect(completedResponse.body.data[0].content).toContain("🔥 Firecrawl");
+      expect(completedResponse.body.data[0].markdown).toContain("Firecrawl");
       expect(completedResponse.body.data[0].html).toContain("<h1");
     }, 60000);
   });
@@ -576,7 +576,7 @@ describe("E2E Tests for API Routes", () => {
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
expect(completedResponse.body.data[0].content).toContain("🔥 FireCrawl");
expect(completedResponse.body.data[0].content).toContain("🔥 Firecrawl");
}, 60000); // 60 seconds

it('should return a successful response for a valid crawl job with PDF files without explicit .pdf extension', async () => {
@@ -697,8 +697,8 @@ describe("E2E Tests for API Routes", () => {
       // 120 seconds
       expect(completedResponse.body.data[0]).toHaveProperty("html");
       expect(completedResponse.body.data[0]).toHaveProperty("metadata");
-      expect(completedResponse.body.data[0].content).toContain("🔥 FireCrawl");
-      expect(completedResponse.body.data[0].markdown).toContain("FireCrawl");
+      expect(completedResponse.body.data[0].content).toContain("🔥 Firecrawl");
+      expect(completedResponse.body.data[0].markdown).toContain("Firecrawl");
       expect(completedResponse.body.data[0].html).toContain("<h1");
     }, 60000);
   }); // 60 seconds
23 changes: 18 additions & 5 deletions apps/api/src/controllers/auth.ts
@@ -4,11 +4,22 @@ import { AuthResponse, RateLimiterMode } from "../../src/types";
 import { supabase_service } from "../../src/services/supabase";
 import { withAuth } from "../../src/lib/withAuth";
 import { RateLimiterRedis } from "rate-limiter-flexible";
+import { setTraceAttributes } from '@hyperdx/node-opentelemetry';
 
 export async function authenticateUser(req, res, mode?: RateLimiterMode) : Promise<AuthResponse> {
   return withAuth(supaAuthenticateUser)(req, res, mode);
 }
 
+function setTrace(team_id: string, api_key: string) {
+  try {
+    setTraceAttributes({
+      team_id,
+      api_key
+    });
+  } catch (error) {
+    console.error('Error setting trace attributes:', error);
+  }
+
+}
 export async function supaAuthenticateUser(
   req,
   res,
@@ -78,11 +89,13 @@ export async function supaAuthenticateUser(
       status: 401,
     };
   }
 
 
+  const team_id = data[0].team_id;
+  const plan = getPlanByPriceId(data[0].price_id);
+  // HyperDX Logging
+  setTrace(team_id, normalizedApi);
   subscriptionData = {
-    team_id: data[0].team_id,
-    plan: getPlanByPriceId(data[0].price_id)
+    team_id: team_id,
+    plan: plan
   }
   switch (mode) {
     case RateLimiterMode.Crawl:
3 changes: 3 additions & 0 deletions apps/api/src/controllers/scrape.ts
@@ -106,6 +106,9 @@ export async function scrapeController(req: Request, res: Response) {
   const extractorOptions = req.body.extractorOptions ?? {
     mode: "markdown"
   }
+  if (extractorOptions.mode === "llm-extraction") {
+    pageOptions.onlyMainContent = true;
+  }
   const origin = req.body.origin ?? "api";
   const timeout = req.body.timeout ?? 30000; // Default timeout of 30 seconds
 
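A request that exercises the new branch might look like the following sketch; the extractorOptions sub-fields beyond mode are assumptions for illustration, not shown in this diff.

    // Hypothetical /v0/scrape request body. Because mode is "llm-extraction",
    // the controller above now forces pageOptions.onlyMainContent = true,
    // so boilerplate is stripped before the page reaches the LLM.
    const requestBody = {
      url: "https://example.com",
      extractorOptions: {
        mode: "llm-extraction",
        extractionPrompt: "Extract the company name", // assumed field name
        extractionSchema: {                           // assumed field name
          type: "object",
          properties: { company_name: { type: "string" } }
        }
      }
    };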
7 changes: 7 additions & 0 deletions apps/api/src/index.ts
@@ -5,6 +5,8 @@ import "dotenv/config";
 import { getWebScraperQueue } from "./services/queue-service";
 import { redisClient } from "./services/rate-limiter";
 import { v0Router } from "./routes/v0";
+import { initSDK } from '@hyperdx/node-opentelemetry';
+
 const { createBullBoard } = require("@bull-board/api");
 const { BullAdapter } = require("@bull-board/api/bullAdapter");
 const { ExpressAdapter } = require("@bull-board/express");
@@ -47,6 +49,11 @@ const DEFAULT_PORT = process.env.PORT ?? 3002;
 const HOST = process.env.HOST ?? "localhost";
 redisClient.connect();
 
+// HyperDX OpenTelemetry
+if(process.env.ENV === 'production') {
+  initSDK({ consoleCapture: true, additionalInstrumentations: []});
+}
+
 
 export function startServer(port = DEFAULT_PORT) {
   const server = app.listen(Number(port), HOST, () => {
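Note that the diff never passes HYPERDX_API_KEY to initSDK, so the SDK presumably reads it from the environment (the same variable added to ci.yml and .env.example above), and telemetry only starts when ENV is production. A minimal sketch of a slightly stricter variant, under that assumption:

    // Assumption: @hyperdx/node-opentelemetry reads HYPERDX_API_KEY from
    // process.env (nothing in this commit passes the key explicitly), so a
    // stricter guard could also require the key before starting telemetry:
    if (process.env.ENV === 'production' && process.env.HYPERDX_API_KEY) {
      initSDK({ consoleCapture: true, additionalInstrumentations: [] });
    }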
24 changes: 19 additions & 5 deletions apps/api/src/lib/LLM-extraction/models.ts
@@ -1,25 +1,38 @@
 import OpenAI from "openai";
 import { Document } from "../../lib/entities";
+import { numTokensFromString } from "./helpers";
 
 export type ScraperCompletionResult = {
   data: any | null;
   url: string;
 };
 
+const maxTokens = 32000;
+const modifier = 4;
 const defaultPrompt =
   "You are a professional web scraper. Extract the contents of the webpage";
 
 function prepareOpenAIDoc(
   document: Document
-): OpenAI.Chat.Completions.ChatCompletionContentPart[] {
-  // Check if the markdown content exists in the document
-  if (!document.markdown) {
+): [OpenAI.Chat.Completions.ChatCompletionContentPart[], number] {
+  let markdown = document.markdown;
+
+  // Check if the markdown content exists in the document
+  if (!markdown) {
     throw new Error(
       "Markdown content is missing in the document. This is likely due to an error in the scraping process. Please try again or reach out to [email protected]"
     );
   }
 
-  return [{ type: "text", text: document.markdown }];
+  // count number of tokens
+  const numTokens = numTokensFromString(document.markdown, "gpt-4");
+
+  if (numTokens > maxTokens) {
+    // trim the document to the maximum number of tokens, tokens != characters
+    markdown = markdown.slice(0, (maxTokens * modifier));
+  }
+
+  return [[{ type: "text", text: markdown }], numTokens];
 }
 
 export async function generateOpenAICompletions({
@@ -38,7 +51,7 @@ export async function generateOpenAICompletions({
   temperature?: number;
 }): Promise<Document> {
   const openai = client as OpenAI;
-  const content = prepareOpenAIDoc(document);
+  const [content, numTokens] = prepareOpenAIDoc(document);
 
   const completion = await openai.chat.completions.create({
     model,
@@ -72,6 +85,7 @@ export async function generateOpenAICompletions({
   return {
     ...document,
     llm_extraction: llmExtraction,
+    warning: numTokens > maxTokens ? `Page was trimmed to fit the maximum token limit defined by the LLM model (Max: ${maxTokens} tokens, Attempted: ${numTokens} tokens). If results are not good, email us at [email protected] so we can help you.` : undefined,
   };
 }
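The trim above is character-based: modifier = 4 encodes a rough characters-per-token estimate, so slicing to maxTokens * modifier characters approximates the 32,000-token budget without re-tokenizing. A standalone sketch of the heuristic (names and the 4-chars-per-token figure are assumptions drawn from this diff):

    // Rough sketch of the truncation heuristic: ~4 characters per GPT-4 token.
    const MAX_TOKENS = 32000;
    const CHARS_PER_TOKEN = 4;

    function trimToTokenBudget(text: string, numTokens: number): string {
      // Over budget: keep roughly the first MAX_TOKENS tokens' worth of characters.
      return numTokens > MAX_TOKENS ? text.slice(0, MAX_TOKENS * CHARS_PER_TOKEN) : text;
    }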

1 change: 1 addition & 0 deletions apps/api/src/lib/entities.ts
@@ -72,6 +72,7 @@ export class Document {
   };
   childrenLinks?: string[];
   provider?: string;
+  warning?: string;
 
   constructor(data: Partial<Document>) {
     if (!data.content) {
42 changes: 42 additions & 0 deletions apps/api/src/lib/load-testing-example.ts
@@ -0,0 +1,42 @@
+import { scrapWithFireEngine } from "../../src/scraper/WebScraper/single_url";
+
+const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
+
+const scrapInBatches = async (
+  urls: string[],
+  batchSize: number,
+  delayMs: number
+) => {
+  let successCount = 0;
+  let errorCount = 0;
+
+  for (let i = 0; i < urls.length; i += batchSize) {
+    const batch = urls
+      .slice(i, i + batchSize)
+      .map((url) => scrapWithFireEngine(url));
+    try {
+      const results = await Promise.all(batch);
+      results.forEach((data, index) => {
+        if (data.trim() === "") {
+          errorCount++;
+        } else {
+          successCount++;
+          console.log(
+            `Scraping result ${i + index + 1}:`,
+            data.trim().substring(0, 20) + "..."
+          );
+        }
+      });
+    } catch (error) {
+      console.error("Error during scraping:", error);
+    }
+    await delay(delayMs);
+  }
+
+  console.log(`Total successful scrapes: ${successCount}`);
+  console.log(`Total errored scrapes: ${errorCount}`);
+};
+function run() {
+  const urls = Array.from({ length: 200 }, () => "https://scrapethissite.com");
+  scrapInBatches(urls, 10, 1000);
+}
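One thing to note about the new script: run() is declared but never invoked, so importing the module does nothing until it is called manually, e.g.:

    // Hypothetical manual invocation; not part of the committed file.
    run(); // scrapes 200 URLs in batches of 10 with a 1s pause between batches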
16 changes: 11 additions & 5 deletions apps/api/src/scraper/WebScraper/crawler.ts
@@ -25,7 +25,7 @@ export class WebCrawler {
     initialUrl,
     includes,
     excludes,
-    maxCrawledLinks,
+    maxCrawledLinks = 10000,
     limit = 10000,
     generateImgAltText = false,
     maxCrawledDepth = 10,
@@ -117,7 +117,7 @@
       const response = await axios.get(this.robotsTxtUrl);
       this.robots = robotsParser(this.robotsTxtUrl, response.data);
     } catch (error) {
-      console.error(`Failed to fetch robots.txt from ${this.robotsTxtUrl}`);
+      console.log(`Failed to fetch robots.txt from ${this.robotsTxtUrl}`);
 
     }
 
@@ -152,7 +152,7 @@
     inProgress?: (progress: Progress) => void,
   ): Promise<{ url: string, html: string }[]> {
     const queue = async.queue(async (task: string, callback) => {
-      if (this.crawledUrls.size >= this.maxCrawledLinks) {
+      if (this.crawledUrls.size >= Math.min(this.maxCrawledLinks, this.limit)) {
         if (callback && typeof callback === "function") {
           callback();
         }
@@ -176,14 +176,14 @@
       if (inProgress && newUrls.length > 0) {
         inProgress({
           current: this.crawledUrls.size,
-          total: this.maxCrawledLinks,
+          total: Math.min(this.maxCrawledLinks, this.limit),
           status: "SCRAPING",
           currentDocumentUrl: newUrls[newUrls.length - 1].url,
         });
       } else if (inProgress) {
         inProgress({
           current: this.crawledUrls.size,
-          total: this.maxCrawledLinks,
+          total: Math.min(this.maxCrawledLinks, this.limit),
           status: "SCRAPING",
           currentDocumentUrl: task,
         });
@@ -324,6 +324,12 @@
// ".docx",
".xlsx",
".xml",
".avi",
".flv",
".woff",
".ttf",
".woff2",
".webp"
];
return fileExtensions.some((ext) => url.endsWith(ext));
}
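The net effect of the crawler changes: maxCrawledLinks now defaults to 10000 like limit, and the queue stops at whichever of the two is smaller. A sketch of the effective cap (function name is illustrative):

    // Illustrative only: the effective page cap after this change.
    function effectiveCrawlCap(maxCrawledLinks = 10000, limit = 10000): number {
      // e.g. limit: 50 caps the crawl even when maxCrawledLinks is left unset.
      return Math.min(maxCrawledLinks, limit);
    }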