Merge branch 'main' into patch-1

Ikko Eltociear Ashimine 2024-05-15 12:14:52 +09:00 committed by GitHub
commit e91c122c69
107 changed files with 14106 additions and 1707 deletions

View File

@@ -13,6 +13,8 @@ env:
   HOST: ${{ secrets.HOST }}
   LLAMAPARSE_API_KEY: ${{ secrets.LLAMAPARSE_API_KEY }}
   LOGTAIL_KEY: ${{ secrets.LOGTAIL_KEY }}
+  POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
+  POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
   NUM_WORKERS_PER_QUEUE: ${{ secrets.NUM_WORKERS_PER_QUEUE }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
   PLAYWRIGHT_MICROSERVICE_URL: ${{ secrets.PLAYWRIGHT_MICROSERVICE_URL }}
@@ -38,7 +40,7 @@ jobs:
       - name: Set up Node.js
         uses: actions/setup-node@v3
         with:
-          node-version: '20'
+          node-version: "20"
       - name: Install pnpm
         run: npm install -g pnpm
       - name: Install dependencies
@@ -54,5 +56,5 @@ jobs:
         id: start_workers
       - name: Run E2E tests
         run: |
-          npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false
-        working-directory: ./apps/api
+          npm run test:prod
+        working-directory: ./apps/api

View File

@@ -13,6 +13,8 @@ env:
   HOST: ${{ secrets.HOST }}
   LLAMAPARSE_API_KEY: ${{ secrets.LLAMAPARSE_API_KEY }}
   LOGTAIL_KEY: ${{ secrets.LOGTAIL_KEY }}
+  POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
+  POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
   NUM_WORKERS_PER_QUEUE: ${{ secrets.NUM_WORKERS_PER_QUEUE }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
   PLAYWRIGHT_MICROSERVICE_URL: ${{ secrets.PLAYWRIGHT_MICROSERVICE_URL }}
@@ -38,7 +40,7 @@ jobs:
       - name: Set up Node.js
         uses: actions/setup-node@v3
        with:
-          node-version: '20'
+          node-version: "20"
       - name: Install pnpm
         run: npm install -g pnpm
       - name: Install dependencies
@@ -54,12 +56,49 @@ jobs:
         id: start_workers
       - name: Run E2E tests
         run: |
-          npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false
+          npm run test:prod
         working-directory: ./apps/api
+  pre-deploy-test-suite:
+    name: Test Suite
+    needs: pre-deploy
+    runs-on: ubuntu-latest
+    services:
+      redis:
+        image: redis
+        ports:
+          - 6379:6379
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: "20"
+      - name: Install pnpm
+        run: npm install -g pnpm
+      - name: Install dependencies
+        run: pnpm install
+        working-directory: ./apps/api
+      - name: Start the application
+        run: npm start &
+        working-directory: ./apps/api
+        id: start_app
+      - name: Start workers
+        run: npm run workers &
+        working-directory: ./apps/api
+        id: start_workers
+      - name: Install dependencies
+        run: pnpm install
+        working-directory: ./apps/test-suite
+      - name: Run E2E tests
+        run: |
+          npm run test
+        working-directory: ./apps/test-suite
   deploy:
     name: Deploy app
     runs-on: ubuntu-latest
-    needs: pre-deploy
+    needs: pre-deploy-test-suite
     steps:
       - uses: actions/checkout@v3
       - name: Change directory
@@ -68,4 +107,3 @@ jobs:
       - run: flyctl deploy ./apps/api --remote-only -a firecrawl-scraper-js
         env:
           FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}

8
.gitignore vendored
View File

@@ -6,3 +6,11 @@
 dump.rdb
 /mongo-data
 apps/js-sdk/node_modules/
+apps/api/.env.local
+apps/test-suite/node_modules/
+apps/test-suite/.env
+apps/test-suite/logs

View File

@@ -1,4 +1,117 @@
# Contributors guide
Welcome to [Firecrawl](https://firecrawl.dev) 🔥! Here are some instructions on how to get the project set up locally, so you can run it on your own (and contribute).
If you're contributing, note that the process is similar to other open-source repos: fork Firecrawl, make changes, run tests, open a PR. If you have any questions or would like help getting on board, reach out to hello@mendable.ai or submit an issue!
## Running the project locally
First, start by installing the dependencies:
1. Node.js [instructions](https://nodejs.org/en/learn/getting-started/how-to-install-nodejs)
2. pnpm [instructions](https://pnpm.io/installation)
3. Redis [instructions](https://redis.io/docs/latest/operate/oss_and_stack/install/install-redis/)
Set environment variables in a .env file in the /apps/api/ directory; you can copy over the template in .env.example.
To start, we won't set up authentication or any optional sub-services (PDF parsing, JS blocking support, AI features).
.env:
```
# ===== Required ENVS ======
NUM_WORKERS_PER_QUEUE=8
PORT=3002
HOST=0.0.0.0
REDIS_URL=redis://localhost:6379
## To turn on DB authentication, you need to set up supabase.
USE_DB_AUTHENTICATION=false
# ===== Optional ENVS ======
# Supabase Setup (used to support DB authentication, advanced logging, etc.)
SUPABASE_ANON_TOKEN=
SUPABASE_URL=
SUPABASE_SERVICE_TOKEN=
# Other Optionals
TEST_API_KEY= # use if you've set up authentication and want to test with a real API key
SCRAPING_BEE_API_KEY= # set if you'd like to use ScrapingBee to handle JS blocking
OPENAI_API_KEY= # add for LLM-dependent features (image alt generation, etc.)
BULL_AUTH_KEY= #
LOGTAIL_KEY= # use if you're configuring basic logging with logtail
PLAYWRIGHT_MICROSERVICE_URL= # set if you'd like to run a playwright fallback
LLAMAPARSE_API_KEY= # set if you have a LlamaParse key you'd like to use to parse PDFs
SERPER_API_KEY= # set if you have a Serper key you'd like to use as a search API
SLACK_WEBHOOK_URL= # set if you'd like to send Slack server health status messages
POSTHOG_API_KEY= # set if you'd like to send PostHog events like job logs
POSTHOG_HOST= # set if you'd like to send PostHog events like job logs
```
### Installing dependencies
First, install the dependencies using pnpm.
```bash
pnpm install
```
### Running the project
You're going to need to open 3 terminals.
### Terminal 1 - setting up redis
Run this command from anywhere within your project:
```bash
redis-server
```
### Terminal 2 - setting up workers
Now, navigate to the apps/api/ directory and run:
```bash
pnpm run workers
```
This will start the workers that are responsible for processing crawl jobs.
### Terminal 3 - setting up the main server
To do this, navigate to the apps/api/ directory. If you don't have pnpm installed already, install it here: https://pnpm.io/installation
Next, run your server with:
```bash
pnpm run start
```
### Terminal 3 - sending our first request.
Alright, now let's send our first request.
```bash
curl -X GET http://localhost:3002/test
```
This should return the response "Hello, world!".
If you'd like to test the crawl endpoint, you can run this:
```bash
curl -X POST http://localhost:3002/v0/crawl \
  -H 'Content-Type: application/json' \
  -d '{
    "url": "https://mendable.ai"
  }'
```
## Tests
The best way to do this is to run the tests with `npm run test:local-no-auth` if you'd like to run them without authentication.
If you'd like to run the tests with authentication, run `npm run test:prod`.
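For example, from the repository root (a minimal sketch; it assumes the Redis server, workers, and API server from the steps above are already running):
```bash
cd apps/api

# Run the test suite without authentication (USE_DB_AUTHENTICATION=false in your .env)
npm run test:local-no-auth

# Or, if you have set up Supabase authentication and a TEST_API_KEY:
npm run test:prod
```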

798
LICENSE
View File

@@ -1,201 +1,661 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
- (The full Apache License 2.0 text is removed, together with the appendix notice "Copyright 2024 Firecrawl | Mendable.ai".)
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ (The full standard AGPL-3.0 text is added: preamble, sections 0-17, "How to Apply These Terms to Your New Programs", and the closing pointer to <https://www.gnu.org/licenses/>.)

348
README.md
View File

@@ -1,32 +1,35 @@
# 🔥 Firecrawl
Crawl and convert any website into LLM-ready markdown. Built by [Mendable.ai](https://mendable.ai?ref=gfirecrawl) and the firecrawl community.
_This repository is in its early development stages. We are still merging custom modules into the mono repo. It's not yet completely ready for full self-host deployment, but you can already run it locally._
## What is Firecrawl?
[Firecrawl](https://firecrawl.dev?ref=github) is an API service that takes a URL, crawls it, and converts it into clean markdown. We crawl all accessible subpages and give you clean markdown for each. No sitemap required.
_Pst. hey, you, join our stargazers :)_
<img src="https://github.com/mendableai/firecrawl/assets/44934913/53c4483a-0f0e-40c6-bd84-153a07f94d29" width="200">
## How to use it?
We provide an easy to use API with our hosted version. You can find the playground and documentation [here](https://firecrawl.dev/playground). You can also self host the backend if you'd like.
- [x] [API](https://firecrawl.dev/playground)
- [x] [Python SDK](https://github.com/mendableai/firecrawl/tree/main/apps/python-sdk)
- [x] [Node SDK](https://github.com/mendableai/firecrawl/tree/main/apps/js-sdk)
- [x] [Langchain Integration 🦜🔗](https://python.langchain.com/docs/integrations/document_loaders/firecrawl/)
- [x] [Llama Index Integration 🦙](https://docs.llamaindex.ai/en/latest/examples/data_connectors/WebPageDemo/#using-firecrawl-reader)
- [x] [Langchain JS Integration 🦜🔗](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/firecrawl)
- [ ] Want an SDK or Integration? Let us know by opening an issue.
To self-host, refer to the guide [here](https://github.com/mendableai/firecrawl/blob/main/SELF_HOST.md).
To run locally, refer to guide [here](https://github.com/mendableai/firecrawl/blob/main/CONTRIBUTING.md).
### API Key
To use the API, you need to sign up on [Firecrawl](https://firecrawl.dev) and get an API key.
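The key is sent as a bearer token on every request, as the examples below show. A minimal sketch (the key value is a placeholder):
```bash
# Placeholder value - replace with the key from your Firecrawl account
export FIRECRAWL_API_KEY="fc-YOUR_API_KEY"

# The key is then passed on each request via the Authorization header, e.g.:
#   -H "Authorization: Bearer $FIRECRAWL_API_KEY"
```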
### Crawling
Used to crawl a URL and all accessible subpages. This submits a crawl job and returns a job ID to check the status of the crawl.
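A crawl is submitted with a POST request. A sketch of what that looks like, modeled on the scrape examples below (the exact response shape may differ, but it contains the job ID used in the status check that follows):
```bash
curl -X POST https://api.firecrawl.dev/v0/crawl \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer YOUR_API_KEY' \
  -d '{
    "url": "https://mendable.ai"
  }'

# The response contains a job ID, e.g. something like {"jobId": "1234-5678-9101"},
# which is then used with the /v0/crawl/status endpoint shown below.
```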
@@ -58,25 +61,160 @@ curl -X GET https://api.firecrawl.dev/v0/crawl/status/1234-5678-9101 \
```json
{
  "status": "completed",
  "current": 22,
  "total": 22,
  "data": [
    {
      "content": "Raw Content ",
      "markdown": "# Markdown Content",
      "provider": "web-scraper",
      "metadata": {
        "title": "Mendable | AI for CX and Sales",
        "description": "AI for CX and Sales",
        "language": null,
        "sourceURL": "https://www.mendable.ai/"
      }
    }
  ]
}
```
### Scraping
Used to scrape a URL and get its content.
```bash
curl -X POST https://api.firecrawl.dev/v0/scrape \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer YOUR_API_KEY' \
-d '{
"url": "https://mendable.ai"
}'
```
Response:
```json
{
  "success": true,
  "data": {
    "content": "Raw Content ",
    "markdown": "# Markdown Content",
    "provider": "web-scraper",
    "metadata": {
      "title": "Mendable | AI for CX and Sales",
      "description": "AI for CX and Sales",
      "language": null,
      "sourceURL": "https://www.mendable.ai/"
    }
  }
}
```
### Search (Beta)
Used to search the web, get the most relevant results, scrape each page, and return the markdown.
```bash
curl -X POST https://api.firecrawl.dev/v0/search \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer YOUR_API_KEY' \
  -d '{
    "query": "firecrawl",
    "pageOptions": {
      "fetchPageContent": true // false for a fast serp api
    }
  }'
```
```json
{
  "success": true,
  "data": [
    {
      "url": "https://mendable.ai",
      "markdown": "# Markdown Content",
      "provider": "web-scraper",
      "metadata": {
        "title": "Mendable | AI for CX and Sales",
        "description": "AI for CX and Sales",
        "language": null,
        "sourceURL": "https://www.mendable.ai/"
      }
    }
  ]
}
```
### Intelligent Extraction (Beta)
Used to extract structured data from scraped pages.
```bash
curl -X POST https://api.firecrawl.dev/v0/scrape \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer YOUR_API_KEY' \
  -d '{
    "url": "https://www.mendable.ai/",
    "extractorOptions": {
      "mode": "llm-extraction",
      "extractionPrompt": "Based on the information on the page, extract the information from the schema. ",
      "extractionSchema": {
        "type": "object",
        "properties": {
          "company_mission": {
            "type": "string"
          },
          "supports_sso": {
            "type": "boolean"
          },
          "is_open_source": {
            "type": "boolean"
          },
          "is_in_yc": {
            "type": "boolean"
          }
        },
        "required": [
          "company_mission",
          "supports_sso",
          "is_open_source",
          "is_in_yc"
        ]
      }
    }
  }'
```
```json
{
  "success": true,
  "data": {
    "content": "Raw Content",
    "metadata": {
      "title": "Mendable",
      "description": "Mendable allows you to easily build AI chat applications. Ingest, customize, then deploy with one line of code anywhere you want. Brought to you by SideGuide",
      "robots": "follow, index",
      "ogTitle": "Mendable",
      "ogDescription": "Mendable allows you to easily build AI chat applications. Ingest, customize, then deploy with one line of code anywhere you want. Brought to you by SideGuide",
      "ogUrl": "https://mendable.ai/",
      "ogImage": "https://mendable.ai/mendable_new_og1.png",
      "ogLocaleAlternate": [],
      "ogSiteName": "Mendable",
      "sourceURL": "https://mendable.ai/"
    },
    "llm_extraction": {
      "company_mission": "Train a secure AI on your technical resources that answers customer and employee questions so your team doesn't have to",
      "supports_sso": true,
      "is_open_source": false,
      "is_in_yc": true
    }
  }
}
```
## Using Python SDK
### Installing Python SDK
@ -108,6 +246,166 @@ url = 'https://example.com'
scraped_data = app.scrape_url(url)
```
### Extracting structured data from a URL
With LLM extraction, you can easily extract structured data from any URL. We support Pydantic schemas to make this easier for you. Here is how to use it:
```python
from typing import List

from pydantic import BaseModel, Field

class ArticleSchema(BaseModel):
    title: str
    points: int
    by: str
    commentsURL: str

class TopArticlesSchema(BaseModel):
    top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")

data = app.scrape_url('https://news.ycombinator.com', {
    'extractorOptions': {
        'extractionSchema': TopArticlesSchema.model_json_schema(),
        'mode': 'llm-extraction'
    },
    'pageOptions': {
        'onlyMainContent': True
    }
})

print(data["llm_extraction"])
```
### Search for a query
Performs a web search, retrieves the top results, extracts data from each page, and returns their markdown.
```python
query = 'What is Mendable?'
search_result = app.search(query)
```
## Using the Node SDK
### Installation
To install the Firecrawl Node SDK, you can use npm:
```bash
npm install @mendable/firecrawl-js
```
### Usage
1. Get an API key from [firecrawl.dev](https://firecrawl.dev)
2. Set the API key as an environment variable named `FIRECRAWL_API_KEY` or pass it as a parameter to the `FirecrawlApp` class.
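For example, a minimal setup (assuming an ESM project; `fc-YOUR_API_KEY` is a placeholder) looks like this, and the `app` instance it creates is the one used by the snippets below:

```js
import FirecrawlApp from "@mendable/firecrawl-js";

// Pass the key explicitly, or omit apiKey and set the
// FIRECRAWL_API_KEY environment variable instead.
const app = new FirecrawlApp({ apiKey: "fc-YOUR_API_KEY" });
```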
### Scraping a URL
To scrape a single URL with error handling, use the `scrapeUrl` method. It takes the URL as a parameter and returns the scraped data as an object.
```js
try {
const url = 'https://example.com';
const scrapedData = await app.scrapeUrl(url);
console.log(scrapedData);
} catch (error) {
console.error(
'Error occurred while scraping:',
error.message
);
}
```
### Crawling a Website
To crawl a website with error handling, use the `crawlUrl` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
```js
const crawlUrl = 'https://example.com';
const params = {
crawlerOptions: {
excludes: ['blog/'],
includes: [], // leave empty for all pages
limit: 1000,
},
pageOptions: {
onlyMainContent: true
}
};
const waitUntilDone = true;
const timeout = 5;
const crawlResult = await app.crawlUrl(
crawlUrl,
params,
waitUntilDone,
timeout
);
```
### Checking Crawl Status
To check the status of a crawl job with error handling, use the `checkCrawlStatus` method. It takes the job ID as a parameter and returns the current status of the crawl job.
```js
const status = await app.checkCrawlStatus(jobId);
console.log(status);
```
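If you start a crawl without waiting for it to finish, one simple way to wait for completion is to poll `checkCrawlStatus` yourself. This is only a sketch: it assumes the returned object mirrors the `status`/`data` fields of the crawl status response shown earlier, and the 5-second interval is an arbitrary choice.

```js
// Poll the crawl job until it leaves the "active" state.
let result = await app.checkCrawlStatus(jobId);
while (result.status === "active") {
  await new Promise((resolve) => setTimeout(resolve, 5000)); // wait 5s between checks
  result = await app.checkCrawlStatus(jobId);
}
console.log(result.status, result.data);
```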
### Extracting structured data from a URL
With LLM extraction, you can easily extract structured data from any URL. We support Zod schemas to make this easier for you. Here is how to use it:
```js
import FirecrawlApp from "@mendable/firecrawl-js";
import { z } from "zod";
const app = new FirecrawlApp({
apiKey: "fc-YOUR_API_KEY",
});
// Define schema to extract contents into
const schema = z.object({
top: z
.array(
z.object({
title: z.string(),
points: z.number(),
by: z.string(),
commentsURL: z.string(),
})
)
.length(5)
.describe("Top 5 stories on Hacker News"),
});
const scrapeResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: schema },
});
console.log(scrapeResult.data["llm_extraction"]);
```
### Search for a query
With the `search` method, you can search for a query in a search engine and get the top results along with the page content for each result. The method takes the query as a parameter and returns the search results.
```js
const query = 'what is mendable?';
const searchResults = await app.search(query, {
pageOptions: {
fetchPageContent: true // Fetch the page content for each search result
}
});
```
## Contributing
We love contributions! Please read our [contributing guide](CONTRIBUTING.md) before submitting a pull request.
*It is the sole responsibility of the end users to respect websites' policies when scraping, searching and crawling with Firecrawl. Users are advised to adhere to the applicable privacy policies and terms of use of the websites prior to initiating any scraping activities. By default, Firecrawl respects the directives specified in the websites' robots.txt files when crawling. By utilizing Firecrawl, you expressly agree to comply with these conditions.*

View File

@ -1,6 +1,6 @@
# Self-hosting Firecrawl
Guide coming soon.
Refer to [CONTRIBUTING.md](https://github.com/mendableai/firecrawl/blob/main/CONTRIBUTING.md) for instructions on how to run it locally.
*This repository is currently in its early stages of development. We are in the process of merging custom modules into this monorepo. The primary objective is to enhance the accuracy of LLM responses by utilizing clean data. It is not yet ready for full self-hosting - we're working on it.*

29
apps/api/.env.example Normal file
View File

@ -0,0 +1,29 @@
# ===== Required ENVS ======
NUM_WORKERS_PER_QUEUE=8
PORT=3002
HOST=0.0.0.0
REDIS_URL=redis://localhost:6379
## To turn on DB authentication, you need to set up supabase.
USE_DB_AUTHENTICATION=true
# ===== Optional ENVS ======
# Supabase Setup (used to support DB authentication, advanced logging, etc.)
SUPABASE_ANON_TOKEN=
SUPABASE_URL=
SUPABASE_SERVICE_TOKEN=
# Other Optionals
TEST_API_KEY= # use if you've set up authentication and want to test with a real API key
SCRAPING_BEE_API_KEY= #Set if you'd like to use ScrapingBee to handle JS blocking
OPENAI_API_KEY= # add for LLM dependent features (image alt generation, etc.)
BULL_AUTH_KEY= #
LOGTAIL_KEY= # Use if you're configuring basic logging with logtail
PLAYWRIGHT_MICROSERVICE_URL= # set if you'd like to run a playwright fallback
LLAMAPARSE_API_KEY= #Set if you have a llamaparse key you'd like to use to parse pdfs
SERPER_API_KEY= #Set if you have a serper key you'd like to use as a search api
SLACK_WEBHOOK_URL= # set if you'd like to send slack server health status messages
POSTHOG_API_KEY= # set if you'd like to send posthog events like job logs
POSTHOG_HOST= # set if you'd like to send posthog events like job logs

View File

@ -7,8 +7,8 @@ SUPABASE_SERVICE_TOKEN=
REDIS_URL=
SCRAPING_BEE_API_KEY=
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
BULL_AUTH_KEY=
LOGTAIL_KEY=
PLAYWRIGHT_MICROSERVICE_URL=
LLAMAPARSE_API_KEY=
TEST_API_KEY=

View File

@ -17,11 +17,16 @@ kill_timeout = '5s'
[http_service]
internal_port = 8080
force_https = true
auto_stop_machines = true
auto_stop_machines = false
auto_start_machines = true
min_machines_running = 0
min_machines_running = 2
processes = ['app']
[http_service.concurrency]
type = "requests"
hard_limit = 200
soft_limit = 100
[[services]]
protocol = 'tcp'
internal_port = 8080
@ -38,10 +43,14 @@ kill_timeout = '5s'
[services.concurrency]
type = 'connections'
hard_limit = 45
soft_limit = 20
hard_limit = 75
soft_limit = 30
[[vm]]
size = 'performance-1x'
size = 'performance-4x'
processes = ['app']

View File

@ -2,4 +2,7 @@ module.exports = {
preset: "ts-jest",
testEnvironment: "node",
setupFiles: ["./jest.setup.js"],
// ignore dist folder root dir
modulePathIgnorePatterns: ["<rootDir>/dist/"],
};

View File

@ -1,258 +1,392 @@
{
"openapi": "3.0.0",
"info": {
"title": "Firecrawl API",
"version": "1.0.0",
"description": "API for interacting with Firecrawl services to convert websites to LLM-ready data.",
"contact": {
"name": "Firecrawl Support",
"url": "https://firecrawl.dev/support",
"email": "help@mendable.ai"
}
},
"servers": [
{
"url": "https://api.firecrawl.dev/v0"
}
],
"paths": {
"/scrape": {
"post": {
"summary": "Scrape a single URL",
"operationId": "scrapeSingleUrl",
"tags": ["Scraping"],
"security": [
{
"bearerAuth": []
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "uri",
"description": "The URL to scrape"
}
},
"required": ["url"]
}
}
}
},
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ScrapeResponse"
}
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
"openapi": "3.0.0",
"info": {
"title": "Firecrawl API",
"version": "1.0.0",
"description": "API for interacting with Firecrawl services to perform web scraping and crawling tasks.",
"contact": {
"name": "Firecrawl Support",
"url": "https://firecrawl.dev/support",
"email": "support@firecrawl.dev"
}
},
"servers": [
{
"url": "https://api.firecrawl.dev/v0"
}
],
"paths": {
"/scrape": {
"post": {
"summary": "Scrape a single URL",
"operationId": "scrapeSingleUrl",
"tags": ["Scraping"],
"security": [
{
"bearerAuth": []
}
}
},
"/crawl": {
"post": {
"summary": "Crawl multiple URLs based on options",
"operationId": "crawlUrls",
"tags": ["Crawling"],
"security": [
{
"bearerAuth": []
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "uri",
"description": "The base URL to start crawling from"
},
"crawlerOptions": {
"type": "object",
"properties": {
"includes": {
"type": "array",
"items": {
"type": "string"
},
"description": "URL patterns to include"
},
"excludes": {
"type": "array",
"items": {
"type": "string"
},
"description": "URL patterns to exclude"
},
"generateImgAltText": {
"type": "boolean",
"description": "Generate alt text for images using LLMs (must have a paid plan)",
"default": false
},
"limit": {
"type": "integer",
"description": "Maximum number of pages to crawl"
}
}
},
"pageOptions": {
"type": "object",
"properties": {
"onlyMainContent": {
"type": "boolean",
"description": "Only return the main content of the page excluding headers, navs, footers, etc.",
"default": false
}
}
}
},
"required": ["url"]
}
}
}
},
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CrawlResponse"
}
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
}
}
},
"/crawl/status/{jobId}": {
"get": {
"tags": ["Crawl"],
"summary": "Get the status of a crawl job",
"operationId": "getCrawlStatus",
"security": [
{
"bearerAuth": []
}
],
"parameters": [
{
"name": "jobId",
"in": "path",
"description": "ID of the crawl job",
"required": true,
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "uri",
"description": "The URL to scrape"
},
"pageOptions": {
"type": "object",
"properties": {
"status": {
"type": "string",
"description": "Status of the job (completed, active, failed, paused)"
},
"current": {
"type": "integer",
"description": "Current page number"
},
"current_url": {
"type": "string",
"description": "Current URL being scraped"
},
"current_step": {
"type": "string",
"description": "Current step in the process"
},
"total": {
"type": "integer",
"description": "Total number of pages"
},
"data": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ScrapeResponse"
},
"description": "Data returned from the job (null when it is in progress)"
"onlyMainContent": {
"type": "boolean",
"description": "Only return the main content of the page excluding headers, navs, footers, etc.",
"default": false
}
}
}
},
"required": ["url"]
}
}
}
},
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ScrapeResponse"
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
}
}
},
"components": {
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer"
"/crawl": {
"post": {
"summary": "Crawl multiple URLs based on options",
"operationId": "crawlUrls",
"tags": ["Crawling"],
"security": [
{
"bearerAuth": []
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "uri",
"description": "The base URL to start crawling from"
},
"crawlerOptions": {
"type": "object",
"properties": {
"includes": {
"type": "array",
"items": {
"type": "string"
},
"description": "URL patterns to include"
},
"excludes": {
"type": "array",
"items": {
"type": "string"
},
"description": "URL patterns to exclude"
},
"generateImgAltText": {
"type": "boolean",
"description": "Generate alt text for images using LLMs (must have a paid plan)",
"default": false
},
"returnOnlyUrls": {
"type": "boolean",
"description": "If true, returns only the URLs as a list on the crawl status. Attention: the return response will be a list of URLs inside the data, not a list of documents.",
"default": false
},
"limit": {
"type": "integer",
"description": "Maximum number of pages to crawl",
"default": 10000
}
}
},
"pageOptions": {
"type": "object",
"properties": {
"onlyMainContent": {
"type": "boolean",
"description": "Only return the main content of the page excluding headers, navs, footers, etc.",
"default": false
}
}
}
},
"required": ["url"]
}
}
}
},
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CrawlResponse"
}
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
}
}
},
"/search": {
"post": {
"summary": "Search for a keyword in Google, returns top page results with markdown content for each page",
"operationId": "searchGoogle",
"tags": ["Search"],
"security": [
{
"bearerAuth": []
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"format": "uri",
"description": "The URL to scrape"
},
"pageOptions": {
"type": "object",
"properties": {
"onlyMainContent": {
"type": "boolean",
"description": "Only return the main content of the page excluding headers, navs, footers, etc.",
"default": false
},
"fetchPageContent": {
"type": "boolean",
"description": "Fetch the content of each page. If false, defaults to a basic fast serp API.",
"default": true
}
}
},
"searchOptions": {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "Maximum number of results. Max is 20 during beta."
}
}
}
},
"required": ["query"]
}
}
}
},
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SearchResponse"
}
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
}
}
},
"/crawl/status/{jobId}": {
"get": {
"tags": ["Crawl"],
"summary": "Get the status of a crawl job",
"operationId": "getCrawlStatus",
"security": [
{
"bearerAuth": []
}
],
"parameters": [
{
"name": "jobId",
"in": "path",
"description": "ID of the crawl job",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"status": {
"type": "string",
"description": "Status of the job (completed, active, failed, paused)"
},
"current": {
"type": "integer",
"description": "Current page number"
},
"current_url": {
"type": "string",
"description": "Current URL being scraped"
},
"current_step": {
"type": "string",
"description": "Current step in the process"
},
"total": {
"type": "integer",
"description": "Total number of pages"
},
"data": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ScrapeResponse"
},
"description": "Data returned from the job (null when it is in progress)"
}
}
}
}
}
},
"402": {
"description": "Payment required"
},
"429": {
"description": "Too many requests"
},
"500": {
"description": "Server error"
}
}
}
}
},
"components": {
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer"
}
},
"schemas": {
"ScrapeResponse": {
"type": "object",
"properties": {
"success": {
"type": "boolean"
},
"data": {
"type": "object",
"properties": {
"markdown": {
"type": "string"
},
"content": {
"type": "string"
},
"metadata": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"language": {
"type": "string",
"nullable": true
},
"sourceURL": {
"type": "string",
"format": "uri"
}
}
}
}
}
}
},
"schemas": {
"ScrapeResponse": {
"type": "object",
"properties": {
"success": {
"type": "boolean"
},
"data": {
"SearchResponse": {
"type": "object",
"properties": {
"success": {
"type": "boolean"
},
"data": {
"type": "array",
"items": {
"type": "object",
"properties": {
"content": {
"url": {
"type": "string"
},
"markdown": {
"type": "string"
},
"content": {
"type": "string"
},
"metadata": {
"type": "object",
"properties": {
@ -275,21 +409,21 @@
}
}
}
},
"CrawlResponse": {
"type": "object",
"properties": {
"jobId": {
"type": "string"
}
}
},
"CrawlResponse": {
"type": "object",
"properties": {
"jobId": {
"type": "string"
}
}
}
},
"security": [
{
"bearerAuth": []
}
]
}
}
},
"security": [
{
"bearerAuth": []
}
]
}

View File

@ -10,7 +10,9 @@
"flyio": "node dist/src/index.js",
"start:dev": "nodemon --exec ts-node src/index.ts",
"build": "tsc",
"test": "jest --verbose",
"test": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_noAuth/*'",
"test:local-no-auth": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_withAuth/*'",
"test:prod": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false --testPathIgnorePatterns='src/__tests__/e2e_noAuth/*'",
"workers": "nodemon --exec ts-node src/services/queue-worker.ts",
"worker:production": "node dist/src/services/queue-worker.js",
"mongo-docker": "docker run -d -p 2717:27017 -v ./mongo-data:/data/db --name mongodb mongo:latest",
@ -39,15 +41,17 @@
"typescript": "^5.4.2"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.20.5",
"@brillout/import": "^0.2.2",
"@bull-board/api": "^5.14.2",
"@bull-board/express": "^5.8.0",
"@devil7softwares/pos": "^1.0.2",
"@dqbd/tiktoken": "^1.0.7",
"@dqbd/tiktoken": "^1.0.13",
"@logtail/node": "^0.4.12",
"@nangohq/node": "^0.36.33",
"@sentry/node": "^7.48.0",
"@supabase/supabase-js": "^2.7.1",
"ajv": "^8.12.0",
"async": "^3.2.5",
"async-mutex": "^0.4.0",
"axios": "^1.3.4",
@ -64,6 +68,8 @@
"glob": "^10.3.12",
"gpt3-tokenizer": "^1.1.5",
"ioredis": "^5.3.2",
"joplin-turndown-plugin-gfm": "^1.0.12",
"json-schema-to-zod": "^2.1.0",
"keyword-extractor": "^0.0.25",
"langchain": "^0.1.25",
"languagedetect": "^2.0.0",
@ -76,6 +82,7 @@
"openai": "^4.28.4",
"pdf-parse": "^1.1.1",
"pos": "^0.4.2",
"posthog-node": "^4.0.1",
"promptable": "^0.0.9",
"puppeteer": "^22.6.3",
"rate-limiter-flexible": "^2.4.2",
@ -89,7 +96,9 @@
"unstructured-client": "^0.9.4",
"uuid": "^9.0.1",
"wordpos": "^2.1.0",
"xml2js": "^0.6.2"
"xml2js": "^0.6.2",
"zod": "^3.23.4",
"zod-to-json-schema": "^3.23.0"
},
"nodemonConfig": {
"ignore": [

View File

@ -5,6 +5,9 @@ settings:
excludeLinksFromLockfile: false
dependencies:
'@anthropic-ai/sdk':
specifier: ^0.20.5
version: 0.20.5
'@brillout/import':
specifier: ^0.2.2
version: 0.2.3
@ -18,7 +21,7 @@ dependencies:
specifier: ^1.0.2
version: 1.0.2
'@dqbd/tiktoken':
specifier: ^1.0.7
specifier: ^1.0.13
version: 1.0.13
'@logtail/node':
specifier: ^0.4.12
@ -32,6 +35,9 @@ dependencies:
'@supabase/supabase-js':
specifier: ^2.7.1
version: 2.39.7
ajv:
specifier: ^8.12.0
version: 8.12.0
async:
specifier: ^3.2.5
version: 3.2.5
@ -80,6 +86,12 @@ dependencies:
ioredis:
specifier: ^5.3.2
version: 5.3.2
joplin-turndown-plugin-gfm:
specifier: ^1.0.12
version: 1.0.12
json-schema-to-zod:
specifier: ^2.1.0
version: 2.1.0
keyword-extractor:
specifier: ^0.0.25
version: 0.0.25
@ -116,6 +128,9 @@ dependencies:
pos:
specifier: ^0.4.2
version: 0.4.2
posthog-node:
specifier: ^4.0.1
version: 4.0.1
promptable:
specifier: ^0.0.9
version: 0.0.9
@ -158,6 +173,12 @@ dependencies:
xml2js:
specifier: ^0.6.2
version: 0.6.2
zod:
specifier: ^3.23.4
version: 3.23.4
zod-to-json-schema:
specifier: ^3.23.0
version: 3.23.0(zod@3.23.4)
devDependencies:
'@flydotio/dockerfile':
@ -222,6 +243,21 @@ packages:
'@jridgewell/trace-mapping': 0.3.25
dev: true
/@anthropic-ai/sdk@0.20.5:
resolution: {integrity: sha512-d0ch+zp6/gHR4+2wqWV7JU1EJ7PpHc3r3F6hebovJTouY+pkaId1FuYYaVsG3l/gyqhOZUwKCMSMqcFNf+ZmWg==}
dependencies:
'@types/node': 18.19.22
'@types/node-fetch': 2.6.11
abort-controller: 3.0.0
agentkeepalive: 4.5.0
form-data-encoder: 1.7.2
formdata-node: 4.4.1
node-fetch: 2.7.0
web-streams-polyfill: 3.3.3
transitivePeerDependencies:
- encoding
dev: false
/@anthropic-ai/sdk@0.9.1:
resolution: {integrity: sha512-wa1meQ2WSfoY8Uor3EdrJq0jTiZJoKoSii2ZVWRY1oN4Tlr5s59pADg9T79FTbPe1/se5c3pBeZgJL63wmuoBA==}
dependencies:
@ -1179,7 +1215,7 @@ packages:
redis: 4.6.13
typesense: 1.7.2(@babel/runtime@7.24.0)
uuid: 9.0.1
zod: 3.22.4
zod: 3.23.4
transitivePeerDependencies:
- encoding
dev: false
@ -1197,8 +1233,8 @@ packages:
p-queue: 6.6.2
p-retry: 4.6.2
uuid: 9.0.1
zod: 3.22.4
zod-to-json-schema: 3.22.4(zod@3.22.4)
zod: 3.23.4
zod-to-json-schema: 3.23.0(zod@3.23.4)
dev: false
/@langchain/openai@0.0.18:
@ -1208,8 +1244,8 @@ packages:
'@langchain/core': 0.1.43
js-tiktoken: 1.0.10
openai: 4.28.4
zod: 3.22.4
zod-to-json-schema: 3.22.4(zod@3.22.4)
zod: 3.23.4
zod-to-json-schema: 3.23.0(zod@3.23.4)
transitivePeerDependencies:
- encoding
dev: false
@ -1790,6 +1826,15 @@ packages:
humanize-ms: 1.2.1
dev: false
/ajv@8.12.0:
resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==}
dependencies:
fast-deep-equal: 3.1.3
json-schema-traverse: 1.0.0
require-from-string: 2.0.2
uri-js: 4.4.1
dev: false
/ansi-escapes@4.3.2:
resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==}
engines: {node: '>=8'}
@ -2896,6 +2941,10 @@ packages:
- supports-color
dev: false
/fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
dev: false
/fast-fifo@1.3.2:
resolution: {integrity: sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==}
dev: false
@ -3923,6 +3972,10 @@ packages:
- ts-node
dev: true
/joplin-turndown-plugin-gfm@1.0.12:
resolution: {integrity: sha512-qL4+1iycQjZ1fs8zk3jSRk7cg3ROBUHk7GKtiLAQLFzLPKErnILUvz5DLszSQvz3s1sTjPbywLDISVUtBY6HaA==}
dev: false
/js-tiktoken@1.0.10:
resolution: {integrity: sha512-ZoSxbGjvGyMT13x6ACo9ebhDha/0FHdKA+OsQcMOWcm1Zs7r90Rhk5lhERLzji+3rA7EKpXCgwXcM5fF3DMpdA==}
dependencies:
@ -3960,6 +4013,15 @@ packages:
/json-parse-even-better-errors@2.3.1:
resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
/json-schema-to-zod@2.1.0:
resolution: {integrity: sha512-7ishNgYY+AbIKeeHcp5xCOdJbdVwSfDx/4V2ktc16LUusCJJbz2fEKdWUmAxhKIiYzhZ9Fp4E8OsAoM/h9cOLA==}
hasBin: true
dev: false
/json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
dev: false
/json5@2.2.3:
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
engines: {node: '>=6'}
@ -4184,8 +4246,8 @@ packages:
redis: 4.6.13
uuid: 9.0.1
yaml: 2.4.1
zod: 3.22.4
zod-to-json-schema: 3.22.4(zod@3.22.4)
zod: 3.23.4
zod-to-json-schema: 3.23.0(zod@3.23.4)
transitivePeerDependencies:
- '@aws-crypto/sha256-js'
- '@aws-sdk/client-bedrock-agent-runtime'
@ -5009,6 +5071,16 @@ packages:
source-map-js: 1.0.2
dev: false
/posthog-node@4.0.1:
resolution: {integrity: sha512-rtqm2h22QxLGBrW2bLYzbRhliIrqgZ0k+gF0LkQ1SNdeD06YE5eilV0MxZppFSxC8TfH0+B0cWCuebEnreIDgQ==}
engines: {node: '>=15.0.0'}
dependencies:
axios: 1.6.7
rusha: 0.8.14
transitivePeerDependencies:
- debug
dev: false
/prelude-ls@1.1.2:
resolution: {integrity: sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==}
engines: {node: '>= 0.8.0'}
@ -5044,7 +5116,7 @@ packages:
sbd: 1.0.19
typescript: 5.4.5
uuid: 9.0.1
zod: 3.22.4
zod: 3.23.4
transitivePeerDependencies:
- debug
dev: false
@ -5225,6 +5297,11 @@ packages:
resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
engines: {node: '>=0.10.0'}
/require-from-string@2.0.2:
resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==}
engines: {node: '>=0.10.0'}
dev: false
/resolve-cwd@3.0.0:
resolution: {integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==}
engines: {node: '>=8'}
@ -5266,6 +5343,10 @@ packages:
engines: {node: '>=10.0.0'}
dev: false
/rusha@0.8.14:
resolution: {integrity: sha512-cLgakCUf6PedEu15t8kbsjnwIFFR2D4RfL+W3iWFJ4iac7z4B0ZI8fxy4R3J956kAI68HclCFGL8MPoUVC3qVA==}
dev: false
/safe-buffer@5.2.1:
resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==}
@ -5931,6 +6012,12 @@ packages:
picocolors: 1.0.0
dev: true
/uri-js@4.4.1:
resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
dependencies:
punycode: 2.3.1
dev: false
/urlpattern-polyfill@10.0.0:
resolution: {integrity: sha512-H/A06tKD7sS1O1X2SshBVeA5FLycRpjqiBeqGKmBwBDBy28EnRjORxTNe269KSSr5un5qyWi1iL61wLxpd+ZOg==}
dev: false
@ -6160,14 +6247,18 @@ packages:
engines: {node: '>=10'}
dev: true
/zod-to-json-schema@3.22.4(zod@3.22.4):
resolution: {integrity: sha512-2Ed5dJ+n/O3cU383xSY28cuVi0BCQhF8nYqWU5paEpl7fVdqdAmiLdqLyfblbNdfOFwFfi/mqU4O1pwc60iBhQ==}
/zod-to-json-schema@3.23.0(zod@3.23.4):
resolution: {integrity: sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==}
peerDependencies:
zod: ^3.22.4
zod: ^3.23.3
dependencies:
zod: 3.22.4
zod: 3.23.4
dev: false
/zod@3.22.4:
resolution: {integrity: sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==}
dev: false
/zod@3.23.4:
resolution: {integrity: sha512-/AtWOKbBgjzEYYQRNfoGKHObgfAZag6qUJX1VbHo2PRBgS+wfWagEY2mizjfyAPcGesrJOcx/wcl0L9WnVrHFw==}
dev: false

View File

@ -13,15 +13,24 @@ GET http://localhost:3002/v0/jobs/active HTTP/1.1
### Scrape Website
POST https://api.firecrawl.dev/v0/scrape HTTP/1.1
POST http://localhost:3002/v0/crawl HTTP/1.1
Authorization: Bearer
content-type: application/json
{
"url":"https://www.mendable.ai"
"url":"https://www.mendable.ai",
"crawlerOptions": {
"returnOnlyUrls": true
}
}
### Scrape Website
POST http://localhost:3002/v0/scrape HTTP/1.1
Authorization: Bearer
@ -34,7 +43,7 @@ content-type: application/json
### Check Job Status
GET http://localhost:3002/v0/crawl/status/333ab225-dc3e-418b-9d4b-8fb833cbaf89 HTTP/1.1
GET http://localhost:3002/v0/crawl/status/a6053912-d602-4709-841f-3d2cb46fea0a HTTP/1.1
Authorization: Bearer
### Get Job Result
@ -49,4 +58,13 @@ content-type: application/json
### Check Job Status
GET https://api.firecrawl.dev/v0/crawl/status/cfcb71ac-23a3-4da5-bd85-d4e58b871d66
Authorization: Bearer
Authorization: Bearer
### Get Active Jobs Count
GET http://localhost:3002/serverHealthCheck
content-type: application/json
### Notify Server Health Check
GET http://localhost:3002/serverHealthCheck/notify
content-type: application/json

View File

@ -1,179 +0,0 @@
import request from 'supertest';
import { app } from '../../index';
import dotenv from 'dotenv';
dotenv.config();
const TEST_URL = 'http://localhost:3002'
describe('E2E Tests for API Routes', () => {
describe('GET /', () => {
it('should return Hello, world! message', async () => {
const response = await request(TEST_URL).get('/');
expect(response.statusCode).toBe(200);
expect(response.text).toContain('SCRAPERS-JS: Hello, world! Fly.io');
});
});
describe('GET /test', () => {
it('should return Hello, world! message', async () => {
const response = await request(TEST_URL).get('/test');
expect(response.statusCode).toBe(200);
expect(response.text).toContain('Hello, world!');
});
});
describe('POST /v0/scrape', () => {
it('should require authorization', async () => {
const response = await request(app).post('/v0/scrape');
expect(response.statusCode).toBe(401);
});
it('should return an error response with an invalid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/scrape')
.set('Authorization', `Bearer invalid-api-key`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(401);
});
it('should return a successful response with a valid preview token', async () => {
const response = await request(TEST_URL)
.post('/v0/scrape')
.set('Authorization', `Bearer this_is_just_a_preview_token`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(200);
});
it('should return a successful response with a valid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/scrape')
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty('data');
expect(response.body.data).toHaveProperty('content');
expect(response.body.data).toHaveProperty('markdown');
expect(response.body.data).toHaveProperty('metadata');
expect(response.body.data.content).toContain('🔥 FireCrawl');
}, 30000); // 30 seconds timeout
});
describe('POST /v0/crawl', () => {
it('should require authorization', async () => {
const response = await request(TEST_URL).post('/v0/crawl');
expect(response.statusCode).toBe(401);
});
it('should return an error response with an invalid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/crawl')
.set('Authorization', `Bearer invalid-api-key`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(401);
});
it('should return a successful response with a valid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/crawl')
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty('jobId');
expect(response.body.jobId).toMatch(/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/);
});
// Additional tests for insufficient credits?
});
describe('POST /v0/crawlWebsitePreview', () => {
it('should require authorization', async () => {
const response = await request(TEST_URL).post('/v0/crawlWebsitePreview');
expect(response.statusCode).toBe(401);
});
it('should return an error response with an invalid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/crawlWebsitePreview')
.set('Authorization', `Bearer invalid-api-key`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(401);
});
it('should return a successful response with a valid API key', async () => {
const response = await request(TEST_URL)
.post('/v0/crawlWebsitePreview')
.set('Authorization', `Bearer this_is_just_a_preview_token`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty('jobId');
expect(response.body.jobId).toMatch(/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/);
});
});
describe('GET /v0/crawl/status/:jobId', () => {
it('should require authorization', async () => {
const response = await request(TEST_URL).get('/v0/crawl/status/123');
expect(response.statusCode).toBe(401);
});
it('should return an error response with an invalid API key', async () => {
const response = await request(TEST_URL)
.get('/v0/crawl/status/123')
.set('Authorization', `Bearer invalid-api-key`);
expect(response.statusCode).toBe(401);
});
it('should return Job not found for invalid job ID', async () => {
const response = await request(TEST_URL)
.get('/v0/crawl/status/invalidJobId')
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(404);
});
it('should return a successful response for a valid crawl job', async () => {
const crawlResponse = await request(TEST_URL)
.post('/v0/crawl')
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`)
.set('Content-Type', 'application/json')
.send({ url: 'https://firecrawl.dev' });
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty('status');
expect(response.body.status).toBe('active');
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 30000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set('Authorization', `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty('status');
expect(completedResponse.body.status).toBe('completed');
expect(completedResponse.body).toHaveProperty('data');
expect(completedResponse.body.data[0]).toHaveProperty('content');
expect(completedResponse.body.data[0]).toHaveProperty('markdown');
expect(completedResponse.body.data[0]).toHaveProperty('metadata');
expect(completedResponse.body.data[0].content).toContain('🔥 FireCrawl');
}, 60000); // 60 seconds
});
describe('GET /is-production', () => {
it('should return the production status', async () => {
const response = await request(TEST_URL).get('/is-production');
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty('isProduction');
});
});
});

View File

@ -0,0 +1,216 @@
import request from "supertest";
import { app } from "../../index";
import dotenv from "dotenv";
const fs = require("fs");
const path = require("path");
dotenv.config();
const TEST_URL = "http://127.0.0.1:3002";
describe("E2E Tests for API Routes with No Authentication", () => {
let originalEnv: NodeJS.ProcessEnv;
// save original process.env
beforeAll(() => {
originalEnv = { ...process.env };
process.env.USE_DB_AUTHENTICATION = "false";
process.env.SUPABASE_ANON_TOKEN = "";
process.env.SUPABASE_URL = "";
process.env.SUPABASE_SERVICE_TOKEN = "";
process.env.SCRAPING_BEE_API_KEY = "";
process.env.OPENAI_API_KEY = "";
process.env.BULL_AUTH_KEY = "";
process.env.LOGTAIL_KEY = "";
process.env.PLAYWRIGHT_MICROSERVICE_URL = "";
process.env.LLAMAPARSE_API_KEY = "";
process.env.TEST_API_KEY = "";
process.env.POSTHOG_API_KEY = "";
process.env.POSTHOG_HOST = "";
});
// restore original process.env
afterAll(() => {
process.env = originalEnv;
});
describe("GET /", () => {
it("should return Hello, world! message", async () => {
const response = await request(TEST_URL).get("/");
expect(response.statusCode).toBe(200);
expect(response.text).toContain("SCRAPERS-JS: Hello, world! Fly.io");
});
});
describe("GET /test", () => {
it("should return Hello, world! message", async () => {
const response = await request(TEST_URL).get("/test");
expect(response.statusCode).toBe(200);
expect(response.text).toContain("Hello, world!");
});
});
describe("POST /v0/scrape", () => {
it("should not require authorization", async () => {
const response = await request(TEST_URL).post("/v0/scrape");
expect(response.statusCode).not.toBe(401);
});
it("should return an error for a blocklisted URL without requiring authorization", async () => {
const blocklistedUrl = "https://facebook.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
});
it("should return a successful response", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
}, 10000); // 10 seconds timeout
});
describe("POST /v0/crawl", () => {
it("should not require authorization", async () => {
const response = await request(TEST_URL).post("/v0/crawl");
expect(response.statusCode).not.toBe(401);
});
it("should return an error for a blocklisted URL", async () => {
const blocklistedUrl = "https://twitter.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/crawl")
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
});
it("should return a successful response", async () => {
const response = await request(TEST_URL)
.post("/v0/crawl")
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("jobId");
expect(response.body.jobId).toMatch(
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/
);
});
});
describe("POST /v0/crawlWebsitePreview", () => {
it("should not require authorization", async () => {
const response = await request(TEST_URL).post("/v0/crawlWebsitePreview");
expect(response.statusCode).not.toBe(401);
});
it("should return an error for a blocklisted URL", async () => {
const blocklistedUrl = "https://instagram.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/crawlWebsitePreview")
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
});
it("should return a successful response", async () => {
const response = await request(TEST_URL)
.post("/v0/crawlWebsitePreview")
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("jobId");
expect(response.body.jobId).toMatch(
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/
);
});
});
describe("POST /v0/search", () => {
it("should require not authorization", async () => {
const response = await request(TEST_URL).post("/v0/search");
expect(response.statusCode).not.toBe(401);
});
it("should return no error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/search")
.set("Authorization", `Bearer invalid-api-key`)
.set("Content-Type", "application/json")
.send({ query: "test" });
expect(response.statusCode).not.toBe(401);
});
it("should return a successful response without a valid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/search")
.set("Content-Type", "application/json")
.send({ query: "test" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("success");
expect(response.body.success).toBe(true);
expect(response.body).toHaveProperty("data");
}, 20000);
});
describe("GET /v0/crawl/status/:jobId", () => {
it("should not require authorization", async () => {
const response = await request(TEST_URL).get("/v0/crawl/status/123");
expect(response.statusCode).not.toBe(401);
});
it("should return Job not found for invalid job ID", async () => {
const response = await request(TEST_URL).get(
"/v0/crawl/status/invalidJobId"
);
expect(response.statusCode).toBe(404);
});
it("should return a successful response for a valid crawl job", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL).get(
`/v0/crawl/status/${crawlResponse.body.jobId}`
);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("active");
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 30000));
const completedResponse = await request(TEST_URL).get(
`/v0/crawl/status/${crawlResponse.body.jobId}`
);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("completed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
}, 60000); // 60 seconds
});
describe("GET /is-production", () => {
it("should return the production status", async () => {
const response = await request(TEST_URL).get("/is-production");
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("isProduction");
});
});
});

View File

@ -0,0 +1,521 @@
import request from "supertest";
import { app } from "../../index";
import dotenv from "dotenv";
dotenv.config();
// const TEST_URL = 'http://localhost:3002'
const TEST_URL = "http://127.0.0.1:3002";
describe("E2E Tests for API Routes", () => {
beforeAll(() => {
process.env.USE_DB_AUTHENTICATION = "true";
});
afterAll(() => {
delete process.env.USE_DB_AUTHENTICATION;
});
describe("GET /", () => {
it("should return Hello, world! message", async () => {
const response = await request(TEST_URL).get("/");
expect(response.statusCode).toBe(200);
expect(response.text).toContain("SCRAPERS-JS: Hello, world! Fly.io");
});
});
describe("GET /test", () => {
it("should return Hello, world! message", async () => {
const response = await request(TEST_URL).get("/test");
expect(response.statusCode).toBe(200);
expect(response.text).toContain("Hello, world!");
});
});
describe("POST /v0/scrape", () => {
it("should require authorization", async () => {
const response = await request(app).post("/v0/scrape");
expect(response.statusCode).toBe(401);
});
it("should return an error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer invalid-api-key`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(401);
});
it("should return an error for a blocklisted URL", async () => {
const blocklistedUrl = "https://facebook.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain(
"Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it."
);
});
it("should return a successful response with a valid preview token", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer this_is_just_a_preview_token`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
}, 10000); // 10 seconds timeout
it("should return a successful response with a valid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("data");
expect(response.body.data).toHaveProperty("content");
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data).not.toHaveProperty("html");
expect(response.body.data.content).toContain("🔥 FireCrawl");
}, 30000); // 30 seconds timeout
it("should return a successful response with a valid API key and includeHtml set to true", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({
url: "https://firecrawl.dev",
pageOptions: { includeHtml: true },
});
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("data");
expect(response.body.data).toHaveProperty("content");
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("html");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data.content).toContain("🔥 FireCrawl");
expect(response.body.data.markdown).toContain("🔥 FireCrawl");
expect(response.body.data.html).toContain("<h1");
}, 30000); // 30 seconds timeout
});
describe("POST /v0/crawl", () => {
it("should require authorization", async () => {
const response = await request(TEST_URL).post("/v0/crawl");
expect(response.statusCode).toBe(401);
});
it("should return an error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer invalid-api-key`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(401);
});
it("should return an error for a blocklisted URL", async () => {
const blocklistedUrl = "https://twitter.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain(
"Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it."
);
});
it("should return a successful response with a valid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("jobId");
expect(response.body.jobId).toMatch(
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/
);
});
// Additional tests for insufficient credits?
});
describe("POST /v0/crawlWebsitePreview", () => {
it("should require authorization", async () => {
const response = await request(TEST_URL).post("/v0/crawlWebsitePreview");
expect(response.statusCode).toBe(401);
});
it("should return an error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/crawlWebsitePreview")
.set("Authorization", `Bearer invalid-api-key`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(401);
});
// it("should return an error for a blocklisted URL", async () => {
// const blocklistedUrl = "https://instagram.com/fake-test";
// const response = await request(TEST_URL)
// .post("/v0/crawlWebsitePreview")
// .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
// .set("Content-Type", "application/json")
// .send({ url: blocklistedUrl });
// // is returning 429 instead of 403
// expect(response.statusCode).toBe(403);
// expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
// });
it("should return a timeout error when scraping takes longer than the specified timeout", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev", timeout: 1000 });
expect(response.statusCode).toBe(408);
}, 3000);
it("should return a successful response with a valid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/crawlWebsitePreview")
.set("Authorization", `Bearer this_is_just_a_preview_token`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("jobId");
expect(response.body.jobId).toMatch(
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$/
);
});
});
describe("POST /v0/search", () => {
it("should require authorization", async () => {
const response = await request(TEST_URL).post("/v0/search");
expect(response.statusCode).toBe(401);
});
it("should return an error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/search")
.set("Authorization", `Bearer invalid-api-key`)
.set("Content-Type", "application/json")
.send({ query: "test" });
expect(response.statusCode).toBe(401);
});
it("should return a successful response with a valid API key", async () => {
const response = await request(TEST_URL)
.post("/v0/search")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ query: "test" });
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("success");
expect(response.body.success).toBe(true);
expect(response.body).toHaveProperty("data");
}, 30000); // 30 seconds timeout
});
describe("GET /v0/crawl/status/:jobId", () => {
it("should require authorization", async () => {
const response = await request(TEST_URL).get("/v0/crawl/status/123");
expect(response.statusCode).toBe(401);
});
it("should return an error response with an invalid API key", async () => {
const response = await request(TEST_URL)
.get("/v0/crawl/status/123")
.set("Authorization", `Bearer invalid-api-key`);
expect(response.statusCode).toBe(401);
});
it("should return Job not found for invalid job ID", async () => {
const response = await request(TEST_URL)
.get("/v0/crawl/status/invalidJobId")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(404);
});
it("should return a successful response for a valid crawl job", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev" });
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("active");
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 30000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("completed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
expect(completedResponse.body.data[0].content).toContain("🔥 FireCrawl");
}, 60000); // 60 seconds
it("should return a successful response with max depth option for a valid crawl job", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({
url: "https://www.scrapethissite.com",
crawlerOptions: { maxDepth: 2 },
});
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("active");
// wait for 60 seconds
await new Promise((r) => setTimeout(r, 60000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("completed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
const urls = completedResponse.body.data.map(
(item: any) => item.metadata?.sourceURL
);
expect(urls.length).toBeGreaterThan(1);
// Check if all URLs have a maximum depth of 1
urls.forEach((url) => {
const depth = new URL(url).pathname.split("/").filter(Boolean).length;
expect(depth).toBeLessThanOrEqual(1);
});
}, 120000);
it("should return a successful response for a valid crawl job with includeHtml set to true option", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({
url: "https://firecrawl.dev",
pageOptions: { includeHtml: true },
});
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("active");
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 30000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("completed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
// 120 seconds
expect(completedResponse.body.data[0]).toHaveProperty("html");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
expect(completedResponse.body.data[0].content).toContain("🔥 FireCrawl");
expect(completedResponse.body.data[0].markdown).toContain("FireCrawl");
expect(completedResponse.body.data[0].html).toContain("<h1");
}, 60000);
}); // 60 seconds
it("If someone cancels a crawl job, it should turn into failed status", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://jestjs.io" });
expect(crawlResponse.statusCode).toBe(200);
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 10000));
const response = await request(TEST_URL)
.delete(`/v0/crawl/cancel/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("cancelled");
await new Promise((r) => setTimeout(r, 20000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("failed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data).toEqual(null);
expect(completedResponse.body).toHaveProperty("partial_data");
expect(completedResponse.body.partial_data[0]).toHaveProperty("content");
expect(completedResponse.body.partial_data[0]).toHaveProperty("markdown");
expect(completedResponse.body.partial_data[0]).toHaveProperty("metadata");
}, 60000); // 60 seconds
describe("POST /v0/scrape with LLM Extraction", () => {
it("should extract data using LLM extraction mode", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({
url: "https://mendable.ai",
pageOptions: {
onlyMainContent: true,
},
extractorOptions: {
mode: "llm-extraction",
extractionPrompt:
"Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
extractionSchema: {
type: "object",
properties: {
company_mission: {
type: "string",
},
supports_sso: {
type: "boolean",
},
is_open_source: {
type: "boolean",
},
},
required: ["company_mission", "supports_sso", "is_open_source"],
},
},
});
// Ensure that the job was successfully created before proceeding with LLM extraction
expect(response.statusCode).toBe(200);
// Assuming the LLM extraction object is available in the response body under `data.llm_extraction`
let llmExtraction = response.body.data.llm_extraction;
// Check if the llm_extraction object has the required properties with correct types and values
expect(llmExtraction).toHaveProperty("company_mission");
expect(typeof llmExtraction.company_mission).toBe("string");
expect(llmExtraction).toHaveProperty("supports_sso");
expect(llmExtraction.supports_sso).toBe(true);
expect(typeof llmExtraction.supports_sso).toBe("boolean");
expect(llmExtraction).toHaveProperty("is_open_source");
expect(llmExtraction.is_open_source).toBe(false);
expect(typeof llmExtraction.is_open_source).toBe("boolean");
}, 60000); // 60 secs
});
// describe("POST /v0/scrape for Top 100 Companies", () => {
// it("should extract data for the top 100 companies", async () => {
// const response = await request(TEST_URL)
// .post("/v0/scrape")
// .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
// .set("Content-Type", "application/json")
// .send({
// url: "https://companiesmarketcap.com/",
// pageOptions: {
// onlyMainContent: true
// },
// extractorOptions: {
// mode: "llm-extraction",
// extractionPrompt: "Extract the name, market cap, price, and today's change for the top 20 companies listed on the page.",
// extractionSchema: {
// type: "object",
// properties: {
// companies: {
// type: "array",
// items: {
// type: "object",
// properties: {
// rank: { type: "number" },
// name: { type: "string" },
// marketCap: { type: "string" },
// price: { type: "string" },
// todayChange: { type: "string" }
// },
// required: ["rank", "name", "marketCap", "price", "todayChange"]
// }
// }
// },
// required: ["companies"]
// }
// }
// });
// // Print the response body to the console for debugging purposes
// console.log("Response companies:", response.body.data.llm_extraction.companies);
// // Check if the response has the correct structure and data types
// expect(response.status).toBe(200);
// expect(Array.isArray(response.body.data.llm_extraction.companies)).toBe(true);
// expect(response.body.data.llm_extraction.companies.length).toBe(40);
// // Sample check for the first company
// const firstCompany = response.body.data.llm_extraction.companies[0];
// expect(firstCompany).toHaveProperty("name");
// expect(typeof firstCompany.name).toBe("string");
// expect(firstCompany).toHaveProperty("marketCap");
// expect(typeof firstCompany.marketCap).toBe("string");
// expect(firstCompany).toHaveProperty("price");
// expect(typeof firstCompany.price).toBe("string");
// expect(firstCompany).toHaveProperty("todayChange");
// expect(typeof firstCompany.todayChange).toBe("string");
// }, 120000); // 120 secs
// });
describe("GET /is-production", () => {
it("should return the production status", async () => {
const response = await request(TEST_URL).get("/is-production");
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("isProduction");
});
});
});

View File

@ -0,0 +1,84 @@
import { parseApi } from "../../src/lib/parseApi";
import { getRateLimiter } from "../../src/services/rate-limiter";
import { AuthResponse, RateLimiterMode } from "../../src/types";
import { supabase_service } from "../../src/services/supabase";
import { withAuth } from "../../src/lib/withAuth";
export async function authenticateUser(req, res, mode?: RateLimiterMode) : Promise<AuthResponse> {
return withAuth(supaAuthenticateUser)(req, res, mode);
}
export async function supaAuthenticateUser(
req,
res,
mode?: RateLimiterMode
): Promise<{
success: boolean;
team_id?: string;
error?: string;
status?: number;
}> {
const authHeader = req.headers.authorization;
if (!authHeader) {
return { success: false, error: "Unauthorized", status: 401 };
}
const token = authHeader.split(" ")[1]; // Extract the token from "Bearer <token>"
if (!token) {
return {
success: false,
error: "Unauthorized: Token missing",
status: 401,
};
}
try {
const incomingIP = (req.headers["x-forwarded-for"] ||
req.socket.remoteAddress) as string;
const iptoken = incomingIP + token;
await getRateLimiter(
token === "this_is_just_a_preview_token" ? RateLimiterMode.Preview : mode, token
).consume(iptoken);
} catch (rateLimiterRes) {
console.error(rateLimiterRes);
return {
success: false,
error: "Rate limit exceeded. Too many requests, try again in 1 minute.",
status: 429,
};
}
if (
token === "this_is_just_a_preview_token" &&
(mode === RateLimiterMode.Scrape || mode === RateLimiterMode.Preview || mode === RateLimiterMode.Search)
) {
return { success: true, team_id: "preview" };
// check the origin of the request and make sure its from firecrawl.dev
// const origin = req.headers.origin;
// if (origin && origin.includes("firecrawl.dev")){
// return { success: true, team_id: "preview" };
// }
// if(process.env.ENV !== "production") {
// return { success: true, team_id: "preview" };
// }
// return { success: false, error: "Unauthorized: Invalid token", status: 401 };
}
const normalizedApi = parseApi(token);
// make sure api key is valid, based on the api_keys table in supabase
const { data, error } = await supabase_service
.from("api_keys")
.select("*")
.eq("key", normalizedApi);
if (error || !data || data.length === 0) {
return {
success: false,
error: "Unauthorized: Invalid token",
status: 401,
};
}
return { success: true, team_id: data[0].team_id };
}

View File

@ -0,0 +1,62 @@
import { Request, Response } from "express";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../../src/types";
import { addWebScraperJob } from "../../src/services/queue-jobs";
import { getWebScraperQueue } from "../../src/services/queue-service";
import { supabase_service } from "../../src/services/supabase";
import { billTeam } from "../../src/services/billing/credit_billing";
export async function crawlCancelController(req: Request, res: Response) {
try {
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.CrawlStatus
);
if (!success) {
return res.status(status).json({ error });
}
const job = await getWebScraperQueue().getJob(req.params.jobId);
if (!job) {
return res.status(404).json({ error: "Job not found" });
}
// check if the job belongs to the team
const { data, error: supaError } = await supabase_service
.from("bulljobs_teams")
.select("*")
.eq("job_id", req.params.jobId)
.eq("team_id", team_id);
if (supaError) {
return res.status(500).json({ error: supaError.message });
}
if (data.length === 0) {
return res.status(403).json({ error: "Unauthorized" });
}
const jobState = await job.getState();
const { partialDocs } = await job.progress();
if (partialDocs && partialDocs.length > 0 && jobState === "active") {
console.log("Billing team for partial docs...");
// Note: the number of credits billed here may be lower than the actual amount
// used, because some scraping promises may not have resolved yet
await billTeam(team_id, partialDocs.length);
}
try {
await job.moveToFailed(Error("Job cancelled by user"), true);
} catch (error) {
console.error(error);
}
const newJobState = await job.getState();
res.json({
status: newJobState === "failed" ? "cancelled" : "Cancelling...",
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,37 @@
import { Request, Response } from "express";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../../src/types";
import { addWebScraperJob } from "../../src/services/queue-jobs";
import { getWebScraperQueue } from "../../src/services/queue-service";
export async function crawlStatusController(req: Request, res: Response) {
try {
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.CrawlStatus
);
if (!success) {
return res.status(status).json({ error });
}
const job = await getWebScraperQueue().getJob(req.params.jobId);
if (!job) {
return res.status(404).json({ error: "Job not found" });
}
const { current, current_url, total, current_step, partialDocs } = await job.progress();
res.json({
status: await job.getState(),
// progress: job.progress(),
current: current,
current_url: current_url,
current_step: current_step,
total: total,
data: job.returnvalue,
partial_data: partialDocs ?? [],
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,92 @@
import { Request, Response } from "express";
import { WebScraperDataProvider } from "../../src/scraper/WebScraper";
import { billTeam } from "../../src/services/billing/credit_billing";
import { checkTeamCredits } from "../../src/services/billing/credit_billing";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../../src/types";
import { addWebScraperJob } from "../../src/services/queue-jobs";
import { isUrlBlocked } from "../../src/scraper/WebScraper/utils/blocklist";
import { logCrawl } from "../../src/services/logging/crawl_log";
export async function crawlController(req: Request, res: Response) {
try {
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.Crawl
);
if (!success) {
return res.status(status).json({ error });
}
const { success: creditsCheckSuccess, message: creditsCheckMessage } =
await checkTeamCredits(team_id, 1);
if (!creditsCheckSuccess) {
return res.status(402).json({ error: "Insufficient credits" });
}
const url = req.body.url;
if (!url) {
return res.status(400).json({ error: "Url is required" });
}
if (isUrlBlocked(url)) {
return res
.status(403)
.json({
error:
"Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.",
});
}
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
if (mode === "single_urls" && !url.includes(",")) {
try {
const a = new WebScraperDataProvider();
await a.setOptions({
mode: "single_urls",
urls: [url],
crawlerOptions: {
returnOnlyUrls: true,
},
pageOptions: pageOptions,
});
// No queue job exists in this branch, so fetch the documents directly
// (reporting progress to a Bull job would require creating one first).
const docs = await a.getDocuments(false);
return res.json({
success: true,
documents: docs,
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}
const job = await addWebScraperJob({
url: url,
mode: mode ?? "crawl", // fix for single urls not working
crawlerOptions: { ...crawlerOptions },
team_id: team_id,
pageOptions: pageOptions,
origin: req.body.origin ?? "api",
});
await logCrawl(job.id.toString(), team_id);
res.json({ jobId: job.id });
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,45 @@
import { Request, Response } from "express";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../../src/types";
import { addWebScraperJob } from "../../src/services/queue-jobs";
import { isUrlBlocked } from "../../src/scraper/WebScraper/utils/blocklist";
export async function crawlPreviewController(req: Request, res: Response) {
try {
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.Preview
);
if (!success) {
return res.status(status).json({ error });
}
// authenticate on supabase
const url = req.body.url;
if (!url) {
return res.status(400).json({ error: "Url is required" });
}
if (isUrlBlocked(url)) {
return res.status(403).json({ error: "Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." });
}
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
const job = await addWebScraperJob({
url: url,
mode: mode ?? "crawl", // fix for single urls not working
crawlerOptions: { ...crawlerOptions, limit: 5, maxCrawledLinks: 5 },
team_id: "preview",
pageOptions: pageOptions,
origin: "website-preview",
});
res.json({ jobId: job.id });
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,24 @@
import { AuthResponse, RateLimiterMode } from "../types";
import { Request, Response } from "express";
import { authenticateUser } from "./auth";
export const keyAuthController = async (req: Request, res: Response) => {
try {
// make sure to authenticate user first, Bearer <token>
const { success, team_id, error, status } = await authenticateUser(
req,
res
);
if (!success) {
return res.status(status).json({ error });
}
// if success, return success: true
return res.status(200).json({ success: true });
} catch (error) {
return res.status(500).json({ error: error.message });
}
};

View File

@ -0,0 +1,155 @@
import { ExtractorOptions, PageOptions } from './../lib/entities';
import { Request, Response } from "express";
import { WebScraperDataProvider } from "../scraper/WebScraper";
import { billTeam, checkTeamCredits } from "../services/billing/credit_billing";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../types";
import { logJob } from "../services/logging/log_job";
import { Document } from "../lib/entities";
import { isUrlBlocked } from "../scraper/WebScraper/utils/blocklist"; // Import the isUrlBlocked function
import { numTokensFromString } from '../lib/LLM-extraction/helpers';
export async function scrapeHelper(
req: Request,
team_id: string,
crawlerOptions: any,
pageOptions: PageOptions,
extractorOptions: ExtractorOptions,
timeout: number
): Promise<{
success: boolean;
error?: string;
data?: Document;
returnCode: number;
}> {
const url = req.body.url;
if (!url) {
return { success: false, error: "Url is required", returnCode: 400 };
}
if (isUrlBlocked(url)) {
return { success: false, error: "Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.", returnCode: 403 };
}
const a = new WebScraperDataProvider();
await a.setOptions({
mode: "single_urls",
urls: [url],
crawlerOptions: {
...crawlerOptions,
},
pageOptions: pageOptions,
extractorOptions: extractorOptions,
});
const timeoutPromise = new Promise<{ success: boolean; error?: string; returnCode: number }>((_, reject) =>
setTimeout(() => reject({ success: false, error: "Request timed out. Increase the timeout by passing `timeout` param to the request.", returnCode: 408 }), timeout)
);
const docsPromise = a.getDocuments(false);
let docs;
try {
docs = await Promise.race([docsPromise, timeoutPromise]);
} catch (error) {
return error;
}
// make sure doc.content is not empty
const filteredDocs = docs.filter(
(doc: { content?: string }) => doc.content && doc.content.trim().length > 0
);
if (filteredDocs.length === 0) {
return { success: true, error: "No page found", returnCode: 200 };
}
let creditsToBeBilled = filteredDocs.length;
const creditsPerLLMExtract = 5;
if (extractorOptions.mode === "llm-extraction") {
creditsToBeBilled = creditsToBeBilled + (creditsPerLLMExtract * filteredDocs.length);
}
const billingResult = await billTeam(
team_id,
creditsToBeBilled
);
if (!billingResult.success) {
return {
success: false,
error:
"Failed to bill team. Insufficient credits or subscription not found.",
returnCode: 402,
};
}
return {
success: true,
data: filteredDocs[0],
returnCode: 200,
};
}
export async function scrapeController(req: Request, res: Response) {
try {
// make sure to authenticate user first, Bearer <token>
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.Scrape
);
if (!success) {
return res.status(status).json({ error });
}
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
const extractorOptions = req.body.extractorOptions ?? {
mode: "markdown"
}
const origin = req.body.origin ?? "api";
const timeout = req.body.timeout ?? 30000; // Default timeout of 30 seconds
try {
const { success: creditsCheckSuccess, message: creditsCheckMessage } =
await checkTeamCredits(team_id, 1);
if (!creditsCheckSuccess) {
return res.status(402).json({ error: "Insufficient credits" });
}
} catch (error) {
console.error(error);
return res.status(500).json({ error: "Internal server error" });
}
const startTime = new Date().getTime();
const result = await scrapeHelper(
req,
team_id,
crawlerOptions,
pageOptions,
extractorOptions,
timeout
);
const endTime = new Date().getTime();
const timeTakenInSeconds = (endTime - startTime) / 1000;
const numTokens = (result.data && result.data.markdown) ? numTokensFromString(result.data.markdown, "gpt-3.5-turbo") : 0;
logJob({
success: result.success,
message: result.error,
num_docs: 1,
docs: [result.data],
time_taken: timeTakenInSeconds,
team_id: team_id,
mode: "scrape",
url: req.body.url,
crawlerOptions: crawlerOptions,
pageOptions: pageOptions,
origin: origin,
extractor_options: extractorOptions,
num_tokens: numTokens,
});
return res.status(result.returnCode).json(result);
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,168 @@
import { Request, Response } from "express";
import { WebScraperDataProvider } from "../scraper/WebScraper";
import { billTeam, checkTeamCredits } from "../services/billing/credit_billing";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../types";
import { logJob } from "../services/logging/log_job";
import { PageOptions, SearchOptions } from "../lib/entities";
import { search } from "../search";
import { isUrlBlocked } from "../scraper/WebScraper/utils/blocklist";
export async function searchHelper(
req: Request,
team_id: string,
crawlerOptions: any,
pageOptions: PageOptions,
searchOptions: SearchOptions,
): Promise<{
success: boolean;
error?: string;
data?: any;
returnCode: number;
}> {
const query = req.body.query;
const advanced = false;
if (!query) {
return { success: false, error: "Query is required", returnCode: 400 };
}
const tbs = searchOptions.tbs ?? null;
const filter = searchOptions.filter ?? null;
let res = await search({
query: query,
advanced: advanced,
num_results: searchOptions.limit ?? 7,
tbs: tbs,
filter: filter,
lang: searchOptions.lang ?? "en",
country: searchOptions.country ?? "us",
location: searchOptions.location,
});
let justSearch = pageOptions.fetchPageContent === false;
if (justSearch) {
return { success: true, data: res, returnCode: 200 };
}
// filter out social media links
res = res.filter((r) => !isUrlBlocked(r.url));
if (res.length === 0) {
return { success: true, error: "No search results found", returnCode: 200 };
}
const a = new WebScraperDataProvider();
await a.setOptions({
mode: "single_urls",
urls: res.map((r) => r.url).slice(0, searchOptions.limit ?? 7),
crawlerOptions: {
...crawlerOptions,
},
pageOptions: {
...pageOptions,
onlyMainContent: pageOptions?.onlyMainContent ?? true,
fetchPageContent: pageOptions?.fetchPageContent ?? true,
includeHtml: pageOptions?.includeHtml ?? false,
fallback: false,
},
});
const docs = await a.getDocuments(false);
if (docs.length === 0) {
return { success: true, error: "No search results found", returnCode: 200 };
}
// make sure doc.content is not empty
const filteredDocs = docs.filter(
(doc: { content?: string }) => doc.content && doc.content.trim().length > 0
);
if (filteredDocs.length === 0) {
return { success: true, error: "No page found", returnCode: 200 };
}
const billingResult = await billTeam(
team_id,
filteredDocs.length
);
if (!billingResult.success) {
return {
success: false,
error:
"Failed to bill team. Insufficient credits or subscription not found.",
returnCode: 402,
};
}
return {
success: true,
data: filteredDocs,
returnCode: 200,
};
}
export async function searchController(req: Request, res: Response) {
try {
// make sure to authenticate user first, Bearer <token>
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.Search
);
if (!success) {
return res.status(status).json({ error });
}
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? {
includeHtml: false,
onlyMainContent: true,
fetchPageContent: true,
fallback: false,
};
const origin = req.body.origin ?? "api";
const searchOptions = req.body.searchOptions ?? { limit: 7 };
try {
const { success: creditsCheckSuccess, message: creditsCheckMessage } =
await checkTeamCredits(team_id, 1);
if (!creditsCheckSuccess) {
return res.status(402).json({ error: "Insufficient credits" });
}
} catch (error) {
console.error(error);
return res.status(500).json({ error: "Internal server error" });
}
const startTime = new Date().getTime();
const result = await searchHelper(
req,
team_id,
crawlerOptions,
pageOptions,
searchOptions,
);
const endTime = new Date().getTime();
const timeTakenInSeconds = (endTime - startTime) / 1000;
logJob({
success: result.success,
message: result.error,
num_docs: result.data ? result.data.length : 0,
docs: result.data,
time_taken: timeTakenInSeconds,
team_id: team_id,
mode: "search",
url: req.body.query,
crawlerOptions: crawlerOptions,
pageOptions: pageOptions,
origin: origin,
});
return res.status(result.returnCode).json(result);
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -0,0 +1,26 @@
import { Request, Response } from "express";
import { getWebScraperQueue } from "../../src/services/queue-service";
export async function crawlJobStatusPreviewController(req: Request, res: Response) {
try {
const job = await getWebScraperQueue().getJob(req.params.jobId);
if (!job) {
return res.status(404).json({ error: "Job not found" });
}
const { current, current_url, total, current_step, partialDocs } = await job.progress();
res.json({
status: await job.getState(),
// progress: job.progress(),
current: current,
current_url: current_url,
current_step: current_step,
total: total,
data: job.returnvalue,
partial_data: partialDocs ?? [],
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}

View File

@ -3,20 +3,14 @@ import bodyParser from "body-parser";
import cors from "cors";
import "dotenv/config";
import { getWebScraperQueue } from "./services/queue-service";
import { addWebScraperJob } from "./services/queue-jobs";
import { supabase_service } from "./services/supabase";
import { WebScraperDataProvider } from "./scraper/WebScraper";
import { billTeam, checkTeamCredits } from "./services/billing/credit_billing";
import { getRateLimiter, redisClient } from "./services/rate-limiter";
import { parseApi } from "./lib/parseApi";
import { redisClient } from "./services/rate-limiter";
import { v0Router } from "./routes/v0";
const { createBullBoard } = require("@bull-board/api");
const { BullAdapter } = require("@bull-board/api/bullAdapter");
const { ExpressAdapter } = require("@bull-board/express");
export const app = express();
global.isProduction = process.env.IS_PRODUCTION === "true";
app.use(bodyParser.urlencoded({ extended: true }));
@ -46,275 +40,20 @@ app.get("/test", async (req, res) => {
res.send("Hello, world!");
});
async function authenticateUser(req, res, mode?: string): Promise<{ success: boolean, team_id?: string, error?: string, status?: number }> {
const authHeader = req.headers.authorization;
if (!authHeader) {
return { success: false, error: "Unauthorized", status: 401 };
}
const token = authHeader.split(" ")[1]; // Extract the token from "Bearer <token>"
if (!token) {
return { success: false, error: "Unauthorized: Token missing", status: 401 };
}
try {
const incomingIP = (req.headers["x-forwarded-for"] ||
req.socket.remoteAddress) as string;
const iptoken = incomingIP + token;
await getRateLimiter(
token === "this_is_just_a_preview_token" ? true : false
).consume(iptoken);
} catch (rateLimiterRes) {
console.error(rateLimiterRes);
return { success: false, error: "Rate limit exceeded. Too many requests, try again in 1 minute.", status: 429 };
}
if (token === "this_is_just_a_preview_token" && mode === "scrape") {
return { success: true, team_id: "preview" };
}
const normalizedApi = parseApi(token);
// make sure api key is valid, based on the api_keys table in supabase
const { data, error } = await supabase_service
.from("api_keys")
.select("*")
.eq("key", normalizedApi);
if (error || !data || data.length === 0) {
return { success: false, error: "Unauthorized: Invalid token", status: 401 };
}
return { success: true, team_id: data[0].team_id };
}
app.post("/v0/scrape", async (req, res) => {
try {
// make sure to authenticate user first, Bearer <token>
const { success, team_id, error, status } = await authenticateUser(req, res, "scrape");
if (!success) {
return res.status(status).json({ error });
}
const crawlerOptions = req.body.crawlerOptions ?? {};
try {
const { success: creditsCheckSuccess, message: creditsCheckMessage } =
await checkTeamCredits(team_id, 1);
if (!creditsCheckSuccess) {
return res.status(402).json({ error: "Insufficient credits" });
}
} catch (error) {
console.error(error);
return res.status(500).json({ error: "Internal server error" });
}
// authenticate on supabase
const url = req.body.url;
if (!url) {
return res.status(400).json({ error: "Url is required" });
}
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
try {
const a = new WebScraperDataProvider();
await a.setOptions({
mode: "single_urls",
urls: [url],
crawlerOptions: {
...crawlerOptions,
},
pageOptions: pageOptions,
});
const docs = await a.getDocuments(false);
// make sure doc.content is not empty
const filteredDocs = docs.filter(
(doc: { content?: string }) =>
doc.content && doc.content.trim().length > 0
);
if (filteredDocs.length === 0) {
return res.status(200).json({ success: true, data: [] });
}
const { success, credit_usage } = await billTeam(
team_id,
filteredDocs.length
);
if (!success) {
// throw new Error("Failed to bill team, no subscription was found");
// return {
// success: false,
// message: "Failed to bill team, no subscription was found",
// docs: [],
// };
return res
.status(402)
.json({ error: "Failed to bill, no subscription was found" });
}
return res.json({
success: true,
data: filteredDocs[0],
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
app.post("/v0/crawl", async (req, res) => {
try {
const { success, team_id, error, status } = await authenticateUser(req, res, "crawl");
if (!success) {
return res.status(status).json({ error });
}
const { success: creditsCheckSuccess, message: creditsCheckMessage } =
await checkTeamCredits(team_id, 1);
if (!creditsCheckSuccess) {
return res.status(402).json({ error: "Insufficient credits" });
}
// authenticate on supabase
const url = req.body.url;
if (!url) {
return res.status(400).json({ error: "Url is required" });
}
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
if (mode === "single_urls" && !url.includes(",")) {
try {
const a = new WebScraperDataProvider();
await a.setOptions({
mode: "single_urls",
urls: [url],
crawlerOptions: {
returnOnlyUrls: true,
},
pageOptions: pageOptions,
});
const docs = await a.getDocuments(false, (progress) => {
job.progress({
current: progress.current,
total: progress.total,
current_step: "SCRAPING",
current_url: progress.currentDocumentUrl,
});
});
return res.json({
success: true,
documents: docs,
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
}
const job = await addWebScraperJob({
url: url,
mode: mode ?? "crawl", // fix for single urls not working
crawlerOptions: { ...crawlerOptions },
team_id: team_id,
pageOptions: pageOptions,
});
res.json({ jobId: job.id });
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
app.post("/v0/crawlWebsitePreview", async (req, res) => {
try {
const { success, team_id, error, status } = await authenticateUser(req, res, "scrape");
if (!success) {
return res.status(status).json({ error });
}
// authenticate on supabase
const url = req.body.url;
if (!url) {
return res.status(400).json({ error: "Url is required" });
}
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
const job = await addWebScraperJob({
url: url,
mode: mode ?? "crawl", // fix for single urls not working
crawlerOptions: { ...crawlerOptions, limit: 5, maxCrawledLinks: 5 },
team_id: "preview",
pageOptions: pageOptions,
});
res.json({ jobId: job.id });
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
app.get("/v0/crawl/status/:jobId", async (req, res) => {
try {
const { success, team_id, error, status } = await authenticateUser(req, res, "scrape");
if (!success) {
return res.status(status).json({ error });
}
const job = await getWebScraperQueue().getJob(req.params.jobId);
if (!job) {
return res.status(404).json({ error: "Job not found" });
}
const { current, current_url, total, current_step } = await job.progress();
res.json({
status: await job.getState(),
// progress: job.progress(),
current: current,
current_url: current_url,
current_step: current_step,
total: total,
data: job.returnvalue,
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
app.get("/v0/checkJobStatus/:jobId", async (req, res) => {
try {
const job = await getWebScraperQueue().getJob(req.params.jobId);
if (!job) {
return res.status(404).json({ error: "Job not found" });
}
const { current, current_url, total, current_step } = await job.progress();
res.json({
status: await job.getState(),
// progress: job.progress(),
current: current,
current_url: current_url,
current_step: current_step,
total: total,
data: job.returnvalue,
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
// register router
app.use(v0Router);
const DEFAULT_PORT = process.env.PORT ?? 3002;
const HOST = process.env.HOST ?? "localhost";
redisClient.connect();
export function startServer(port = DEFAULT_PORT) {
const server = app.listen(Number(port), HOST, () => {
console.log(`Server listening on port ${port}`);
console.log(`For the UI, open http://${HOST}:${port}/admin/${process.env.BULL_AUTH_KEY}/queues`);
console.log(
`For the UI, open http://${HOST}:${port}/admin/${process.env.BULL_AUTH_KEY}/queues`
);
console.log("");
console.log("1. Make sure Redis is running on port 6379 by default");
console.log(
@ -348,7 +87,77 @@ app.get(`/admin/${process.env.BULL_AUTH_KEY}/queues`, async (req, res) => {
}
});
app.get(`/serverHealthCheck`, async (req, res) => {
try {
const webScraperQueue = getWebScraperQueue();
const [waitingJobs] = await Promise.all([
webScraperQueue.getWaitingCount(),
]);
const noWaitingJobs = waitingJobs === 0;
// 200 if no active jobs, 503 if there are active jobs
return res.status(noWaitingJobs ? 200 : 500).json({
waitingJobs,
});
} catch (error) {
console.error(error);
return res.status(500).json({ error: error.message });
}
});
app.get('/serverHealthCheck/notify', async (req, res) => {
if (process.env.SLACK_WEBHOOK_URL) {
const threshold = 1; // The threshold value for the number of waiting jobs
const timeout = 60000; // The timeout for the check, in milliseconds (1 minute)
const getWaitingJobsCount = async () => {
const webScraperQueue = getWebScraperQueue();
const [waitingJobsCount] = await Promise.all([
webScraperQueue.getWaitingCount(),
]);
return waitingJobsCount;
};
res.status(200).json({ message: "Check initiated" });
const checkWaitingJobs = async () => {
try {
let waitingJobsCount = await getWaitingJobsCount();
if (waitingJobsCount >= threshold) {
setTimeout(async () => {
// Re-check the waiting jobs count after the timeout
waitingJobsCount = await getWaitingJobsCount();
if (waitingJobsCount >= threshold) {
const slackWebhookUrl = process.env.SLACK_WEBHOOK_URL;
const message = {
text: `⚠️ Warning: The number of waiting jobs (${waitingJobsCount}) has exceeded the threshold (${threshold}) for more than ${timeout/60000} minute(s).`,
};
const response = await fetch(slackWebhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(message),
})
if (!response.ok) {
console.error('Failed to send Slack notification')
}
}
}, timeout);
}
} catch (error) {
console.error(error);
}
};
checkWaitingJobs();
}
});
app.get("/is-production", (req, res) => {
res.send({ isProduction: global.isProduction });
});

View File

@ -0,0 +1,16 @@
import { encoding_for_model } from "@dqbd/tiktoken";
import { TiktokenModel } from "@dqbd/tiktoken";
// This function calculates the number of tokens in a text string using the tokenizer for the given model (e.g. gpt-3.5-turbo)
export function numTokensFromString(message: string, model: string): number {
const encoder = encoding_for_model(model as TiktokenModel);
// Encode the message into tokens
const tokens = encoder.encode(message);
// Free the encoder resources after use
encoder.free();
// Return the number of tokens
return tokens.length;
}
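For reference, a minimal usage sketch (not part of the file above); the markdown string is an illustrative value, not something produced by the scraper:

// Hypothetical example: estimate how many tokens a scraped markdown document uses.
const markdown = "# Example page\n\nSome scraped content goes here.";
const tokenCount = numTokensFromString(markdown, "gpt-3.5-turbo");
console.log(`Scraped content is roughly ${tokenCount} tokens`);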

View File

@ -0,0 +1,56 @@
import Turndown from "turndown";
import OpenAI from "openai";
import Ajv from "ajv";
const ajv = new Ajv(); // Initialize AJV for JSON schema validation
import { generateOpenAICompletions } from "./models";
import { Document, ExtractorOptions } from "../entities";
// Generate completion using OpenAI
export async function generateCompletions(
documents: Document[],
extractionOptions: ExtractorOptions
): Promise<Document[]> {
// const schema = zodToJsonSchema(options.schema)
const schema = extractionOptions.extractionSchema;
const prompt = extractionOptions.extractionPrompt;
const switchVariable = "openAI"; // Placeholder, want to think more about how we abstract the model provider
const completions = await Promise.all(
documents.map(async (document: Document) => {
switch (switchVariable) {
case "openAI":
const llm = new OpenAI();
try{
const completionResult = await generateOpenAICompletions({
client: llm,
document: document,
schema: schema,
prompt: prompt,
});
// Validate the JSON output against the schema using AJV
const validate = ajv.compile(schema);
if (!validate(completionResult.llm_extraction)) {
//TODO: add Custom Error handling middleware that bubbles this up with proper Error code, etc.
throw new Error(
`JSON parsing error(s): ${validate.errors
?.map((err) => err.message)
.join(", ")}\n\nLLM extraction did not match the extraction schema you provided. This could be because of a model hallucination, or an error on our side. Try adjusting your prompt, and if it doesn't work reach out to support.`
);
}
return completionResult;
} catch (error) {
console.error(`Error generating completions: ${error}`);
throw new Error(`Error generating completions: ${error.message}`);
}
default:
throw new Error("Invalid client");
}
})
);
return completions;
}

View File

@ -0,0 +1,77 @@
import OpenAI from "openai";
import { Document } from "../../lib/entities";
export type ScraperCompletionResult = {
data: any | null;
url: string;
};
const defaultPrompt =
"You are a professional web scraper. Extract the contents of the webpage";
function prepareOpenAIDoc(
document: Document
): OpenAI.Chat.Completions.ChatCompletionContentPart[] {
// Check if the markdown content exists in the document
if (!document.markdown) {
throw new Error(
"Markdown content is missing in the document. This is likely due to an error in the scraping process. Please try again or reach out to help@mendable.ai"
);
}
return [{ type: "text", text: document.markdown }];
}
export async function generateOpenAICompletions({
client,
model = "gpt-4o",
document,
schema, //TODO - add zod dynamic type checking
prompt = defaultPrompt,
temperature,
}: {
client: OpenAI;
model?: string;
document: Document;
schema: any; // This should be replaced with a proper Zod schema type when available
prompt?: string;
temperature?: number;
}): Promise<Document> {
const openai = client as OpenAI;
const content = prepareOpenAIDoc(document);
const completion = await openai.chat.completions.create({
model,
messages: [
{
role: "system",
content: prompt,
},
{ role: "user", content },
],
tools: [
{
type: "function",
function: {
name: "extract_content",
description: "Extracts the content from the given webpage(s)",
parameters: schema,
},
},
],
tool_choice: { "type": "function", "function": {"name": "extract_content"}},
temperature,
});
const c = completion.choices[0].message.tool_calls[0].function.arguments;
// Extract the LLM extraction content from the completion response
const llmExtraction = JSON.parse(c);
// Return the document with the LLM extraction content added
return {
...document,
llm_extraction: llmExtraction,
};
}
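A rough usage sketch for the helper above, assuming OPENAI_API_KEY is set; the document contents and schema are made up for illustration, and the cast to Document is only for brevity:

// Hypothetical example: extract a single field from an already-scraped document.
const doc = {
  content: "",
  markdown: "# Pricing\n\nThe Pro plan costs $49/month.",
} as Document;
const extracted = await generateOpenAICompletions({
  client: new OpenAI(),
  document: doc,
  schema: {
    type: "object",
    properties: { pro_plan_price: { type: "string" } },
    required: ["pro_plan_price"],
  },
  prompt: "Extract the price of the Pro plan.",
});
console.log(extracted.llm_extraction); // e.g. { pro_plan_price: "$49/month" }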

View File

@ -7,11 +7,31 @@ export interface Progress {
[key: string]: any;
};
currentDocumentUrl?: string;
currentDocument?: Document;
}
export type PageOptions = {
onlyMainContent?: boolean;
includeHtml?: boolean;
fallback?: boolean;
fetchPageContent?: boolean;
};
export type ExtractorOptions = {
mode: "markdown" | "llm-extraction";
extractionPrompt?: string;
extractionSchema?: Record<string, any>;
}
export type SearchOptions = {
limit?: number;
tbs?: string;
filter?: string;
lang?: string;
country?: string;
location?: string;
};
export type WebScraperOptions = {
urls: string[];
mode: "single_urls" | "sitemap" | "crawl";
@ -20,17 +40,28 @@ export type WebScraperOptions = {
includes?: string[];
excludes?: string[];
maxCrawledLinks?: number;
maxDepth?: number;
limit?: number;
generateImgAltText?: boolean;
replaceAllPathsWithAbsolutePaths?: boolean;
};
pageOptions?: PageOptions;
extractorOptions?: ExtractorOptions;
concurrentRequests?: number;
bullJobId?: string;
};
export interface DocumentUrl {
url: string;
}
export class Document {
id?: string;
url?: string; // Used only in /search for now
content: string;
markdown?: string;
html?: string;
llm_extraction?: Record<string, any>;
createdAt?: Date;
updatedAt?: Date;
type?: string;
@ -55,3 +86,20 @@ export class Document {
this.provider = data.provider || undefined;
}
}
export class SearchResult {
url: string;
title: string;
description: string;
constructor(url: string, title: string, description: string) {
this.url = url;
this.title = title;
this.description = description;
}
toString(): string {
return `SearchResult(url=${this.url}, title=${this.title}, description=${this.description})`;
}
}
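For illustration, a sketch of how the option types added in this file might be filled in; every value below is an example rather than a default:

// Hypothetical example of the PageOptions and ExtractorOptions shapes defined above.
const examplePageOptions: PageOptions = {
  onlyMainContent: true,
  includeHtml: false,
};
const exampleExtractorOptions: ExtractorOptions = {
  mode: "llm-extraction",
  extractionPrompt: "Find the company's mission statement.",
  extractionSchema: {
    type: "object",
    properties: { company_mission: { type: "string" } },
    required: ["company_mission"],
  },
};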

View File

@ -1,6 +1,8 @@
export function parseMarkdown(html: string) {
var TurndownService = require("turndown");
var turndownPluginGfm = require("turndown-plugin-gfm");
var turndownPluginGfm = require('joplin-turndown-plugin-gfm')
const turndownService = new TurndownService();
turndownService.addRule("inlineLink", {

View File

@ -0,0 +1,24 @@
import { AuthResponse } from "../../src/types";
let warningCount = 0;
export function withAuth<T extends AuthResponse, U extends any[]>(
originalFunction: (...args: U) => Promise<T>
) {
return async function (...args: U): Promise<T> {
if (process.env.USE_DB_AUTHENTICATION === "false") {
if (warningCount < 5) {
console.warn("WARNING - You're bypassing authentication");
warningCount++;
}
return { success: true } as T;
} else {
try {
return await originalFunction(...args);
} catch (error) {
console.error("Error in withAuth function: ", error);
return { success: false, error: error.message } as T;
}
}
};
}

View File

@ -1,21 +1,26 @@
import { Job } from "bull";
import { CrawlResult, WebScraperOptions } from "../types";
import { WebScraperDataProvider } from "../scraper/WebScraper";
import { Progress } from "../lib/entities";
import { DocumentUrl, Progress } from "../lib/entities";
import { billTeam } from "../services/billing/credit_billing";
import { Document } from "../lib/entities";
export async function startWebScraperPipeline({
job,
}: {
job: Job<WebScraperOptions>;
}) {
let partialDocs: Document[] = [];
return (await runWebScraper({
url: job.data.url,
mode: job.data.mode,
crawlerOptions: job.data.crawlerOptions,
pageOptions: job.data.pageOptions,
inProgress: (progress) => {
job.progress(progress);
if (progress.currentDocument) {
partialDocs.push(progress.currentDocument);
job.progress({ ...progress, partialDocs: partialDocs });
}
},
onSuccess: (result) => {
job.moveToCompleted(result);
@ -24,7 +29,8 @@ export async function startWebScraperPipeline({
job.moveToFailed(error);
},
team_id: job.data.team_id,
})) as { success: boolean; message: string; docs: CrawlResult[] };
bull_job_id: job.id.toString(),
})) as { success: boolean; message: string; docs: Document[] };
}
export async function runWebScraper({
url,
@ -35,6 +41,7 @@ export async function runWebScraper({
onSuccess,
onError,
team_id,
bull_job_id,
}: {
url: string;
mode: "crawl" | "single_urls" | "sitemap";
@ -44,7 +51,12 @@ export async function runWebScraper({
onSuccess: (result: any) => void;
onError: (error: any) => void;
team_id: string;
}): Promise<{ success: boolean; message: string; docs: CrawlResult[] }> {
bull_job_id: string;
}): Promise<{
success: boolean;
message: string;
docs: Document[] | DocumentUrl[];
}> {
try {
const provider = new WebScraperDataProvider();
if (mode === "crawl") {
@ -53,6 +65,7 @@ export async function runWebScraper({
urls: [url],
crawlerOptions: crawlerOptions,
pageOptions: pageOptions,
bullJobId: bull_job_id,
});
} else {
await provider.setOptions({
@ -64,7 +77,7 @@ export async function runWebScraper({
}
const docs = (await provider.getDocuments(false, (progress: Progress) => {
inProgress(progress);
})) as CrawlResult[];
})) as Document[];
if (docs.length === 0) {
return {
@ -75,14 +88,17 @@ export async function runWebScraper({
}
// remove docs with empty content
const filteredDocs = docs.filter((doc) => doc.content.trim().length > 0);
onSuccess(filteredDocs);
const filteredDocs = crawlerOptions.returnOnlyUrls
? docs.map((doc) => {
if (doc.metadata.sourceURL) {
return { url: doc.metadata.sourceURL };
}
})
: docs.filter((doc) => doc.content.trim().length > 0);
const { success, credit_usage } = await billTeam(
team_id,
filteredDocs.length
);
if (!success) {
const billingResult = await billTeam(team_id, filteredDocs.length);
if (!billingResult.success) {
// throw new Error("Failed to bill team, no subscription was found");
return {
success: false,
@ -91,7 +107,11 @@ export async function runWebScraper({
};
}
return { success: true, message: "", docs: filteredDocs as CrawlResult[] };
// This is where the returnvalue from the job is set
onSuccess(filteredDocs);
// this return doesn't matter too much for the job completion result
return { success: true, message: "", docs: filteredDocs };
} catch (error) {
console.error("Error running web scraper", error);
onError(error);

25
apps/api/src/routes/v0.ts Normal file
View File

@ -0,0 +1,25 @@
import express from "express";
import { crawlController } from "../../src/controllers/crawl";
import { crawlStatusController } from "../../src/controllers/crawl-status";
import { scrapeController } from "../../src/controllers/scrape";
import { crawlPreviewController } from "../../src/controllers/crawlPreview";
import { crawlJobStatusPreviewController } from "../../src/controllers/status";
import { searchController } from "../../src/controllers/search";
import { crawlCancelController } from "../../src/controllers/crawl-cancel";
import { keyAuthController } from "../../src/controllers/keyAuth";
export const v0Router = express.Router();
v0Router.post("/v0/scrape", scrapeController);
v0Router.post("/v0/crawl", crawlController);
v0Router.post("/v0/crawlWebsitePreview", crawlPreviewController);
v0Router.get("/v0/crawl/status/:jobId", crawlStatusController);
v0Router.delete("/v0/crawl/cancel/:jobId", crawlCancelController);
v0Router.get("/v0/checkJobStatus/:jobId", crawlJobStatusPreviewController);
// Auth route for key based authentication
v0Router.get("/v0/keyAuth", keyAuthController);
// Search routes
v0Router.post("/v0/search", searchController);

View File

@ -1,179 +0,0 @@
import { WebScraperDataProvider } from "../index";
describe("WebScraperDataProvider", () => {
describe("replaceImgPathsWithAbsolutePaths", () => {
it("should replace image paths with absolute paths", () => {
const webScraperDataProvider = new WebScraperDataProvider();
const documents = [
{
metadata: { sourceURL: "https://example.com/page" },
content: "![alt text](/image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content: "![another alt text](./another-image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content: "![another alt text](./another-image.webp)",
},
{
metadata: { sourceURL: "https://example.com/data-image" },
content: "![data image](data:image/png;base64,...)",
},
];
const expectedDocuments = [
{
metadata: { sourceURL: "https://example.com/page" },
content: "![alt text](https://example.com/image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content: "![another alt text](https://example.com/another-image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content: "![another alt text](https://example.com/another-image.webp)",
},
{
metadata: { sourceURL: "https://example.com/data-image" },
content: "![data image](data:image/png;base64,...)",
},
];
const result =
webScraperDataProvider.replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it("should handle absolute URLs without modification", () => {
const webScraperDataProvider = new WebScraperDataProvider();
const documents = [
{
metadata: { sourceURL: "https://example.com/page" },
content: "![alt text](https://example.com/image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"![another alt text](http://anotherexample.com/another-image.png)",
},
];
const expectedDocuments = [
{
metadata: { sourceURL: "https://example.com/page" },
content: "![alt text](https://example.com/image.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"![another alt text](http://anotherexample.com/another-image.png)",
},
];
const result =
webScraperDataProvider.replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it("should not replace non-image content within the documents", () => {
const webScraperDataProvider = new WebScraperDataProvider();
const documents = [
{
metadata: { sourceURL: "https://example.com/page" },
content:
"This is a test. ![alt text](/image.png) Here is a link: [Example](https://example.com).",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"Another test. ![another alt text](./another-image.png) Here is some **bold text**.",
},
];
const expectedDocuments = [
{
metadata: { sourceURL: "https://example.com/page" },
content:
"This is a test. ![alt text](https://example.com/image.png) Here is a link: [Example](https://example.com).",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"Another test. ![another alt text](https://example.com/another-image.png) Here is some **bold text**.",
},
];
const result =
webScraperDataProvider.replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it("should replace multiple image paths within the documents", () => {
const webScraperDataProvider = new WebScraperDataProvider();
const documents = [
{
metadata: { sourceURL: "https://example.com/page" },
content:
"This is a test. ![alt text](/image1.png) Here is a link: [Example](https://example.com). ![alt text](/image2.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"Another test. ![another alt text](./another-image1.png) Here is some **bold text**. ![another alt text](./another-image2.png)",
},
];
const expectedDocuments = [
{
metadata: { sourceURL: "https://example.com/page" },
content:
"This is a test. ![alt text](https://example.com/image1.png) Here is a link: [Example](https://example.com). ![alt text](https://example.com/image2.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page" },
content:
"Another test. ![another alt text](https://example.com/another-image1.png) Here is some **bold text**. ![another alt text](https://example.com/another-image2.png)",
},
];
const result =
webScraperDataProvider.replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it("should replace image paths within the documents with complex URLs", () => {
const webScraperDataProvider = new WebScraperDataProvider();
const documents = [
{
metadata: { sourceURL: "https://example.com/page/subpage" },
content:
"This is a test. ![alt text](/image1.png) Here is a link: [Example](https://example.com). ![alt text](/sub/image2.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page/subpage" },
content:
"Another test. ![another alt text](/another-page/another-image1.png) Here is some **bold text**. ![another alt text](/another-page/sub/another-image2.png)",
},
];
const expectedDocuments = [
{
metadata: { sourceURL: "https://example.com/page/subpage" },
content:
"This is a test. ![alt text](https://example.com/image1.png) Here is a link: [Example](https://example.com). ![alt text](https://example.com/sub/image2.png)",
},
{
metadata: { sourceURL: "https://example.com/another-page/subpage" },
content:
"Another test. ![another alt text](https://example.com/another-page/another-image1.png) Here is some **bold text**. ![another alt text](https://example.com/another-page/sub/another-image2.png)",
},
];
const result =
webScraperDataProvider.replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
});
});

View File

@ -13,6 +13,7 @@ export class WebCrawler {
private includes: string[];
private excludes: string[];
private maxCrawledLinks: number;
private maxCrawledDepth: number;
private visited: Set<string> = new Set();
private crawledUrls: Set<string> = new Set();
private limit: number;
@ -27,6 +28,7 @@ export class WebCrawler {
maxCrawledLinks,
limit = 10000,
generateImgAltText = false,
maxCrawledDepth = 10,
}: {
initialUrl: string;
includes?: string[];
@ -34,6 +36,7 @@ export class WebCrawler {
maxCrawledLinks?: number;
limit?: number;
generateImgAltText?: boolean;
maxCrawledDepth?: number;
}) {
this.initialUrl = initialUrl;
this.baseUrl = new URL(initialUrl).origin;
@ -44,15 +47,22 @@ export class WebCrawler {
this.robots = robotsParser(this.robotsTxtUrl, "");
// Deprecated, use limit instead
this.maxCrawledLinks = maxCrawledLinks ?? limit;
this.maxCrawledDepth = maxCrawledDepth ?? 10;
this.generateImgAltText = generateImgAltText ?? false;
}
private filterLinks(sitemapLinks: string[], limit: number): string[] {
private filterLinks(sitemapLinks: string[], limit: number, maxDepth: number): string[] {
return sitemapLinks
.filter((link) => {
const url = new URL(link);
const path = url.pathname;
const depth = url.pathname.split('/').length - 1;
// Check if the link exceeds the maximum depth allowed
if (depth > maxDepth) {
return false;
}
// Check if the link should be excluded
if (this.excludes.length > 0 && this.excludes[0] !== "") {
@ -87,7 +97,8 @@ export class WebCrawler {
public async start(
inProgress?: (progress: Progress) => void,
concurrencyLimit: number = 5,
limit: number = 10000
limit: number = 10000,
maxDepth: number = 10
): Promise<string[]> {
// Fetch and parse robots.txt
try {
@ -99,7 +110,7 @@ export class WebCrawler {
const sitemapLinks = await this.tryFetchSitemapLinks(this.initialUrl);
if (sitemapLinks.length > 0) {
const filteredLinks = this.filterLinks(sitemapLinks, limit);
const filteredLinks = this.filterLinks(sitemapLinks, limit, maxDepth);
return filteredLinks;
}
@ -110,13 +121,13 @@ export class WebCrawler {
);
if (
urls.length === 0 &&
this.filterLinks([this.initialUrl], limit).length > 0
this.filterLinks([this.initialUrl], limit, this.maxCrawledDepth).length > 0
) {
return [this.initialUrl];
}
// make sure to run include exclude here again
return this.filterLinks(urls, limit);
return this.filterLinks(urls, limit, this.maxCrawledDepth);
}
private async crawlUrls(

View File

@ -1,24 +1,40 @@
import { Document, PageOptions, WebScraperOptions } from "../../lib/entities";
import {
Document,
ExtractorOptions,
PageOptions,
WebScraperOptions,
} from "../../lib/entities";
import { Progress } from "../../lib/entities";
import { scrapSingleUrl } from "./single_url";
import { SitemapEntry, fetchSitemapData, getLinksFromSitemap } from "./sitemap";
import { WebCrawler } from "./crawler";
import { getValue, setValue } from "../../services/redis";
import { getImageDescription } from "./utils/gptVision";
import { getImageDescription } from "./utils/imageDescription";
import { fetchAndProcessPdf } from "./utils/pdfProcessor";
import {
replaceImgPathsWithAbsolutePaths,
replacePathsWithAbsolutePaths,
} from "./utils/replacePaths";
import { generateCompletions } from "../../lib/LLM-extraction";
import { getWebScraperQueue } from "../../../src/services/queue-service";
export class WebScraperDataProvider {
private bullJobId: string;
private urls: string[] = [""];
private mode: "single_urls" | "sitemap" | "crawl" = "single_urls";
private includes: string[];
private excludes: string[];
private maxCrawledLinks: number;
private maxCrawledDepth: number = 10;
private returnOnlyUrls: boolean;
private limit: number = 10000;
private concurrentRequests: number = 20;
private generateImgAltText: boolean = false;
private pageOptions?: PageOptions;
private extractorOptions?: ExtractorOptions;
private replaceAllPathsWithAbsolutePaths?: boolean = false;
private generateImgAltTextModel: "gpt-4-turbo" | "claude-3-opus" =
"gpt-4-turbo";
authorize(): void {
throw new Error("Method not implemented.");
@ -34,14 +50,13 @@ export class WebScraperDataProvider {
): Promise<Document[]> {
const totalUrls = urls.length;
let processedUrls = 0;
console.log("Converting urls to documents");
console.log("Total urls", urls);
const results: (Document | null)[] = new Array(urls.length).fill(null);
for (let i = 0; i < urls.length; i += this.concurrentRequests) {
const batchUrls = urls.slice(i, i + this.concurrentRequests);
await Promise.all(
batchUrls.map(async (url, index) => {
const result = await scrapSingleUrl(url, true, this.pageOptions);
const result = await scrapSingleUrl(url, this.pageOptions);
processedUrls++;
if (inProgress) {
inProgress({
@ -49,11 +64,26 @@ export class WebScraperDataProvider {
total: totalUrls,
status: "SCRAPING",
currentDocumentUrl: url,
currentDocument: result,
});
}
results[i + index] = result;
})
);
try {
if (this.mode === "crawl" && this.bullJobId) {
const job = await getWebScraperQueue().getJob(this.bullJobId);
const jobStatus = await job.getState();
if (jobStatus === "failed") {
throw new Error(
"Job has failed or has been cancelled by the user. Stopping the job..."
);
}
}
} catch (error) {
console.error(error);
}
}
return results.filter((result) => result !== null) as Document[];
}
@ -62,156 +92,158 @@ export class WebScraperDataProvider {
useCaching: boolean = false,
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
this.validateInitialUrl();
if (!useCaching) {
return this.processDocumentsWithoutCache(inProgress);
}
return this.processDocumentsWithCache(inProgress);
}
private validateInitialUrl(): void {
if (this.urls[0].trim() === "") {
throw new Error("Url is required");
}
}
if (!useCaching) {
if (this.mode === "crawl") {
const crawler = new WebCrawler({
initialUrl: this.urls[0],
includes: this.includes,
excludes: this.excludes,
maxCrawledLinks: this.maxCrawledLinks,
limit: this.limit,
generateImgAltText: this.generateImgAltText,
});
let links = await crawler.start(inProgress, 5, this.limit);
if (this.returnOnlyUrls) {
return links.map((url) => ({
content: "",
metadata: { sourceURL: url },
provider: "web",
type: "text",
}));
}
/**
* Process documents without caching, dispatching on the current crawl mode
* @param inProgress optional callback invoked with scraping progress
* @returns documents
*/
private async processDocumentsWithoutCache(
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
switch (this.mode) {
case "crawl":
return this.handleCrawlMode(inProgress);
case "single_urls":
return this.handleSingleUrlsMode(inProgress);
case "sitemap":
return this.handleSitemapMode(inProgress);
default:
return [];
}
}
let pdfLinks = links.filter((link) => link.endsWith(".pdf"));
let pdfDocuments: Document[] = [];
for (let pdfLink of pdfLinks) {
const pdfContent = await fetchAndProcessPdf(pdfLink);
pdfDocuments.push({
content: pdfContent,
metadata: { sourceURL: pdfLink },
provider: "web-scraper"
});
}
links = links.filter((link) => !link.endsWith(".pdf"));
let documents = await this.convertUrlsToDocuments(links, inProgress);
documents = await this.getSitemapData(this.urls[0], documents);
documents = this.replaceImgPathsWithAbsolutePaths(documents);
if (this.generateImgAltText) {
documents = await this.generatesImgAltText(documents);
}
documents = documents.concat(pdfDocuments);
// CACHING DOCUMENTS
// - parent document
const cachedParentDocumentString = await getValue(
"web-scraper-cache:" + this.normalizeUrl(this.urls[0])
);
if (cachedParentDocumentString != null) {
let cachedParentDocument = JSON.parse(cachedParentDocumentString);
if (
!cachedParentDocument.childrenLinks ||
cachedParentDocument.childrenLinks.length < links.length - 1
) {
cachedParentDocument.childrenLinks = links.filter(
(link) => link !== this.urls[0]
);
await setValue(
"web-scraper-cache:" + this.normalizeUrl(this.urls[0]),
JSON.stringify(cachedParentDocument),
60 * 60 * 24 * 10
); // 10 days
}
} else {
let parentDocument = documents.filter(
(document) =>
this.normalizeUrl(document.metadata.sourceURL) ===
this.normalizeUrl(this.urls[0])
);
await this.setCachedDocuments(parentDocument, links);
}
await this.setCachedDocuments(
documents.filter(
(document) =>
this.normalizeUrl(document.metadata.sourceURL) !==
this.normalizeUrl(this.urls[0])
),
[]
);
documents = this.removeChildLinks(documents);
documents = documents.splice(0, this.limit);
return documents;
}
if (this.mode === "single_urls") {
let pdfLinks = this.urls.filter((link) => link.endsWith(".pdf"));
let pdfDocuments: Document[] = [];
for (let pdfLink of pdfLinks) {
const pdfContent = await fetchAndProcessPdf(pdfLink);
pdfDocuments.push({
content: pdfContent,
metadata: { sourceURL: pdfLink },
provider: "web-scraper"
});
}
let documents = await this.convertUrlsToDocuments(
this.urls.filter((link) => !link.endsWith(".pdf")),
inProgress
);
documents = this.replaceImgPathsWithAbsolutePaths(documents);
if (this.generateImgAltText) {
documents = await this.generatesImgAltText(documents);
}
const baseUrl = new URL(this.urls[0]).origin;
documents = await this.getSitemapData(baseUrl, documents);
documents = documents.concat(pdfDocuments);
await this.setCachedDocuments(documents);
documents = this.removeChildLinks(documents);
documents = documents.splice(0, this.limit);
return documents;
}
if (this.mode === "sitemap") {
let links = await getLinksFromSitemap(this.urls[0]);
let pdfLinks = links.filter((link) => link.endsWith(".pdf"));
let pdfDocuments: Document[] = [];
for (let pdfLink of pdfLinks) {
const pdfContent = await fetchAndProcessPdf(pdfLink);
pdfDocuments.push({
content: pdfContent,
metadata: { sourceURL: pdfLink },
provider: "web-scraper"
});
}
links = links.filter((link) => !link.endsWith(".pdf"));
let documents = await this.convertUrlsToDocuments(
links.slice(0, this.limit),
inProgress
);
documents = await this.getSitemapData(this.urls[0], documents);
documents = this.replaceImgPathsWithAbsolutePaths(documents);
if (this.generateImgAltText) {
documents = await this.generatesImgAltText(documents);
}
documents = documents.concat(pdfDocuments);
await this.setCachedDocuments(documents);
documents = this.removeChildLinks(documents);
documents = documents.splice(0, this.limit);
return documents;
}
return [];
private async handleCrawlMode(
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
const crawler = new WebCrawler({
initialUrl: this.urls[0],
includes: this.includes,
excludes: this.excludes,
maxCrawledLinks: this.maxCrawledLinks,
maxCrawledDepth: this.maxCrawledDepth,
limit: this.limit,
generateImgAltText: this.generateImgAltText,
});
let links = await crawler.start(inProgress, 5, this.limit, this.maxCrawledDepth);
if (this.returnOnlyUrls) {
return this.returnOnlyUrlsResponse(links, inProgress);
}
let documents = await this.processLinks(links, inProgress);
return this.cacheAndFinalizeDocuments(documents, links);
}
private async handleSingleUrlsMode(
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
let documents = await this.processLinks(this.urls, inProgress);
return documents;
}
private async handleSitemapMode(
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
let links = await getLinksFromSitemap(this.urls[0]);
if (this.returnOnlyUrls) {
return this.returnOnlyUrlsResponse(links, inProgress);
}
let documents = await this.processLinks(links, inProgress);
return this.cacheAndFinalizeDocuments(documents, links);
}
private async returnOnlyUrlsResponse(
links: string[],
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
inProgress?.({
current: links.length,
total: links.length,
status: "COMPLETED",
currentDocumentUrl: this.urls[0],
});
return links.map((url) => ({
content: "",
html: this.pageOptions?.includeHtml ? "" : undefined,
markdown: "",
metadata: { sourceURL: url },
}));
}
private async processLinks(
links: string[],
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
let pdfLinks = links.filter((link) => link.endsWith(".pdf"));
let pdfDocuments = await this.fetchPdfDocuments(pdfLinks);
links = links.filter((link) => !link.endsWith(".pdf"));
let documents = await this.convertUrlsToDocuments(links, inProgress);
documents = await this.getSitemapData(this.urls[0], documents);
documents = this.applyPathReplacements(documents);
// documents = await this.applyImgAltText(documents);
if (
this.extractorOptions.mode === "llm-extraction" &&
this.mode === "single_urls"
) {
documents = await generateCompletions(documents, this.extractorOptions);
}
return documents.concat(pdfDocuments);
}
private async fetchPdfDocuments(pdfLinks: string[]): Promise<Document[]> {
return Promise.all(
pdfLinks.map(async (pdfLink) => {
const pdfContent = await fetchAndProcessPdf(pdfLink);
return {
content: pdfContent,
metadata: { sourceURL: pdfLink },
provider: "web-scraper",
};
})
);
}
private applyPathReplacements(documents: Document[]): Document[] {
return this.replaceAllPathsWithAbsolutePaths
? replacePathsWithAbsolutePaths(documents)
: replaceImgPathsWithAbsolutePaths(documents);
}
private async applyImgAltText(documents: Document[]): Promise<Document[]> {
return this.generateImgAltText
? this.generatesImgAltText(documents)
: documents;
}
private async cacheAndFinalizeDocuments(
documents: Document[],
links: string[]
): Promise<Document[]> {
await this.setCachedDocuments(documents, links);
documents = this.removeChildLinks(documents);
return documents.splice(0, this.limit);
}
private async processDocumentsWithCache(
inProgress?: (progress: Progress) => void
): Promise<Document[]> {
let documents = await this.getCachedDocuments(
this.urls.slice(0, this.limit)
);
@ -220,22 +252,30 @@ export class WebScraperDataProvider {
false,
inProgress
);
newDocuments.forEach((doc) => {
if (
!documents.some(
(d) =>
this.normalizeUrl(d.metadata.sourceURL) ===
this.normalizeUrl(doc.metadata?.sourceURL)
)
) {
documents.push(doc);
}
});
documents = this.mergeNewDocuments(documents, newDocuments);
}
documents = this.filterDocsExcludeInclude(documents);
documents = this.filterDepth(documents);
documents = this.removeChildLinks(documents);
documents = documents.splice(0, this.limit);
return documents;
return documents.splice(0, this.limit);
}
private mergeNewDocuments(
existingDocuments: Document[],
newDocuments: Document[]
): Document[] {
newDocuments.forEach((doc) => {
if (
!existingDocuments.some(
(d) =>
this.normalizeUrl(d.metadata.sourceURL) ===
this.normalizeUrl(doc.metadata?.sourceURL)
)
) {
existingDocuments.push(doc);
}
});
return existingDocuments;
}
private filterDocsExcludeInclude(documents: Document[]): Document[] {
@ -312,7 +352,7 @@ export class WebScraperDataProvider {
documents.push(cachedDocument);
// get children documents
for (const childUrl of cachedDocument.childrenLinks) {
for (const childUrl of cachedDocument.childrenLinks || []) {
const normalizedChildUrl = this.normalizeUrl(childUrl);
const childCachedDocumentString = await getValue(
"web-scraper-cache:" + normalizedChildUrl
@ -340,18 +380,21 @@ export class WebScraperDataProvider {
throw new Error("Urls are required");
}
this.bullJobId = options.bullJobId;
this.urls = options.urls;
this.mode = options.mode;
this.concurrentRequests = options.concurrentRequests ?? 20;
this.includes = options.crawlerOptions?.includes ?? [];
this.excludes = options.crawlerOptions?.excludes ?? [];
this.maxCrawledLinks = options.crawlerOptions?.maxCrawledLinks ?? 1000;
this.maxCrawledDepth = options.crawlerOptions?.maxDepth ?? 10;
this.returnOnlyUrls = options.crawlerOptions?.returnOnlyUrls ?? false;
this.limit = options.crawlerOptions?.limit ?? 10000;
this.generateImgAltText =
options.crawlerOptions?.generateImgAltText ?? false;
this.pageOptions = options.pageOptions ?? {onlyMainContent: false};
this.pageOptions = options.pageOptions ?? { onlyMainContent: false, includeHtml: false };
this.extractorOptions = options.extractorOptions ?? {mode: "markdown"}
this.replaceAllPathsWithAbsolutePaths = options.crawlerOptions?.replaceAllPathsWithAbsolutePaths ?? false;
//! @nicolas, for some reason this was being injected and breaking everything. Don't have time to find source of the issue so adding this check
this.excludes = this.excludes.filter((item) => item !== "");
@ -421,7 +464,8 @@ export class WebScraperDataProvider {
altText = await getImageDescription(
imageUrl,
backText,
frontText
frontText,
this.generateImgAltTextModel
);
}
@ -437,39 +481,11 @@ export class WebScraperDataProvider {
return documents;
};
replaceImgPathsWithAbsolutePaths = (documents: Document[]): Document[] => {
try {
documents.forEach((document) => {
const baseUrl = new URL(document.metadata.sourceURL).origin;
const images =
document.content.match(
/!\[.*?\]\(((?:[^()]+|\((?:[^()]+|\([^()]*\))*\))*)\)/g
) || [];
images.forEach((image: string) => {
let imageUrl = image.match(/\(([^)]+)\)/)[1];
let altText = image.match(/\[(.*?)\]/)[1];
if (!imageUrl.startsWith("data:image")) {
if (!imageUrl.startsWith("http")) {
if (imageUrl.startsWith("/")) {
imageUrl = imageUrl.substring(1);
}
imageUrl = new URL(imageUrl, baseUrl).toString();
}
}
document.content = document.content.replace(
image,
`![${altText}](${imageUrl})`
);
});
});
return documents;
} catch (error) {
console.error("Error replacing img paths with absolute paths", error);
return documents;
}
};
filterDepth(documents: Document[]): Document[] {
return documents.filter((document) => {
const url = new URL(document.metadata.sourceURL);
const path = url.pathname;
return path.split("/").length <= this.maxCrawledDepth;
});
}
}
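Editor's note: processLinks above splits the link set by file type — .pdf URLs go through fetchAndProcessPdf while everything else takes the HTML scraping path, and the two document lists are concatenated at the end. A self-contained sketch of that partition-and-merge step, with fetchPdf and scrapeHtml as hypothetical stand-ins for the real processors:

type Doc = { content: string; metadata: { sourceURL: string } };

async function processAllLinks(
  links: string[],
  fetchPdf: (url: string) => Promise<Doc>,
  scrapeHtml: (url: string) => Promise<Doc>
): Promise<Doc[]> {
  // Partition by extension, mirroring the .pdf check in the diff above.
  const pdfLinks = links.filter((link) => link.endsWith(".pdf"));
  const htmlLinks = links.filter((link) => !link.endsWith(".pdf"));

  const pdfDocs = await Promise.all(pdfLinks.map(fetchPdf));
  const htmlDocs = await Promise.all(htmlLinks.map(scrapeHtml));
  return htmlDocs.concat(pdfDocs);
}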

View File

@ -4,12 +4,34 @@ import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { parseTablesToMarkdown } from "./utils/parseTable";
import { excludeNonMainTags } from "./utils/excludeTags";
// import puppeteer from "puppeteer";
import { urlSpecificParams } from "./utils/custom/website_params";
dotenv.config();
export async function generateRequestParams(
url: string,
wait_browser: string = "domcontentloaded",
timeout: number = 15000
): Promise<any> {
const defaultParams = {
url: url,
params: { timeout: timeout, wait_browser: wait_browser },
headers: { "ScrapingService-Request": "TRUE" },
};
try {
const urlKey = new URL(url).hostname.replace(/^www\./, "");
if (urlSpecificParams.hasOwnProperty(urlKey)) {
return { ...defaultParams, ...urlSpecificParams[urlKey] };
} else {
return defaultParams;
}
} catch (error) {
console.error(`Error generating URL key: ${error}`);
return defaultParams;
}
}
export async function scrapWithCustomFirecrawl(
url: string,
options?: any
@ -25,15 +47,18 @@ export async function scrapWithCustomFirecrawl(
export async function scrapWithScrapingBee(
url: string,
wait_browser: string = "domcontentloaded"
wait_browser: string = "domcontentloaded",
timeout: number = 15000
): Promise<string> {
try {
const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
const response = await client.get({
url: url,
params: { timeout: 15000, wait_browser: wait_browser },
headers: { "ScrapingService-Request": "TRUE" },
});
const clientParams = await generateRequestParams(
url,
wait_browser,
timeout
);
const response = await client.get(clientParams);
if (response.status !== 200 && response.status !== 404) {
console.error(
@ -52,12 +77,15 @@ export async function scrapWithScrapingBee(
export async function scrapWithPlaywright(url: string): Promise<string> {
try {
const reqParams = await generateRequestParams(url);
const wait_playwright = reqParams["params"]?.wait ?? 0;
const response = await fetch(process.env.PLAYWRIGHT_MICROSERVICE_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ url: url }),
body: JSON.stringify({ url: url, wait: wait_playwright }),
});
if (!response.ok) {
@ -78,10 +106,8 @@ export async function scrapWithPlaywright(url: string): Promise<string> {
export async function scrapSingleUrl(
urlToScrap: string,
toMarkdown: boolean = true,
pageOptions: PageOptions = { onlyMainContent: true }
pageOptions: PageOptions = { onlyMainContent: true, includeHtml: false }
): Promise<Document> {
console.log(`Scraping URL: ${urlToScrap}`);
urlToScrap = urlToScrap.trim();
const removeUnwantedElements = (html: string, pageOptions: PageOptions) => {
@ -112,7 +138,11 @@ export async function scrapSingleUrl(
break;
case "scrapingBee":
if (process.env.SCRAPING_BEE_API_KEY) {
text = await scrapWithScrapingBee(url);
text = await scrapWithScrapingBee(
url,
"domcontentloaded",
pageOptions.fallback === false ? 7000 : 15000
);
}
break;
case "playwright":
@ -141,46 +171,57 @@ export async function scrapSingleUrl(
}
break;
}
//* TODO: add an optional to return markdown or structured/extracted content
let cleanedHtml = removeUnwantedElements(text, pageOptions);
return [await parseMarkdown(cleanedHtml), text];
};
try {
// TODO: comment this out once we're ready to merge firecrawl-scraper into the mono-repo
// let [text, html] = await attemptScraping(urlToScrap, 'firecrawl-scraper');
// if (!text || text.length < 100) {
// console.log("Falling back to scraping bee load");
// [text, html] = await attemptScraping(urlToScrap, 'scrapingBeeLoad');
// }
let [text, html] = ["", ""];
let urlKey = urlToScrap;
try {
urlKey = new URL(urlToScrap).hostname.replace(/^www\./, "");
} catch (error) {
console.error(`Invalid URL key, trying: ${urlToScrap}`);
}
const defaultScraper = urlSpecificParams[urlKey]?.defaultScraper ?? "";
const scrapersInOrder = defaultScraper
? [
defaultScraper,
"scrapingBee",
"playwright",
"scrapingBeeLoad",
"fetch",
]
: ["scrapingBee", "playwright", "scrapingBeeLoad", "fetch"];
let [text, html] = await attemptScraping(urlToScrap, "scrapingBee");
if (!text || text.length < 100) {
console.log("Falling back to playwright");
[text, html] = await attemptScraping(urlToScrap, "playwright");
for (const scraper of scrapersInOrder) {
[text, html] = await attemptScraping(urlToScrap, scraper);
if (text && text.length >= 100) break;
console.log(`Scraper "${scraper}" did not return enough content, falling back to the next option`);
}
if (!text || text.length < 100) {
console.log("Falling back to scraping bee load");
[text, html] = await attemptScraping(urlToScrap, "scrapingBeeLoad");
}
if (!text || text.length < 100) {
console.log("Falling back to fetch");
[text, html] = await attemptScraping(urlToScrap, "fetch");
if (!text) {
throw new Error(`All scraping methods failed for URL: ${urlToScrap}`);
}
const soup = cheerio.load(html);
const metadata = extractMetadata(soup, urlToScrap);
return {
const document: Document = {
content: text,
markdown: text,
html: pageOptions.includeHtml ? html : undefined,
metadata: { ...metadata, sourceURL: urlToScrap },
} as Document;
};
return document;
} catch (error) {
console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
return {
content: "",
markdown: "",
html: "",
metadata: { sourceURL: urlToScrap },
} as Document;
}
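Editor's note: the rewritten scrapSingleUrl builds an ordered scraper list (optionally led by a site-specific defaultScraper) and walks it until one attempt yields at least 100 characters of text. A reduced sketch of that fallback loop, with Scraper as a hypothetical function type rather than the repo's attemptScraping signature:

type Scraper = (url: string) => Promise<string>;

async function scrapeWithFallbacks(url: string, scrapers: Scraper[]): Promise<string> {
  for (const scrape of scrapers) {
    try {
      const text = await scrape(url);
      // The 100-character threshold mirrors the check in the diff above.
      if (text && text.length >= 100) {
        return text;
      }
    } catch {
      // ignore and fall through to the next scraper
    }
  }
  throw new Error(`All scraping methods failed for URL: ${url}`);
}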

View File

@ -1,40 +1,47 @@
import * as pdfProcessor from '../pdfProcessor';
describe('PDF Processing Module - Integration Test', () => {
it('should download and read a simple PDF file by URL', async () => {
it('should correctly process a simple PDF file without the LLAMAPARSE_API_KEY', async () => {
delete process.env.LLAMAPARSE_API_KEY;
const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://s3.us-east-1.amazonaws.com/storage.mendable.ai/rafa-testing/test%20%281%29.pdf');
expect(pdfContent).toEqual("Dummy PDF file");
expect(pdfContent.trim()).toEqual("Dummy PDF file");
});
it('should download and read a complex PDF file by URL', async () => {
const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://arxiv.org/pdf/2307.06435.pdf');
// We're hitting the LLAMAPARSE rate limit 🫠
// it('should download and read a simple PDF file by URL', async () => {
// const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://s3.us-east-1.amazonaws.com/storage.mendable.ai/rafa-testing/test%20%281%29.pdf');
// expect(pdfContent).toEqual("Dummy PDF file");
// });
const expectedContent = 'A Comprehensive Overview of Large Language Models\n' +
' a a, b, c,d, e,f e,f g,i\n' +
' Humza Naveed , Asad Ullah Khan , Shi Qiu , Muhammad Saqib , Saeed Anwar , Muhammad Usman , Naveed Akhtar ,\n' +
' Nick Barnes h, Ajmal Mian i\n' +
' aUniversity of Engineering and Technology (UET), Lahore, Pakistan\n' +
' bThe Chinese University of Hong Kong (CUHK), HKSAR, China\n' +
' cUniversity of Technology Sydney (UTS), Sydney, Australia\n' +
' dCommonwealth Scientific and Industrial Research Organisation (CSIRO), Sydney, Australia\n' +
' eKing Fahd University of Petroleum and Minerals (KFUPM), Dhahran, Saudi Arabia\n' +
' fSDAIA-KFUPM Joint Research Center for Artificial Intelligence (JRCAI), Dhahran, Saudi Arabia\n' +
' gThe University of Melbourne (UoM), Melbourne, Australia\n' +
' hAustralian National University (ANU), Canberra, Australia\n' +
' iThe University of Western Australia (UWA), Perth, Australia\n' +
' Abstract\n' +
' Large Language Models (LLMs) have recently demonstrated remarkable capabilities in natural language processing tasks and\n' +
' beyond. This success of LLMs has led to a large influx of research contributions in this direction. These works encompass diverse\n' +
' topics such as architectural innovations, better training strategies, context length improvements, fine-tuning, multi-modal LLMs,\n' +
' robotics, datasets, benchmarking, efficiency, and more. With the rapid development of techniques and regular breakthroughs in\n' +
' LLM research, it has become considerably challenging to perceive the bigger picture of the advances in this direction. Considering\n' +
' the rapidly emerging plethora of literature on LLMs, it is imperative that the research community is able to benefit from a concise\n' +
' yet comprehensive overview of the recent developments in this field. This article provides an overview of the existing literature\n' +
' on a broad range of LLM-related concepts. Our self-contained comprehensive overview of LLMs discusses relevant background\n' +
' concepts along with covering the advanced topics at the frontier of research in LLMs. This review article is intended to not only\n' +
' provide a systematic survey but also a quick comprehensive reference for the researchers and practitioners to draw insights from\n' +
' extensive informative summaries of the existing works to advance the LLM research.\n'
expect(pdfContent).toContain(expectedContent);
}, 60000);
// it('should download and read a complex PDF file by URL', async () => {
// const pdfContent = await pdfProcessor.fetchAndProcessPdf('https://arxiv.org/pdf/2307.06435.pdf');
// const expectedContent = 'A Comprehensive Overview of Large Language Models\n' +
// ' a a, b, c,d, e,f e,f g,i\n' +
// ' Humza Naveed , Asad Ullah Khan , Shi Qiu , Muhammad Saqib , Saeed Anwar , Muhammad Usman , Naveed Akhtar ,\n' +
// ' Nick Barnes h, Ajmal Mian i\n' +
// ' aUniversity of Engineering and Technology (UET), Lahore, Pakistan\n' +
// ' bThe Chinese University of Hong Kong (CUHK), HKSAR, China\n' +
// ' cUniversity of Technology Sydney (UTS), Sydney, Australia\n' +
// ' dCommonwealth Scientific and Industrial Research Organisation (CSIRO), Sydney, Australia\n' +
// ' eKing Fahd University of Petroleum and Minerals (KFUPM), Dhahran, Saudi Arabia\n' +
// ' fSDAIA-KFUPM Joint Research Center for Artificial Intelligence (JRCAI), Dhahran, Saudi Arabia\n' +
// ' gThe University of Melbourne (UoM), Melbourne, Australia\n' +
// ' hAustralian National University (ANU), Canberra, Australia\n' +
// ' iThe University of Western Australia (UWA), Perth, Australia\n' +
// ' Abstract\n' +
// ' Large Language Models (LLMs) have recently demonstrated remarkable capabilities in natural language processing tasks and\n' +
// ' beyond. This success of LLMs has led to a large influx of research contributions in this direction. These works encompass diverse\n' +
// ' topics such as architectural innovations, better training strategies, context length improvements, fine-tuning, multi-modal LLMs,\n' +
// ' robotics, datasets, benchmarking, efficiency, and more. With the rapid development of techniques and regular breakthroughs in\n' +
// ' LLM research, it has become considerably challenging to perceive the bigger picture of the advances in this direction. Considering\n' +
// ' the rapidly emerging plethora of literature on LLMs, it is imperative that the research community is able to benefit from a concise\n' +
// ' yet comprehensive overview of the recent developments in this field. This article provides an overview of the existing literature\n' +
// ' on a broad range of LLM-related concepts. Our self-contained comprehensive overview of LLMs discusses relevant background\n' +
// ' concepts along with covering the advanced topics at the frontier of research in LLMs. This review article is intended to not only\n' +
// ' provide a systematic survey but also a quick comprehensive reference for the researchers and practitioners to draw insights from\n' +
// ' extensive informative summaries of the existing works to advance the LLM research.\n'
// expect(pdfContent).toContain(expectedContent);
// }, 60000);
});

View File

@ -0,0 +1,114 @@
import { Document } from "../../../../lib/entities";
import { replacePathsWithAbsolutePaths, replaceImgPathsWithAbsolutePaths } from "../replacePaths";
describe('replacePaths', () => {
describe('replacePathsWithAbsolutePaths', () => {
it('should replace relative paths with absolute paths', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'This is a [link](/path/to/resource) and an image ![alt text](/path/to/image.jpg).'
}];
const expectedDocuments: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'This is a [link](https://example.com/path/to/resource) and an image ![alt text](https://example.com/path/to/image.jpg).'
}];
const result = replacePathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it('should not alter absolute URLs', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'This is an [external link](https://external.com/path) and an image ![alt text](https://example.com/path/to/image.jpg).'
}];
const result = replacePathsWithAbsolutePaths(documents);
expect(result).toEqual(documents); // Expect no change
});
it('should not alter data URLs for images', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'This is an image: ![alt text](data:image/png;base64,ABC123==).'
}];
const result = replacePathsWithAbsolutePaths(documents);
expect(result).toEqual(documents); // Expect no change
});
it('should handle multiple links and images correctly', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Here are two links: [link1](/path1) and [link2](/path2), and two images: ![img1](/img1.jpg) ![img2](/img2.jpg).'
}];
const expectedDocuments: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Here are two links: [link1](https://example.com/path1) and [link2](https://example.com/path2), and two images: ![img1](https://example.com/img1.jpg) ![img2](https://example.com/img2.jpg).'
}];
const result = replacePathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it('should correctly handle a mix of absolute and relative paths', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Mixed paths: [relative](/path), [absolute](https://example.com/path), and [data image](data:image/png;base64,ABC123==).'
}];
const expectedDocuments: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Mixed paths: [relative](https://example.com/path), [absolute](https://example.com/path), and [data image](data:image/png;base64,ABC123==).'
}];
const result = replacePathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
});
describe('replaceImgPathsWithAbsolutePaths', () => {
it('should replace relative image paths with absolute paths', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Here is an image: ![alt text](/path/to/image.jpg).'
}];
const expectedDocuments: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Here is an image: ![alt text](https://example.com/path/to/image.jpg).'
}];
const result = replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
it('should not alter data:image URLs', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'An image with a data URL: ![alt text](data:image/png;base64,ABC123==).'
}];
const result = replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(documents); // Expect no change
});
it('should handle multiple images with a mix of data and relative URLs', () => {
const documents: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Multiple images: ![img1](/img1.jpg) ![img2](data:image/png;base64,ABC123==) ![img3](/img3.jpg).'
}];
const expectedDocuments: Document[] = [{
metadata: { sourceURL: 'https://example.com' },
content: 'Multiple images: ![img1](https://example.com/img1.jpg) ![img2](data:image/png;base64,ABC123==) ![img3](https://example.com/img3.jpg).'
}];
const result = replaceImgPathsWithAbsolutePaths(documents);
expect(result).toEqual(expectedDocuments);
});
});
});

View File

@ -0,0 +1,27 @@
const socialMediaBlocklist = [
'facebook.com',
'twitter.com',
'instagram.com',
'linkedin.com',
'pinterest.com',
'snapchat.com',
'tiktok.com',
'reddit.com',
'tumblr.com',
'flickr.com',
'whatsapp.com',
'wechat.com',
'telegram.org',
];
const allowedUrls = [
'linkedin.com/pulse'
];
export function isUrlBlocked(url: string): boolean {
if (allowedUrls.some(allowedUrl => url.includes(allowedUrl))) {
return false;
}
return socialMediaBlocklist.some(domain => url.includes(domain));
}
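Editor's note: usage sketch for the blocklist helper above, assuming isUrlBlocked is in scope as defined; the example URLs are illustrative.

isUrlBlocked("https://twitter.com/someuser");              // true  – social media domain
isUrlBlocked("https://www.linkedin.com/pulse/some-post");  // false – matches the allow list
isUrlBlocked("https://example.com/blog/post");             // false – not in the blocklist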

View File

@ -0,0 +1,125 @@
export const urlSpecificParams = {
"platform.openai.com": {
params: {
wait_browser: "networkidle2",
block_resources: false,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
cookies: {
__cf_bm:
"mC1On8P2GWT3A5UeSYH6z_MP94xcTAdZ5jfNi9IT2U0-1714327136-1.0.1.1-ILAP5pSX_Oo9PPo2iHEYCYX.p9a0yRBNLr58GHyrzYNDJ537xYpG50MXxUYVdfrD.h3FV5O7oMlRKGA0scbxaQ",
},
},
"support.greenpay.me":{
defaultScraper: "playwright",
params: {
wait_browser: "networkidle2",
block_resources: false,
wait: 2000,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
},
"docs.pdw.co":{
defaultScraper: "playwright",
params: {
wait_browser: "networkidle2",
block_resources: false,
wait: 3000,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
},
"ycombinator.com":{
defaultScraper: "playwright",
params: {
wait_browser: "networkidle2",
block_resources: false,
wait: 3000,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
},
"developers.notion.com":{
defaultScraper: "playwright",
params: {
wait_browser: "networkidle2",
block_resources: false,
wait: 2000,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
},
"docs2.hubitat.com":{
defaultScraper: "playwright",
params: {
wait_browser: "networkidle2",
block_resources: false,
wait: 2000,
},
headers: {
"User-Agent":
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
referer: "https://www.google.com/",
"accept-language": "en-US,en;q=0.9",
"accept-encoding": "gzip, deflate, br",
accept:
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
},
}
};
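Editor's note: sketch of how these per-site overrides are applied (see generateRequestParams earlier in this diff). The hostname, stripped of a leading "www.", is the lookup key, and the matching entry is shallow-merged over the defaults; the URL below is illustrative.

const url = "https://docs.pdw.co/some/page";
const urlKey = new URL(url).hostname.replace(/^www\./, ""); // "docs.pdw.co"

const defaults = {
  url,
  params: { timeout: 15000, wait_browser: "domcontentloaded" },
  headers: { "ScrapingService-Request": "TRUE" },
};

// Shallow merge: top-level keys such as `params` and `headers` from the
// site-specific entry replace the defaults wholesale rather than merging deeply.
const requestParams = { ...defaults, ...urlSpecificParams[urlKey] };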

View File

@ -1,41 +0,0 @@
export async function getImageDescription(
imageUrl: string,
backText: string,
frontText: string
): Promise<string> {
const { OpenAI } = require("openai");
const openai = new OpenAI();
try {
const response = await openai.chat.completions.create({
model: "gpt-4-turbo",
messages: [
{
role: "user",
content: [
{
type: "text",
text:
"What's in the image? You need to answer with the content for the alt tag of the image. To help you with the context, the image is in the following text: " +
backText +
" and the following text: " +
frontText +
". Be super concise.",
},
{
type: "image_url",
image_url: {
url: imageUrl,
},
},
],
},
],
});
return response.choices[0].message.content;
} catch (error) {
console.error("Error generating image alt text:", error?.message);
return "";
}
}

View File

@ -0,0 +1,88 @@
import Anthropic from '@anthropic-ai/sdk';
import axios from 'axios';
export async function getImageDescription(
imageUrl: string,
backText: string,
frontText: string,
model: string = "gpt-4-turbo"
): Promise<string> {
try {
const prompt = "What's in the image? You need to answer with the content for the alt tag of the image. To help you with the context, the image is in the following text: " +
backText +
" and the following text: " +
frontText +
". Be super concise."
switch (model) {
case 'claude-3-opus': {
if (!process.env.ANTHROPIC_API_KEY) {
throw new Error("No Anthropic API key provided");
}
const imageRequest = await axios.get(imageUrl, { responseType: 'arraybuffer' });
const imageMediaType = 'image/png';
const imageData = Buffer.from(imageRequest.data, 'binary').toString('base64');
const anthropic = new Anthropic();
const response = await anthropic.messages.create({
model: "claude-3-opus-20240229",
max_tokens: 1024,
messages: [
{
role: "user",
content: [
{
type: "image",
source: {
type: "base64",
media_type: imageMediaType,
data: imageData,
},
},
{
type: "text",
text: prompt
}
],
}
]
});
return response.content[0].text;
}
default: {
if (!process.env.OPENAI_API_KEY) {
throw new Error("No OpenAI API key provided");
}
const { OpenAI } = require("openai");
const openai = new OpenAI();
const response = await openai.chat.completions.create({
model: "gpt-4-turbo",
messages: [
{
role: "user",
content: [
{
type: "text",
text: prompt,
},
{
type: "image_url",
image_url: {
url: imageUrl,
},
},
],
},
],
});
return response.choices[0].message.content;
}
}
} catch (error) {
console.error("Error generating image alt text:", error?.message);
return "";
}
}
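Editor's note: a hypothetical call site for the rewritten helper. The model argument selects between the Anthropic and OpenAI paths, and any failure resolves to an empty string, so callers can use the result directly as an alt attribute. Assumes an async context; the URL and surrounding-text placeholders are illustrative.

const altText = await getImageDescription(
  "https://example.com/assets/architecture-diagram.png", // image to describe
  "placeholder for the markdown text on one side of the image",
  "placeholder for the markdown text on the other side of the image",
  "claude-3-opus" // omit or pass "gpt-4-turbo" to use the OpenAI path
);
console.log(altText || "(no alt text generated)");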

View File

@ -1,4 +1,3 @@
// import * as cheerio from 'cheerio';
import { CheerioAPI } from "cheerio";
interface Metadata {
title?: string;
@ -8,6 +7,14 @@ interface Metadata {
robots?: string;
ogTitle?: string;
ogDescription?: string;
ogUrl?: string;
ogImage?: string;
ogAudio?: string;
ogDeterminer?: string;
ogLocale?: string;
ogLocaleAlternate?: string[];
ogSiteName?: string;
ogVideo?: string;
dctermsCreated?: string;
dcDateCreated?: string;
dcDate?: string;
@ -17,7 +24,6 @@ interface Metadata {
dctermsSubject?: string;
dcSubject?: string;
dcDescription?: string;
ogImage?: string;
dctermsKeywords?: string;
modifiedTime?: string;
publishedTime?: string;
@ -33,6 +39,14 @@ export function extractMetadata(soup: CheerioAPI, url: string): Metadata {
let robots: string | null = null;
let ogTitle: string | null = null;
let ogDescription: string | null = null;
let ogUrl: string | null = null;
let ogImage: string | null = null;
let ogAudio: string | null = null;
let ogDeterminer: string | null = null;
let ogLocale: string | null = null;
let ogLocaleAlternate: string[] | null = null;
let ogSiteName: string | null = null;
let ogVideo: string | null = null;
let dctermsCreated: string | null = null;
let dcDateCreated: string | null = null;
let dcDate: string | null = null;
@ -42,7 +56,6 @@ export function extractMetadata(soup: CheerioAPI, url: string): Metadata {
let dctermsSubject: string | null = null;
let dcSubject: string | null = null;
let dcDescription: string | null = null;
let ogImage: string | null = null;
let dctermsKeywords: string | null = null;
let modifiedTime: string | null = null;
let publishedTime: string | null = null;
@ -62,11 +75,18 @@ export function extractMetadata(soup: CheerioAPI, url: string): Metadata {
robots = soup('meta[name="robots"]').attr("content") || null;
ogTitle = soup('meta[property="og:title"]').attr("content") || null;
ogDescription = soup('meta[property="og:description"]').attr("content") || null;
ogUrl = soup('meta[property="og:url"]').attr("content") || null;
ogImage = soup('meta[property="og:image"]').attr("content") || null;
ogAudio = soup('meta[property="og:audio"]').attr("content") || null;
ogDeterminer = soup('meta[property="og:determiner"]').attr("content") || null;
ogLocale = soup('meta[property="og:locale"]').attr("content") || null;
ogLocaleAlternate = soup('meta[property="og:locale:alternate"]').map((i, el) => soup(el).attr("content")).get() || null;
ogSiteName = soup('meta[property="og:site_name"]').attr("content") || null;
ogVideo = soup('meta[property="og:video"]').attr("content") || null;
articleSection = soup('meta[name="article:section"]').attr("content") || null;
articleTag = soup('meta[name="article:tag"]').attr("content") || null;
publishedTime = soup('meta[property="article:published_time"]').attr("content") || null;
modifiedTime = soup('meta[property="article:modified_time"]').attr("content") || null;
ogImage = soup('meta[property="og:image"]').attr("content") || null;
dctermsKeywords = soup('meta[name="dcterms.keywords"]').attr("content") || null;
dcDescription = soup('meta[name="dc.description"]').attr("content") || null;
dcSubject = soup('meta[name="dc.subject"]').attr("content") || null;
@ -90,6 +110,14 @@ export function extractMetadata(soup: CheerioAPI, url: string): Metadata {
...(robots ? { robots } : {}),
...(ogTitle ? { ogTitle } : {}),
...(ogDescription ? { ogDescription } : {}),
...(ogUrl ? { ogUrl } : {}),
...(ogImage ? { ogImage } : {}),
...(ogAudio ? { ogAudio } : {}),
...(ogDeterminer ? { ogDeterminer } : {}),
...(ogLocale ? { ogLocale } : {}),
...(ogLocaleAlternate ? { ogLocaleAlternate } : {}),
...(ogSiteName ? { ogSiteName } : {}),
...(ogVideo ? { ogVideo } : {}),
...(dctermsCreated ? { dctermsCreated } : {}),
...(dcDateCreated ? { dcDateCreated } : {}),
...(dcDate ? { dcDate } : {}),
@ -99,7 +127,6 @@ export function extractMetadata(soup: CheerioAPI, url: string): Metadata {
...(dctermsSubject ? { dctermsSubject } : {}),
...(dcSubject ? { dcSubject } : {}),
...(dcDescription ? { dcDescription } : {}),
...(ogImage ? { ogImage } : {}),
...(dctermsKeywords ? { dctermsKeywords } : {}),
...(modifiedTime ? { modifiedTime } : {}),
...(publishedTime ? { publishedTime } : {}),
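Editor's note: a minimal standalone sketch of the Open Graph extraction pattern added above, using the same cheerio API the file already relies on; the HTML snippet is illustrative.

import * as cheerio from "cheerio";

const html = `<head>
  <meta property="og:title" content="Firecrawl" />
  <meta property="og:locale:alternate" content="en_GB" />
  <meta property="og:locale:alternate" content="de_DE" />
</head>`;

const $ = cheerio.load(html);
const ogTitle = $('meta[property="og:title"]').attr("content") || null; // "Firecrawl"
const ogLocaleAlternate = $('meta[property="og:locale:alternate"]')
  .map((_, el) => $(el).attr("content"))
  .get(); // ["en_GB", "de_DE"]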

View File

@ -0,0 +1,80 @@
import { Document } from "../../../lib/entities";
export const replacePathsWithAbsolutePaths = (documents: Document[]): Document[] => {
try {
documents.forEach((document) => {
const baseUrl = new URL(document.metadata.sourceURL).origin;
const paths =
document.content.match(
/(!?\[.*?\])\(((?:[^()]+|\((?:[^()]+|\([^()]*\))*\))*)\)|href="([^"]+)"/g
) || [];
paths.forEach((path: string) => {
const isImage = path.startsWith("!");
let matchedUrl = path.match(/\(([^)]+)\)/) || path.match(/href="([^"]+)"/);
let url = matchedUrl[1];
if (!url.startsWith("data:") && !url.startsWith("http")) {
if (url.startsWith("/")) {
url = url.substring(1);
}
url = new URL(url, baseUrl).toString();
}
const markdownLinkOrImageText = path.match(/(!?\[.*?\])/)[0];
if (isImage) {
document.content = document.content.replace(
path,
`${markdownLinkOrImageText}(${url})`
);
} else {
document.content = document.content.replace(
path,
`${markdownLinkOrImageText}(${url})`
);
}
});
});
return documents;
} catch (error) {
console.error("Error replacing paths with absolute paths", error);
return documents;
}
};
export const replaceImgPathsWithAbsolutePaths = (documents: Document[]): Document[] => {
try {
documents.forEach((document) => {
const baseUrl = new URL(document.metadata.sourceURL).origin;
const images =
document.content.match(
/!\[.*?\]\(((?:[^()]+|\((?:[^()]+|\([^()]*\))*\))*)\)/g
) || [];
images.forEach((image: string) => {
let imageUrl = image.match(/\(([^)]+)\)/)[1];
let altText = image.match(/\[(.*?)\]/)[1];
if (!imageUrl.startsWith("data:image")) {
if (!imageUrl.startsWith("http")) {
if (imageUrl.startsWith("/")) {
imageUrl = imageUrl.substring(1);
}
imageUrl = new URL(imageUrl, baseUrl).toString();
}
}
document.content = document.content.replace(
image,
`![${altText}](${imageUrl})`
);
});
});
return documents;
} catch (error) {
console.error("Error replacing img paths with absolute paths", error);
return documents;
}
};

View File

@ -0,0 +1,115 @@
import axios from 'axios';
import * as cheerio from 'cheerio';
import * as querystring from 'querystring';
import { SearchResult } from '../../src/lib/entities';
const _useragent_list = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
];
function get_useragent(): string {
return _useragent_list[Math.floor(Math.random() * _useragent_list.length)];
}
async function _req(term: string, results: number, lang: string, country: string, start: number, proxies: any, timeout: number, tbs: string = null, filter: string = null) {
const params = {
"q": term,
"num": results, // Number of results to return
"hl": lang,
"gl": country,
"start": start,
};
if (tbs) {
params["tbs"] = tbs;
}
if (filter) {
params["filter"] = filter;
}
try {
const resp = await axios.get("https://www.google.com/search", {
headers: {
"User-Agent": get_useragent()
},
params: params,
proxy: proxies,
timeout: timeout,
});
return resp;
} catch (error) {
if (error.response && error.response.status === 429) {
throw new Error('Google Search: Too many requests, try again later.');
}
throw error;
}
}
export async function google_search(term: string, advanced = false, num_results = 7, tbs = null, filter = null, lang = "en", country = "us", proxy = null, sleep_interval = 0, timeout = 5000, ) :Promise<SearchResult[]> {
const escaped_term = querystring.escape(term);
let proxies = null;
if (proxy) {
if (proxy.startsWith("https")) {
proxies = {"https": proxy};
} else {
proxies = {"http": proxy};
}
}
// TODO: knowledge graph, answer box, etc.
let start = 0;
let results : SearchResult[] = [];
let attempts = 0;
const maxAttempts = 20; // Define a maximum number of attempts to prevent infinite loop
while (start < num_results && attempts < maxAttempts) {
try {
const resp = await _req(escaped_term, num_results - start, lang, country, start, proxies, timeout, tbs, filter);
const $ = cheerio.load(resp.data);
const result_block = $("div.g");
if (result_block.length === 0) {
start += 1;
attempts += 1;
} else {
attempts = 0; // Reset attempts if we have results
}
result_block.each((index, element) => {
const linkElement = $(element).find("a");
const link = linkElement && linkElement.attr("href") ? linkElement.attr("href") : null;
const title = $(element).find("h3");
const ogImage = $(element).find("img").eq(1).attr("src");
const description_box = $(element).find("div[style='-webkit-line-clamp:2']");
const answerBox = $(element).find(".mod").text();
if (description_box) {
const description = description_box.text();
if (link && title && description) {
start += 1;
results.push(new SearchResult(link, title.text(), description));
}
}
});
await new Promise(resolve => setTimeout(resolve, sleep_interval * 1000));
} catch (error) {
if (error.message.includes('Too many requests')) {
console.warn('Too many requests, breaking the loop');
break;
}
throw error;
}
if (start === 0) {
return results;
}
}
if (attempts >= maxAttempts) {
console.warn('Max attempts reached, breaking the loop');
}
return results
}
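Editor's note: usage sketch for the scraper-based search above. The positional arguments follow the signature defined in this file (term, advanced, num_results, ...); the query is illustrative and the call is assumed to run inside an async context.

const results = await google_search("open source web scraping api", false, 5);
console.log(`google_search returned ${results.length} results`);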

View File

@ -0,0 +1,54 @@
import { SearchResult } from "../../src/lib/entities";
import { google_search } from "./googlesearch";
import { serper_search } from "./serper";
export async function search({
query,
advanced = false,
num_results = 7,
tbs = null,
filter = null,
lang = "en",
country = "us",
location = undefined,
proxy = null,
sleep_interval = 0,
timeout = 5000,
}: {
query: string;
advanced?: boolean;
num_results?: number;
tbs?: string;
filter?: string;
lang?: string;
country?: string;
location?: string;
proxy?: string;
sleep_interval?: number;
timeout?: number;
}) : Promise<SearchResult[]> {
try {
if (process.env.SERPER_API_KEY ) {
return await serper_search(query, {num_results, tbs, filter, lang, country, location});
}
return await google_search(
query,
advanced,
num_results,
tbs,
filter,
lang,
country,
proxy,
sleep_interval,
timeout
);
} catch (error) {
console.error("Error in search function: ", error);
return []
}
// if process.env.SERPER_API_KEY is set, use serper
}
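Editor's note: usage sketch for the search() wrapper above — it prefers Serper when SERPER_API_KEY is set, falls back to the Google scraper otherwise, and resolves to an empty array on failure rather than throwing. Assumes an async context; the query is illustrative.

const results = await search({ query: "firecrawl crawler", num_results: 5 });
if (results.length === 0) {
  console.log("No results (or the underlying provider failed and [] was returned)");
}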

View File

@ -0,0 +1,45 @@
import axios from "axios";
import dotenv from "dotenv";
import { SearchResult } from "../../src/lib/entities";
dotenv.config();
export async function serper_search(q, options: {
tbs?: string;
filter?: string;
lang?: string;
country?: string;
location?: string;
num_results: number;
page?: number;
}): Promise<SearchResult[]> {
let data = JSON.stringify({
q: q,
hl: options.lang,
gl: options.country,
location: options.location,
tbs: options.tbs,
num: options.num_results,
page: options.page ?? 1,
});
let config = {
method: "POST",
url: "https://google.serper.dev/search",
headers: {
"X-API-KEY": process.env.SERPER_API_KEY,
"Content-Type": "application/json",
},
data: data,
};
const response = await axios(config);
if (response && response.data && Array.isArray(response.data.organic)) {
return response.data.organic.map((a) => ({
url: a.link,
title: a.title,
description: a.snippet,
}));
}else{
return [];
}
}

View File

@ -1,7 +1,12 @@
import { withAuth } from "../../lib/withAuth";
import { supabase_service } from "../supabase";
const FREE_CREDITS = 100;
const FREE_CREDITS = 300;
export async function billTeam(team_id: string, credits: number) {
return withAuth(supaBillTeam)(team_id, credits);
}
export async function supaBillTeam(team_id: string, credits: number) {
if (team_id === "preview") {
return { success: true, message: "Preview team, no credits used" };
}
@ -13,7 +18,6 @@ export async function billTeam(team_id: string, credits: number) {
// created_at: The timestamp of the API usage.
// 1. get the subscription
const { data: subscription } = await supabase_service
.from("subscriptions")
.select("*")
@ -21,52 +25,162 @@ export async function billTeam(team_id: string, credits: number) {
.eq("status", "active")
.single();
if (!subscription) {
const { data: credit_usage } = await supabase_service
.from("credit_usage")
.insert([
{
team_id,
credits_used: credits,
created_at: new Date(),
},
])
.select();
// 2. Check for available coupons
const { data: coupons } = await supabase_service
.from("coupons")
.select("id, credits")
.eq("team_id", team_id)
.eq("status", "active");
return { success: true, credit_usage };
let couponCredits = 0;
if (coupons && coupons.length > 0) {
couponCredits = coupons.reduce((total, coupon) => total + coupon.credits, 0);
}
// 2. add the credits to the credits_usage
const { data: credit_usage } = await supabase_service
.from("credit_usage")
.insert([
{
team_id,
subscription_id: subscription.id,
credits_used: credits,
created_at: new Date(),
},
])
.select();
let sortedCoupons = coupons.sort((a, b) => b.credits - a.credits);
// using coupon credits:
if (couponCredits > 0) {
// if there is no subscription and they have enough coupon credits
if (!subscription) {
// using only coupon credits:
// if there are enough coupon credits
if (couponCredits >= credits) {
// remove credits from coupon credits
let usedCredits = credits;
while (usedCredits > 0) {
// update coupons
if (sortedCoupons[0].credits < usedCredits) {
usedCredits = usedCredits - sortedCoupons[0].credits;
// update coupon credits
await supabase_service
.from("coupons")
.update({
credits: 0
})
.eq("id", sortedCoupons[0].id);
sortedCoupons.shift();
return { success: true, credit_usage };
} else {
// update coupon credits
await supabase_service
.from("coupons")
.update({
credits: sortedCoupons[0].credits - usedCredits
})
.eq("id", sortedCoupons[0].id);
usedCredits = 0;
}
}
return await createCreditUsage({ team_id, credits: 0 });
// not enough coupon credits and no subscription
} else {
// update coupon credits
const usedCredits = credits - couponCredits;
for (let i = 0; i < sortedCoupons.length; i++) {
await supabase_service
.from("coupons")
.update({
credits: 0
})
.eq("id", sortedCoupons[i].id);
}
return await createCreditUsage({ team_id, credits: usedCredits });
}
}
// with subscription
// using coupon + subscription credits:
if (credits > couponCredits) {
// update coupon credits
for (let i = 0; i < sortedCoupons.length; i++) {
await supabase_service
.from("coupons")
.update({
credits: 0
})
.eq("id", sortedCoupons[i].id);
}
const usedCredits = credits - couponCredits;
return await createCreditUsage({ team_id, subscription_id: subscription.id, credits: usedCredits });
} else { // using only coupon credits
let usedCredits = credits;
while (usedCredits > 0) {
// update coupons
if (sortedCoupons[0].credits < usedCredits) {
usedCredits = usedCredits - sortedCoupons[0].credits;
// update coupon credits
await supabase_service
.from("coupons")
.update({
credits: 0
})
.eq("id", sortedCoupons[0].id);
sortedCoupons.shift();
} else {
// update coupon credits
await supabase_service
.from("coupons")
.update({
credits: sortedCoupons[0].credits - usedCredits
})
.eq("id", sortedCoupons[0].id);
usedCredits = 0;
}
}
return await createCreditUsage({ team_id, subscription_id: subscription.id, credits: 0 });
}
}
// not using coupon credits
if (!subscription) {
return await createCreditUsage({ team_id, credits });
}
return await createCreditUsage({ team_id, subscription_id: subscription.id, credits });
}
// if team has enough credits for the operation, return true, else return false
export async function checkTeamCredits(team_id: string, credits: number) {
return withAuth(supaCheckTeamCredits)(team_id, credits);
}
// if team has enough credits for the operation, return true, else return false
export async function supaCheckTeamCredits(team_id: string, credits: number) {
if (team_id === "preview") {
return { success: true, message: "Preview team, no credits used" };
}
// 1. Retrieve the team's active subscription based on the team_id.
const { data: subscription, error: subscriptionError } =
await supabase_service
.from("subscriptions")
.select("id, price_id, current_period_start, current_period_end")
.eq("team_id", team_id)
.eq("status", "active")
.single();
// Retrieve the team's active subscription
const { data: subscription, error: subscriptionError } = await supabase_service
.from("subscriptions")
.select("id, price_id, current_period_start, current_period_end")
.eq("team_id", team_id)
.eq("status", "active")
.single();
// Check for available coupons
const { data: coupons } = await supabase_service
.from("coupons")
.select("credits")
.eq("team_id", team_id)
.eq("status", "active");
let couponCredits = 0;
if (coupons && coupons.length > 0) {
couponCredits = coupons.reduce((total, coupon) => total + coupon.credits, 0);
}
// Free credits, no coupons
if (subscriptionError || !subscription) {
// If there is no active subscription but there are available coupons
if (couponCredits >= credits) {
return { success: true, message: "Sufficient credits available" };
}
const { data: creditUsages, error: creditUsageError } =
await supabase_service
.from("credit_usage")
@ -98,7 +212,30 @@ export async function checkTeamCredits(team_id: string, credits: number) {
return { success: true, message: "Sufficient credits available" };
}
// 2. Get the price_id from the subscription.
let totalCreditsUsed = 0;
try {
const { data: creditUsages, error: creditUsageError } = await supabase_service
.rpc("get_credit_usage_2", {
sub_id: subscription.id,
start_time: subscription.current_period_start,
end_time: subscription.current_period_end
});
if (creditUsageError) {
console.error("Error calculating credit usage:", creditUsageError);
}
if (creditUsages && creditUsages.length > 0) {
totalCreditsUsed = creditUsages[0].total_credits_used;
console.log("Total Credits Used:", totalCreditsUsed);
}
} catch (error) {
console.error("Error calculating credit usage:", error);
}
// Adjust total credits used by subtracting coupon value
const adjustedCreditsUsed = Math.max(0, totalCreditsUsed - couponCredits);
// Get the price details
const { data: price, error: priceError } = await supabase_service
.from("prices")
.select("credits")
@ -106,32 +243,11 @@ export async function checkTeamCredits(team_id: string, credits: number) {
.single();
if (priceError) {
throw new Error(
`Failed to retrieve price for price_id: ${subscription.price_id}`
);
throw new Error(`Failed to retrieve price for price_id: ${subscription.price_id}`);
}
// 4. Calculate the total credits used by the team within the current billing period.
const { data: creditUsages, error: creditUsageError } = await supabase_service
.from("credit_usage")
.select("credits_used")
.eq("subscription_id", subscription.id)
.gte("created_at", subscription.current_period_start)
.lte("created_at", subscription.current_period_end);
if (creditUsageError) {
throw new Error(
`Failed to retrieve credit usage for subscription_id: ${subscription.id}`
);
}
const totalCreditsUsed = creditUsages.reduce(
(acc, usage) => acc + usage.credits_used,
0
);
// 5. Compare the total credits used with the credits allowed by the plan.
if (totalCreditsUsed + credits > price.credits) {
// Compare the adjusted total credits used with the credits allowed by the plan
if (adjustedCreditsUsed + credits > price.credits) {
return { success: false, message: "Insufficient credits, please upgrade!" };
}
@ -150,9 +266,18 @@ export async function countCreditsAndRemainingForCurrentBillingPeriod(
.eq("team_id", team_id)
.single();
if (subscriptionError || !subscription) {
// throw new Error(`Failed to retrieve subscription for team_id: ${team_id}`);
const { data: coupons } = await supabase_service
.from("coupons")
.select("credits")
.eq("team_id", team_id)
.eq("status", "active");
let couponCredits = 0;
if (coupons && coupons.length > 0) {
couponCredits = coupons.reduce((total, coupon) => total + coupon.credits, 0);
}
if (subscriptionError || !subscription) {
// Free
const { data: creditUsages, error: creditUsageError } =
await supabase_service
@ -160,13 +285,9 @@ export async function countCreditsAndRemainingForCurrentBillingPeriod(
.select("credits_used")
.is("subscription_id", null)
.eq("team_id", team_id);
// .gte("created_at", subscription.current_period_start)
// .lte("created_at", subscription.current_period_end);
if (creditUsageError || !creditUsages) {
throw new Error(
`Failed to retrieve credit usage for subscription_id: ${subscription.id}`
);
throw new Error(`Failed to retrieve credit usage for team_id: ${team_id}`);
}
const totalCreditsUsed = creditUsages.reduce(
@ -174,26 +295,10 @@ export async function countCreditsAndRemainingForCurrentBillingPeriod(
0
);
// 4. Calculate remaining credits.
const remainingCredits = FREE_CREDITS - totalCreditsUsed;
return { totalCreditsUsed, remainingCredits, totalCredits: FREE_CREDITS };
const remainingCredits = FREE_CREDITS + couponCredits - totalCreditsUsed;
return { totalCreditsUsed: totalCreditsUsed, remainingCredits, totalCredits: FREE_CREDITS + couponCredits };
}
// 2. Get the price_id from the subscription to retrieve the total credits available.
const { data: price, error: priceError } = await supabase_service
.from("prices")
.select("credits")
.eq("id", subscription.price_id)
.single();
if (priceError || !price) {
throw new Error(
`Failed to retrieve price for price_id: ${subscription.price_id}`
);
}
// 3. Calculate the total credits used by the team within the current billing period.
const { data: creditUsages, error: creditUsageError } = await supabase_service
.from("credit_usage")
.select("credits_used")
@ -202,18 +307,42 @@ export async function countCreditsAndRemainingForCurrentBillingPeriod(
.lte("created_at", subscription.current_period_end);
if (creditUsageError || !creditUsages) {
throw new Error(
`Failed to retrieve credit usage for subscription_id: ${subscription.id}`
);
throw new Error(`Failed to retrieve credit usage for subscription_id: ${subscription.id}`);
}
const totalCreditsUsed = creditUsages.reduce(
(acc, usage) => acc + usage.credits_used,
0
);
const totalCreditsUsed = creditUsages.reduce((acc, usage) => acc + usage.credits_used, 0);
// 4. Calculate remaining credits.
const remainingCredits = price.credits - totalCreditsUsed;
const { data: price, error: priceError } = await supabase_service
.from("prices")
.select("credits")
.eq("id", subscription.price_id)
.single();
return { totalCreditsUsed, remainingCredits, totalCredits: price.credits };
if (priceError || !price) {
throw new Error(`Failed to retrieve price for price_id: ${subscription.price_id}`);
}
const remainingCredits = price.credits + couponCredits - totalCreditsUsed;
return {
totalCreditsUsed,
remainingCredits,
totalCredits: price.credits
};
}
async function createCreditUsage({ team_id, subscription_id, credits }: { team_id: string, subscription_id?: string, credits: number }) {
const { data: credit_usage } = await supabase_service
.from("credit_usage")
.insert([
{
team_id,
credits_used: credits,
subscription_id: subscription_id || null,
created_at: new Date(),
},
])
.select();
return { success: true, credit_usage };
}

View File

@ -0,0 +1,17 @@
import { supabase_service } from "../supabase";
import "dotenv/config";
export async function logCrawl(job_id: string, team_id: string) {
try {
const { data, error } = await supabase_service
.from("bulljobs_teams")
.insert([
{
job_id: job_id,
team_id: team_id,
},
]);
} catch (error) {
console.error("Error logging crawl job:\n", error);
}
}

View File

@ -0,0 +1,60 @@
import { ExtractorOptions } from './../../lib/entities';
import { supabase_service } from "../supabase";
import { FirecrawlJob } from "../../types";
import { posthog } from "../posthog";
import "dotenv/config";
export async function logJob(job: FirecrawlJob) {
try {
// Only log jobs in production
if (process.env.ENV !== "production") {
return;
}
const { data, error } = await supabase_service
.from("firecrawl_jobs")
.insert([
{
success: job.success,
message: job.message,
num_docs: job.num_docs,
docs: job.docs,
time_taken: job.time_taken,
team_id: job.team_id === "preview" ? null : job.team_id,
mode: job.mode,
url: job.url,
crawler_options: job.crawlerOptions,
page_options: job.pageOptions,
origin: job.origin,
extractor_options: job.extractor_options,
num_tokens: job.num_tokens
},
]);
if (process.env.POSTHOG_API_KEY) {
posthog.capture({
distinctId: job.team_id === "preview" ? null : job.team_id,
event: "job-logged",
properties: {
success: job.success,
message: job.message,
num_docs: job.num_docs,
time_taken: job.time_taken,
team_id: job.team_id === "preview" ? null : job.team_id,
mode: job.mode,
url: job.url,
crawler_options: job.crawlerOptions,
page_options: job.pageOptions,
origin: job.origin,
extractor_options: job.extractor_options,
num_tokens: job.num_tokens
},
});
}
if (error) {
console.error("Error logging job:\n", error);
}
} catch (error) {
console.error("Error logging job:\n", error);
}
}

View File

@ -1,4 +1,19 @@
const { Logtail } = require("@logtail/node");
//dot env
require("dotenv").config();
export const logtail = new Logtail(process.env.LOGTAIL_KEY);
import { Logtail } from "@logtail/node";
import "dotenv/config";
// A mock Logtail class to handle cases where LOGTAIL_KEY is not provided
class MockLogtail {
info(message: string, context?: Record<string, any>): void {
console.log(message, context);
}
error(message: string, context: Record<string, any> = {}): void {
console.error(message, context);
}
}
// Using the actual Logtail class if LOGTAIL_KEY exists, otherwise using the mock class
// Additionally, print a warning to the terminal if LOGTAIL_KEY is not provided
export const logtail = process.env.LOGTAIL_KEY ? new Logtail(process.env.LOGTAIL_KEY) : (() => {
console.warn("LOGTAIL_KEY is not provided - your events will not be logged. Using MockLogtail as a fallback. see logtail.ts for more.");
return new MockLogtail();
})();

View File

@ -0,0 +1,26 @@
import { PostHog } from 'posthog-node';
import "dotenv/config";
export default function PostHogClient() {
const posthogClient = new PostHog(process.env.POSTHOG_API_KEY, {
host: process.env.POSTHOG_HOST,
flushAt: 1,
flushInterval: 0
});
return posthogClient;
}
class MockPostHog {
capture() {}
}
// Using the actual PostHog class if POSTHOG_API_KEY exists, otherwise using the mock class
// Additionally, print a warning to the terminal if POSTHOG_API_KEY is not provided
export const posthog = process.env.POSTHOG_API_KEY
? PostHogClient()
: (() => {
console.warn(
"POSTHOG_API_KEY is not provided - your events will not be logged. Using MockPostHog as a fallback. See posthog.ts for more."
);
return new MockPostHog();
})();

View File

@ -3,8 +3,8 @@ import { getWebScraperQueue } from "./queue-service";
import "dotenv/config";
import { logtail } from "./logtail";
import { startWebScraperPipeline } from "../main/runWebScraper";
import { WebScraperDataProvider } from "../scraper/WebScraper";
import { callWebhook } from "./webhook";
import { logJob } from "./logging/log_job";
getWebScraperQueue().process(
Math.floor(Number(process.env.NUM_WORKERS_PER_QUEUE ?? 8)),
@ -16,7 +16,11 @@ getWebScraperQueue().process(
current_step: "SCRAPING",
current_url: "",
});
const start = Date.now();
const { success, message, docs } = await startWebScraperPipeline({ job });
const end = Date.now();
const timeTakenInSeconds = (end - start) / 1000;
const data = {
success: success,
@ -30,6 +34,20 @@ getWebScraperQueue().process(
};
await callWebhook(job.data.team_id, data);
await logJob({
success: success,
message: message,
num_docs: docs.length,
docs: docs,
time_taken: timeTakenInSeconds,
team_id: job.data.team_id,
mode: "crawl",
url: job.data.url,
crawlerOptions: job.data.crawlerOptions,
pageOptions: job.data.pageOptions,
origin: job.data.origin,
});
done(null, data);
} catch (error) {
if (error instanceof CustomError) {
@ -56,6 +74,19 @@ getWebScraperQueue().process(
"Something went wrong... Contact help@mendable.ai or try again." /* etc... */,
};
await callWebhook(job.data.team_id, data);
await logJob({
success: false,
message: typeof error === 'string' ? error : (error.message ?? "Something went wrong... Contact help@mendable.ai"),
num_docs: 0,
docs: [],
time_taken: 0,
team_id: job.data.team_id,
mode: "crawl",
url: job.data.url,
crawlerOptions: job.data.crawlerOptions,
pageOptions: job.data.pageOptions,
origin: job.data.origin,
});
done(null, data);
}
}

View File

@ -1,12 +1,16 @@
import { RateLimiterRedis } from "rate-limiter-flexible";
import * as redis from "redis";
import { RateLimiterMode } from "../../src/types";
const MAX_REQUESTS_PER_MINUTE_PREVIEW = 5;
const MAX_CRAWLS_PER_MINUTE_STARTER = 2;
const MAX_CRAWLS_PER_MINUTE_STANDARD = 4;
const MAX_CRAWLS_PER_MINUTE_SCALE = 20;
const MAX_REQUESTS_PER_MINUTE_ACCOUNT = 40;
const MAX_REQUESTS_PER_MINUTE_ACCOUNT = 20;
const MAX_REQUESTS_PER_MINUTE_CRAWL_STATUS = 120;
@ -29,6 +33,20 @@ export const serverRateLimiter = new RateLimiterRedis({
duration: 60, // Duration in seconds
});
export const crawlStatusRateLimiter = new RateLimiterRedis({
storeClient: redisClient,
keyPrefix: "middleware",
points: MAX_REQUESTS_PER_MINUTE_CRAWL_STATUS,
duration: 60, // Duration in seconds
});
export const testSuiteRateLimiter = new RateLimiterRedis({
storeClient: redisClient,
keyPrefix: "middleware",
points: 1000,
duration: 60, // Duration in seconds
});
export function crawlRateLimit(plan: string){
if(plan === "standard"){
@ -56,10 +74,19 @@ export function crawlRateLimit(plan: string){
}
export function getRateLimiter(preview: boolean){
if(preview){
return previewRateLimiter;
}else{
return serverRateLimiter;
export function getRateLimiter(mode: RateLimiterMode, token: string){
// Special test suite case. TODO: Change this later.
if(token.includes("5089cefa58")){
return testSuiteRateLimiter;
}
switch(mode) {
case RateLimiterMode.Preview:
return previewRateLimiter;
case RateLimiterMode.CrawlStatus:
return crawlStatusRateLimiter;
default:
return serverRateLimiter;
}
}

View File

@ -1,6 +1,56 @@
import { createClient } from "@supabase/supabase-js";
import { createClient, SupabaseClient } from "@supabase/supabase-js";
export const supabase_service = createClient<any>(
process.env.SUPABASE_URL,
process.env.SUPABASE_SERVICE_TOKEN,
);
// SupabaseService class initializes the Supabase client conditionally based on environment variables.
class SupabaseService {
private client: SupabaseClient | null = null;
constructor() {
const supabaseUrl = process.env.SUPABASE_URL;
const supabaseServiceToken = process.env.SUPABASE_SERVICE_TOKEN;
// Only initialize the Supabase client if both URL and Service Token are provided.
if (process.env.USE_DB_AUTHENTICATION === "false") {
// Warn the user that Authentication is disabled by setting the client to null
console.warn(
"\x1b[33mAuthentication is disabled. Supabase client will not be initialized.\x1b[0m"
);
this.client = null;
} else if (!supabaseUrl || !supabaseServiceToken) {
console.error(
"\x1b[31mSupabase environment variables aren't configured correctly. Supabase client will not be initialized. Fix ENV configuration or disable DB authentication with USE_DB_AUTHENTICATION env variable\x1b[0m"
);
} else {
this.client = createClient(supabaseUrl, supabaseServiceToken);
}
}
// Provides access to the initialized Supabase client, if available.
getClient(): SupabaseClient | null {
return this.client;
}
}
// Using a Proxy to handle dynamic access to the Supabase client or service methods.
// This approach ensures that if Supabase is not configured, any attempt to use it will result in a clear error.
export const supabase_service: SupabaseClient = new Proxy(
new SupabaseService(),
{
get: function (target, prop, receiver) {
const client = target.getClient();
// If the Supabase client is not initialized, intercept property access to provide meaningful error feedback.
if (client === null) {
console.error(
"Attempted to access Supabase client when it's not configured."
);
return () => {
throw new Error("Supabase client is not configured.");
};
}
// Direct access to SupabaseService properties takes precedence.
if (prop in target) {
return Reflect.get(target, prop, receiver);
}
// Otherwise, delegate access to the Supabase client.
return Reflect.get(client, prop, receiver);
},
}
) as unknown as SupabaseClient;

View File

@ -1,6 +1,7 @@
import { supabase_service } from "./supabase";
export const callWebhook = async (teamId: string, data: any) => {
try {
const { data: webhooksData, error } = await supabase_service
.from('webhooks')
.select('url')
@ -37,5 +38,9 @@ export const callWebhook = async (teamId: string, data: any) => {
data: dataToSend,
error: data.error || undefined,
}),
});
}
});
} catch (error) {
console.error(`Error sending webhook for team ID: ${teamId}`, error.message);
}
};

View File

@ -1,3 +1,5 @@
import { ExtractorOptions } from "./lib/entities";
export interface CrawlResult {
source: string;
content: string;
@ -22,7 +24,39 @@ export interface WebScraperOptions {
crawlerOptions: any;
pageOptions: any;
team_id: string;
origin?: string;
}
export interface FirecrawlJob {
success: boolean;
message: string;
num_docs: number;
docs: any[];
time_taken: number;
team_id: string;
mode: string;
url: string;
crawlerOptions?: any;
pageOptions?: any;
origin: string;
extractor_options?: ExtractorOptions,
num_tokens?: number,
}
export enum RateLimiterMode {
Crawl = "crawl",
CrawlStatus = "crawl-status",
Scrape = "scrape",
Preview = "preview",
Search = "search",
}
export interface AuthResponse {
success: boolean;
team_id?: string;
error?: string;
status?: number;
}

View File

@ -1,7 +1,13 @@
import FirecrawlApp from '@mendable/firecrawl-js';
import { z } from "zod";
const app = new FirecrawlApp({apiKey: "YOUR_API_KEY"});
const app = new FirecrawlApp({apiKey: "fc-YOUR_API_KEY"});
// Scrape a website:
const scrapeResult = await app.scrapeUrl('firecrawl.dev');
console.log(scrapeResult.data.content)
// Crawl a website:
const crawlResult = await app.crawlUrl('mendable.ai', {crawlerOptions: {excludes: ['blog/*'], limit: 5}}, false);
console.log(crawlResult)
@ -17,4 +23,61 @@ while (true) {
await new Promise(resolve => setTimeout(resolve, 1000)); // wait 1 second
}
console.log(job.data[0].content);
console.log(job.data[0].content);
// Search for a query:
const query = 'what is mendable?'
const searchResult = await app.search(query)
console.log(searchResult)
// LLM Extraction:
// Define schema to extract contents into using zod schema
const zodSchema = z.object({
top: z
.array(
z.object({
title: z.string(),
points: z.number(),
by: z.string(),
commentsURL: z.string(),
})
)
.length(5)
.describe("Top 5 stories on Hacker News"),
});
let llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: zodSchema },
});
console.log(llmExtractionResult.data.llm_extraction);
// Define schema to extract contents into using json schema
const jsonSchema = {
"type": "object",
"properties": {
"top": {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"points": {"type": "number"},
"by": {"type": "string"},
"commentsURL": {"type": "string"}
},
"required": ["title", "points", "by", "commentsURL"]
},
"minItems": 5,
"maxItems": 5,
"description": "Top 5 stories on Hacker News"
}
},
"required": ["top"]
}
llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: jsonSchema },
});
console.log(llmExtractionResult.data.llm_extraction);

83
apps/js-sdk/example.ts Normal file
View File

@ -0,0 +1,83 @@
import FirecrawlApp, { JobStatusResponse } from '@mendable/firecrawl-js';
import { z } from "zod";
const app = new FirecrawlApp({apiKey: "fc-YOUR_API_KEY"});
// Scrape a website:
const scrapeResult = await app.scrapeUrl('firecrawl.dev');
console.log(scrapeResult.data.content)
// Crawl a website:
const crawlResult = await app.crawlUrl('mendable.ai', {crawlerOptions: {excludes: ['blog/*'], limit: 5}}, false);
console.log(crawlResult)
const jobId: string = await crawlResult['jobId'];
console.log(jobId);
let job: JobStatusResponse;
while (true) {
job = await app.checkCrawlStatus(jobId);
if (job.status === 'completed') {
break;
}
await new Promise(resolve => setTimeout(resolve, 1000)); // wait 1 second
}
console.log(job.data[0].content);
// Search for a query:
const query = 'what is mendable?'
const searchResult = await app.search(query)
console.log(searchResult)
// LLM Extraction:
// Define schema to extract contents into using zod schema
const zodSchema = z.object({
top: z
.array(
z.object({
title: z.string(),
points: z.number(),
by: z.string(),
commentsURL: z.string(),
})
)
.length(5)
.describe("Top 5 stories on Hacker News"),
});
let llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: zodSchema },
});
console.log(llmExtractionResult.data.llm_extraction);
// Define schema to extract contents into using json schema
const jsonSchema = {
"type": "object",
"properties": {
"top": {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"points": {"type": "number"},
"by": {"type": "string"},
"commentsURL": {"type": "string"}
},
"required": ["title", "points", "by", "commentsURL"]
},
"minItems": 5,
"maxItems": 5,
"description": "Top 5 stories on Hacker News"
}
},
"required": ["top"]
}
llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: jsonSchema },
});
console.log(llmExtractionResult.data.llm_extraction);

View File

@ -77,6 +77,42 @@ To scrape a single URL with error handling, use the `scrapeUrl` method. It takes
scrapeExample();
```
### Extracting structured data from a URL
With LLM extraction, you can easily extract structured data from any URL. We support zod schemas to make it easier for you too. Here is how to use it:
```js
import { z } from "zod";
const zodSchema = z.object({
top: z
.array(
z.object({
title: z.string(),
points: z.number(),
by: z.string(),
commentsURL: z.string(),
})
)
.length(5)
.describe("Top 5 stories on Hacker News"),
});
let llmExtractionResult = await app.scrapeUrl("https://news.ycombinator.com", {
extractorOptions: { extractionSchema: zodSchema },
});
console.log(llmExtractionResult.data.llm_extraction);
```
### Search for a query
Used to search the web, get the most relevant results, scrape each page, and return the markdown.
```js
const query = 'what is mendable?';
const searchResult = await app.search(query);
```
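If you want to do something with the results, here is a minimal sketch, assuming each result entry exposes `content` and `metadata` like a scraped page (hedged: field names may vary with your API version):
```js
const results = await app.search('what is mendable?');
if (results.success) {
  results.data.forEach((doc) => {
    // Each entry is a scraped page; print its title and a short content preview.
    console.log(doc.metadata?.title, '-', doc.content?.slice(0, 80));
  });
}
```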
### Crawling a Website

View File

@ -7,46 +7,111 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
import axios from 'axios';
import dotenv from 'dotenv';
dotenv.config();
import axios from "axios";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
/**
* Main class for interacting with the Firecrawl API.
*/
export default class FirecrawlApp {
/**
* Initializes a new instance of the FirecrawlApp class.
* @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
*/
constructor({ apiKey = null }) {
this.apiKey = apiKey || process.env.FIRECRAWL_API_KEY || '';
this.apiKey = apiKey || "";
if (!this.apiKey) {
throw new Error('No API key provided');
throw new Error("No API key provided");
}
}
/**
* Scrapes a URL using the Firecrawl API.
* @param {string} url - The URL to scrape.
* @param {Params | null} params - Additional parameters for the scrape request.
* @returns {Promise<ScrapeResponse>} The response from the scrape operation.
*/
scrapeUrl(url_1) {
return __awaiter(this, arguments, void 0, function* (url, params = null) {
var _a;
const headers = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
};
let jsonData = { url };
if (params) {
jsonData = Object.assign(Object.assign({}, jsonData), params);
let jsonData = Object.assign({ url }, params);
if ((_a = params === null || params === void 0 ? void 0 : params.extractorOptions) === null || _a === void 0 ? void 0 : _a.extractionSchema) {
let schema = params.extractorOptions.extractionSchema;
// Check if schema is an instance of ZodSchema to correctly identify Zod schemas
if (schema instanceof z.ZodSchema) {
schema = zodToJsonSchema(schema);
}
jsonData = Object.assign(Object.assign({}, jsonData), { extractorOptions: Object.assign(Object.assign({}, params.extractorOptions), { extractionSchema: schema, mode: params.extractorOptions.mode || "llm-extraction" }) });
}
try {
const response = yield axios.post('https://api.firecrawl.dev/v0/scrape', jsonData, { headers });
const response = yield axios.post("https://api.firecrawl.dev/v0/scrape", jsonData, { headers });
if (response.status === 200) {
const responseData = response.data;
if (responseData.success) {
return responseData.data;
return responseData;
}
else {
throw new Error(`Failed to scrape URL. Error: ${responseData.error}`);
}
}
else {
this.handleError(response, 'scrape URL');
this.handleError(response, "scrape URL");
}
}
catch (error) {
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
});
}
/**
* Searches for a query using the Firecrawl API.
* @param {string} query - The query to search for.
* @param {Params | null} params - Additional parameters for the search request.
* @returns {Promise<SearchResponse>} The response from the search operation.
*/
search(query_1) {
return __awaiter(this, arguments, void 0, function* (query, params = null) {
const headers = {
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
};
let jsonData = { query };
if (params) {
jsonData = Object.assign(Object.assign({}, jsonData), params);
}
try {
const response = yield axios.post("https://api.firecrawl.dev/v0/search", jsonData, { headers });
if (response.status === 200) {
const responseData = response.data;
if (responseData.success) {
return responseData;
}
else {
throw new Error(`Failed to search. Error: ${responseData.error}`);
}
}
else {
this.handleError(response, "search");
}
}
catch (error) {
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
});
}
/**
* Initiates a crawl job for a URL using the Firecrawl API.
* @param {string} url - The URL to crawl.
* @param {Params | null} params - Additional parameters for the crawl request.
* @param {boolean} waitUntilDone - Whether to wait for the crawl job to complete.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<CrawlResponse | any>} The response from the crawl operation.
*/
crawlUrl(url_1) {
return __awaiter(this, arguments, void 0, function* (url, params = null, waitUntilDone = true, timeout = 2) {
const headers = this.prepareHeaders();
@ -55,26 +120,32 @@ export default class FirecrawlApp {
jsonData = Object.assign(Object.assign({}, jsonData), params);
}
try {
const response = yield this.postRequest('https://api.firecrawl.dev/v0/crawl', jsonData, headers);
const response = yield this.postRequest("https://api.firecrawl.dev/v0/crawl", jsonData, headers);
if (response.status === 200) {
const jobId = response.data.jobId;
if (waitUntilDone) {
return this.monitorJobStatus(jobId, headers, timeout);
}
else {
return { jobId };
return { success: true, jobId };
}
}
else {
this.handleError(response, 'start crawl job');
this.handleError(response, "start crawl job");
}
}
catch (error) {
console.log(error);
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
});
}
/**
* Checks the status of a crawl job using the Firecrawl API.
* @param {string} jobId - The job ID of the crawl operation.
* @returns {Promise<JobStatusResponse>} The response containing the job status.
*/
checkCrawlStatus(jobId) {
return __awaiter(this, void 0, void 0, function* () {
const headers = this.prepareHeaders();
@ -84,59 +155,93 @@ export default class FirecrawlApp {
return response.data;
}
else {
this.handleError(response, 'check crawl status');
this.handleError(response, "check crawl status");
}
}
catch (error) {
throw new Error(error.message);
}
return {
success: false,
status: "unknown",
error: "Internal server error.",
};
});
}
/**
* Prepares the headers for an API request.
* @returns {AxiosRequestHeaders} The prepared headers.
*/
prepareHeaders() {
return {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
};
}
/**
* Sends a POST request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {Params} data - The data to send in the request.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the POST request.
*/
postRequest(url, data, headers) {
return axios.post(url, data, { headers });
}
/**
* Sends a GET request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the GET request.
*/
getRequest(url, headers) {
return axios.get(url, { headers });
}
/**
* Monitors the status of a crawl job until completion or failure.
* @param {string} jobId - The job ID of the crawl operation.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<any>} The final job status or data.
*/
monitorJobStatus(jobId, headers, timeout) {
return __awaiter(this, void 0, void 0, function* () {
while (true) {
const statusResponse = yield this.getRequest(`https://api.firecrawl.dev/v0/crawl/status/${jobId}`, headers);
if (statusResponse.status === 200) {
const statusData = statusResponse.data;
if (statusData.status === 'completed') {
if ('data' in statusData) {
if (statusData.status === "completed") {
if ("data" in statusData) {
return statusData.data;
}
else {
throw new Error('Crawl job completed but no data was returned');
throw new Error("Crawl job completed but no data was returned");
}
}
else if (['active', 'paused', 'pending', 'queued'].includes(statusData.status)) {
else if (["active", "paused", "pending", "queued"].includes(statusData.status)) {
if (timeout < 2) {
timeout = 2;
}
yield new Promise(resolve => setTimeout(resolve, timeout * 1000)); // Wait for the specified timeout before checking again
yield new Promise((resolve) => setTimeout(resolve, timeout * 1000)); // Wait for the specified timeout before checking again
}
else {
throw new Error(`Crawl job failed or was stopped. Status: ${statusData.status}`);
}
}
else {
this.handleError(statusResponse, 'check crawl status');
this.handleError(statusResponse, "check crawl status");
}
}
});
}
/**
* Handles errors from API responses.
* @param {AxiosResponse} response - The response from the API.
* @param {string} action - The action being performed when the error occurred.
*/
handleError(response, action) {
if ([402, 409, 500].includes(response.status)) {
const errorMessage = response.data.error || 'Unknown error occurred';
if ([402, 408, 409, 500].includes(response.status)) {
const errorMessage = response.data.error || "Unknown error occurred";
throw new Error(`Failed to ${action}. Status code: ${response.status}. Error: ${errorMessage}`);
}
else {

View File

@ -0,0 +1,5 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
};

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +1,15 @@
{
"name": "@mendable/firecrawl-js",
"version": "0.0.9",
"version": "0.0.21",
"description": "JavaScript SDK for Firecrawl API",
"main": "build/index.js",
"types": "types/index.d.ts",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
"build": "tsc",
"publish": "npm run build && npm publish --access public",
"publish-beta": "npm run build && npm publish --access public --tag beta",
"test": "jest src/**/*.test.ts"
},
"repository": {
"type": "git",
@ -15,16 +19,19 @@
"license": "MIT",
"dependencies": {
"axios": "^1.6.8",
"dotenv": "^16.4.5"
"zod": "^3.23.8",
"zod-to-json-schema": "^3.23.0"
},
"bugs": {
"url": "https://github.com/mendableai/firecrawl/issues"
},
"homepage": "https://github.com/mendableai/firecrawl#readme",
"devDependencies": {
"@jest/globals": "^29.7.0",
"@types/axios": "^0.14.0",
"@types/dotenv": "^8.2.0",
"@types/node": "^20.12.7",
"jest": "^29.7.0",
"ts-jest": "^29.1.2",
"typescript": "^5.4.5"
},
"keywords": [

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,48 @@
import { describe, test, expect, jest } from '@jest/globals';
import axios from 'axios';
import FirecrawlApp from '../index';
import { readFile } from 'fs/promises';
import { join } from 'path';
// Mock axios with jest and set its mocked type
jest.mock('axios');
const mockedAxios = axios as jest.Mocked<typeof axios>;
// Get the fixture data from the JSON file in ./fixtures
async function loadFixture(name: string): Promise<string> {
return await readFile(join(__dirname, 'fixtures', `${name}.json`), 'utf-8')
}
describe('the firecrawl JS SDK', () => {
test('Should require an API key to instantiate FirecrawlApp', async () => {
const fn = () => {
new FirecrawlApp({ apiKey: undefined });
};
expect(fn).toThrow('No API key provided');
});
test('Should return scraped data from a /scrape API call', async () => {
const mockData = await loadFixture('scrape');
mockedAxios.post.mockResolvedValue({
status: 200,
data: JSON.parse(mockData),
});
const apiKey = 'YOUR_API_KEY'
const app = new FirecrawlApp({ apiKey });
// Scrape a single URL
const url = 'https://mendable.ai';
const scrapedData = await app.scrapeUrl(url);
expect(mockedAxios.post).toHaveBeenCalledTimes(1);
expect(mockedAxios.post).toHaveBeenCalledWith(
expect.stringMatching(/^https:\/\/api.firecrawl.dev/),
expect.objectContaining({ url }),
expect.objectContaining({ headers: expect.objectContaining({'Authorization': `Bearer ${apiKey}`}) }),
)
expect(scrapedData.success).toBe(true);
expect(scrapedData.data.metadata.title).toEqual('Mendable');
});
})

View File

@ -1,135 +1,339 @@
import axios, { AxiosResponse, AxiosRequestHeaders } from 'axios';
import dotenv from 'dotenv';
dotenv.config();
interface FirecrawlAppConfig {
import axios, { AxiosResponse, AxiosRequestHeaders } from "axios";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
/**
* Configuration interface for FirecrawlApp.
*/
export interface FirecrawlAppConfig {
apiKey?: string | null;
}
interface Params {
/**
* Generic parameter interface.
*/
export interface Params {
[key: string]: any;
extractorOptions?: {
extractionSchema: z.ZodSchema | any;
mode?: "llm-extraction";
extractionPrompt?: string;
};
}
/**
* Response interface for scraping operations.
*/
export interface ScrapeResponse {
success: boolean;
data?: any;
error?: string;
}
/**
* Response interface for searching operations.
*/
export interface SearchResponse {
success: boolean;
data?: any;
error?: string;
}
/**
* Response interface for crawling operations.
*/
export interface CrawlResponse {
success: boolean;
jobId?: string;
data?: any;
error?: string;
}
/**
* Response interface for job status checks.
*/
export interface JobStatusResponse {
success: boolean;
status: string;
jobId?: string;
data?: any;
error?: string;
}
/**
* Main class for interacting with the Firecrawl API.
*/
export default class FirecrawlApp {
private apiKey: string;
/**
* Initializes a new instance of the FirecrawlApp class.
* @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
*/
constructor({ apiKey = null }: FirecrawlAppConfig) {
this.apiKey = apiKey || process.env.FIRECRAWL_API_KEY || '';
this.apiKey = apiKey || "";
if (!this.apiKey) {
throw new Error('No API key provided');
throw new Error("No API key provided");
}
}
async scrapeUrl(url: string, params: Params | null = null): Promise<any> {
/**
* Scrapes a URL using the Firecrawl API.
* @param {string} url - The URL to scrape.
* @param {Params | null} params - Additional parameters for the scrape request.
* @returns {Promise<ScrapeResponse>} The response from the scrape operation.
*/
async scrapeUrl(
url: string,
params: Params | null = null
): Promise<ScrapeResponse> {
const headers: AxiosRequestHeaders = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
} as AxiosRequestHeaders;
let jsonData: Params = { url };
if (params) {
jsonData = { ...jsonData, ...params };
let jsonData: Params = { url, ...params };
if (params?.extractorOptions?.extractionSchema) {
let schema = params.extractorOptions.extractionSchema;
// Check if schema is an instance of ZodSchema to correctly identify Zod schemas
if (schema instanceof z.ZodSchema) {
schema = zodToJsonSchema(schema);
}
jsonData = {
...jsonData,
extractorOptions: {
...params.extractorOptions,
extractionSchema: schema,
mode: params.extractorOptions.mode || "llm-extraction",
},
};
}
try {
const response: AxiosResponse = await axios.post('https://api.firecrawl.dev/v0/scrape', jsonData, { headers });
const response: AxiosResponse = await axios.post(
"https://api.firecrawl.dev/v0/scrape",
jsonData,
{ headers },
);
if (response.status === 200) {
const responseData = response.data;
if (responseData.success) {
return responseData.data;
return responseData;
} else {
throw new Error(`Failed to scrape URL. Error: ${responseData.error}`);
}
} else {
this.handleError(response, 'scrape URL');
this.handleError(response, "scrape URL");
}
} catch (error: any) {
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
}
async crawlUrl(url: string, params: Params | null = null, waitUntilDone: boolean = true, timeout: number = 2): Promise<any> {
/**
* Searches for a query using the Firecrawl API.
* @param {string} query - The query to search for.
* @param {Params | null} params - Additional parameters for the search request.
* @returns {Promise<SearchResponse>} The response from the search operation.
*/
async search(
query: string,
params: Params | null = null
): Promise<SearchResponse> {
const headers: AxiosRequestHeaders = {
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
} as AxiosRequestHeaders;
let jsonData: Params = { query };
if (params) {
jsonData = { ...jsonData, ...params };
}
try {
const response: AxiosResponse = await axios.post(
"https://api.firecrawl.dev/v0/search",
jsonData,
{ headers }
);
if (response.status === 200) {
const responseData = response.data;
if (responseData.success) {
return responseData;
} else {
throw new Error(`Failed to search. Error: ${responseData.error}`);
}
} else {
this.handleError(response, "search");
}
} catch (error: any) {
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
}
/**
* Initiates a crawl job for a URL using the Firecrawl API.
* @param {string} url - The URL to crawl.
* @param {Params | null} params - Additional parameters for the crawl request.
* @param {boolean} waitUntilDone - Whether to wait for the crawl job to complete.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<CrawlResponse | any>} The response from the crawl operation.
*/
async crawlUrl(
url: string,
params: Params | null = null,
waitUntilDone: boolean = true,
timeout: number = 2
): Promise<CrawlResponse | any> {
const headers = this.prepareHeaders();
let jsonData: Params = { url };
if (params) {
jsonData = { ...jsonData, ...params };
}
try {
const response: AxiosResponse = await this.postRequest('https://api.firecrawl.dev/v0/crawl', jsonData, headers);
const response: AxiosResponse = await this.postRequest(
"https://api.firecrawl.dev/v0/crawl",
jsonData,
headers
);
if (response.status === 200) {
const jobId: string = response.data.jobId;
if (waitUntilDone) {
return this.monitorJobStatus(jobId, headers, timeout);
} else {
return { jobId };
return { success: true, jobId };
}
} else {
this.handleError(response, 'start crawl job');
this.handleError(response, "start crawl job");
}
} catch (error: any) {
console.log(error)
console.log(error);
throw new Error(error.message);
}
return { success: false, error: "Internal server error." };
}
async checkCrawlStatus(jobId: string): Promise<any> {
/**
* Checks the status of a crawl job using the Firecrawl API.
* @param {string} jobId - The job ID of the crawl operation.
* @returns {Promise<JobStatusResponse>} The response containing the job status.
*/
async checkCrawlStatus(jobId: string): Promise<JobStatusResponse> {
const headers: AxiosRequestHeaders = this.prepareHeaders();
try {
const response: AxiosResponse = await this.getRequest(`https://api.firecrawl.dev/v0/crawl/status/${jobId}`, headers);
const response: AxiosResponse = await this.getRequest(
`https://api.firecrawl.dev/v0/crawl/status/${jobId}`,
headers
);
if (response.status === 200) {
return response.data;
} else {
this.handleError(response, 'check crawl status');
this.handleError(response, "check crawl status");
}
} catch (error: any) {
throw new Error(error.message);
}
return {
success: false,
status: "unknown",
error: "Internal server error.",
};
}
/**
* Prepares the headers for an API request.
* @returns {AxiosRequestHeaders} The prepared headers.
*/
prepareHeaders(): AxiosRequestHeaders {
return {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`,
} as AxiosRequestHeaders;
}
postRequest(url: string, data: Params, headers: AxiosRequestHeaders): Promise<AxiosResponse> {
/**
* Sends a POST request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {Params} data - The data to send in the request.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the POST request.
*/
postRequest(
url: string,
data: Params,
headers: AxiosRequestHeaders
): Promise<AxiosResponse> {
return axios.post(url, data, { headers });
}
getRequest(url: string, headers: AxiosRequestHeaders): Promise<AxiosResponse> {
/**
* Sends a GET request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the GET request.
*/
getRequest(
url: string,
headers: AxiosRequestHeaders
): Promise<AxiosResponse> {
return axios.get(url, { headers });
}
async monitorJobStatus(jobId: string, headers: AxiosRequestHeaders, timeout: number): Promise<any> {
/**
* Monitors the status of a crawl job until completion or failure.
* @param {string} jobId - The job ID of the crawl operation.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<any>} The final job status or data.
*/
async monitorJobStatus(
jobId: string,
headers: AxiosRequestHeaders,
timeout: number
): Promise<any> {
while (true) {
const statusResponse: AxiosResponse = await this.getRequest(`https://api.firecrawl.dev/v0/crawl/status/${jobId}`, headers);
const statusResponse: AxiosResponse = await this.getRequest(
`https://api.firecrawl.dev/v0/crawl/status/${jobId}`,
headers
);
if (statusResponse.status === 200) {
const statusData = statusResponse.data;
if (statusData.status === 'completed') {
if ('data' in statusData) {
if (statusData.status === "completed") {
if ("data" in statusData) {
return statusData.data;
} else {
throw new Error('Crawl job completed but no data was returned');
throw new Error("Crawl job completed but no data was returned");
}
} else if (['active', 'paused', 'pending', 'queued'].includes(statusData.status)) {
} else if (
["active", "paused", "pending", "queued"].includes(statusData.status)
) {
if (timeout < 2) {
timeout = 2;
}
await new Promise(resolve => setTimeout(resolve, timeout * 1000)); // Wait for the specified timeout before checking again
await new Promise((resolve) => setTimeout(resolve, timeout * 1000)); // Wait for the specified timeout before checking again
} else {
throw new Error(`Crawl job failed or was stopped. Status: ${statusData.status}`);
throw new Error(
`Crawl job failed or was stopped. Status: ${statusData.status}`
);
}
} else {
this.handleError(statusResponse, 'check crawl status');
this.handleError(statusResponse, "check crawl status");
}
}
}
/**
* Handles errors from API responses.
* @param {AxiosResponse} response - The response from the API.
* @param {string} action - The action being performed when the error occurred.
*/
handleError(response: AxiosResponse, action: string): void {
if ([402, 409, 500].includes(response.status)) {
const errorMessage: string = response.data.error || 'Unknown error occurred';
throw new Error(`Failed to ${action}. Status code: ${response.status}. Error: ${errorMessage}`);
if ([402, 408, 409, 500].includes(response.status)) {
const errorMessage: string =
response.data.error || "Unknown error occurred";
throw new Error(
`Failed to ${action}. Status code: ${response.status}. Error: ${errorMessage}`
);
} else {
throw new Error(`Unexpected error occurred while trying to ${action}. Status code: ${response.status}`);
throw new Error(
`Unexpected error occurred while trying to ${action}. Status code: ${response.status}`
);
}
}
}
}

View File

@ -49,7 +49,7 @@
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
/* Emit */
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
"declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
// "sourceMap": true, /* Create source map files for emitted JavaScript files. */
@ -70,7 +70,7 @@
// "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
// "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
"declarationDir": "./types", /* Specify the output directory for generated declaration files. */
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
/* Interop Constraints */
@ -105,5 +105,7 @@
/* Completeness */
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
"skipLibCheck": true /* Skip type checking all .d.ts files. */
}
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/__tests__/*"]
}

128
apps/js-sdk/firecrawl/types/index.d.ts vendored Normal file
View File

@ -0,0 +1,128 @@
import { AxiosResponse, AxiosRequestHeaders } from "axios";
import { z } from "zod";
/**
* Configuration interface for FirecrawlApp.
*/
export interface FirecrawlAppConfig {
apiKey?: string | null;
}
/**
* Generic parameter interface.
*/
export interface Params {
[key: string]: any;
extractorOptions?: {
extractionSchema: z.ZodSchema | any;
mode?: "llm-extraction";
extractionPrompt?: string;
};
}
/**
* Response interface for scraping operations.
*/
export interface ScrapeResponse {
success: boolean;
data?: any;
error?: string;
}
/**
* Response interface for searching operations.
*/
export interface SearchResponse {
success: boolean;
data?: any;
error?: string;
}
/**
* Response interface for crawling operations.
*/
export interface CrawlResponse {
success: boolean;
jobId?: string;
data?: any;
error?: string;
}
/**
* Response interface for job status checks.
*/
export interface JobStatusResponse {
success: boolean;
status: string;
jobId?: string;
data?: any;
error?: string;
}
/**
* Main class for interacting with the Firecrawl API.
*/
export default class FirecrawlApp {
private apiKey;
/**
* Initializes a new instance of the FirecrawlApp class.
* @param {FirecrawlAppConfig} config - Configuration options for the FirecrawlApp instance.
*/
constructor({ apiKey }: FirecrawlAppConfig);
/**
* Scrapes a URL using the Firecrawl API.
* @param {string} url - The URL to scrape.
* @param {Params | null} params - Additional parameters for the scrape request.
* @returns {Promise<ScrapeResponse>} The response from the scrape operation.
*/
scrapeUrl(url: string, params?: Params | null): Promise<ScrapeResponse>;
/**
* Searches for a query using the Firecrawl API.
* @param {string} query - The query to search for.
* @param {Params | null} params - Additional parameters for the search request.
* @returns {Promise<SearchResponse>} The response from the search operation.
*/
search(query: string, params?: Params | null): Promise<SearchResponse>;
/**
* Initiates a crawl job for a URL using the Firecrawl API.
* @param {string} url - The URL to crawl.
* @param {Params | null} params - Additional parameters for the crawl request.
* @param {boolean} waitUntilDone - Whether to wait for the crawl job to complete.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<CrawlResponse | any>} The response from the crawl operation.
*/
crawlUrl(url: string, params?: Params | null, waitUntilDone?: boolean, timeout?: number): Promise<CrawlResponse | any>;
/**
* Checks the status of a crawl job using the Firecrawl API.
* @param {string} jobId - The job ID of the crawl operation.
* @returns {Promise<JobStatusResponse>} The response containing the job status.
*/
checkCrawlStatus(jobId: string): Promise<JobStatusResponse>;
/**
* Prepares the headers for an API request.
* @returns {AxiosRequestHeaders} The prepared headers.
*/
prepareHeaders(): AxiosRequestHeaders;
/**
* Sends a POST request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {Params} data - The data to send in the request.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the POST request.
*/
postRequest(url: string, data: Params, headers: AxiosRequestHeaders): Promise<AxiosResponse>;
/**
* Sends a GET request to the specified URL.
* @param {string} url - The URL to send the request to.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @returns {Promise<AxiosResponse>} The response from the GET request.
*/
getRequest(url: string, headers: AxiosRequestHeaders): Promise<AxiosResponse>;
/**
* Monitors the status of a crawl job until completion or failure.
* @param {string} jobId - The job ID of the crawl operation.
* @param {AxiosRequestHeaders} headers - The headers for the request.
* @param {number} timeout - Timeout in seconds for job status checks.
* @returns {Promise<any>} The final job status or data.
*/
monitorJobStatus(jobId: string, headers: AxiosRequestHeaders, timeout: number): Promise<any>;
/**
* Handles errors from API responses.
* @param {AxiosResponse} response - The response from the API.
* @param {string} action - The action being performed when the error occurred.
*/
handleError(response: AxiosResponse, action: string): void;
}

View File

@ -9,19 +9,480 @@
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"@mendable/firecrawl-js": "^0.0.8",
"axios": "^1.6.8"
"@mendable/firecrawl-js": "^0.0.19",
"axios": "^1.6.8",
"ts-node": "^10.9.2",
"typescript": "^5.4.5",
"zod": "^3.23.8"
},
"devDependencies": {
"tsx": "^4.9.3"
}
},
"node_modules/@cspotcode/source-map-support": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
"integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
"dependencies": {
"@jridgewell/trace-mapping": "0.3.9"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/aix-ppc64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz",
"integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==",
"cpu": [
"ppc64"
],
"dev": true,
"optional": true,
"os": [
"aix"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-arm": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz",
"integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-arm64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz",
"integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/android-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz",
"integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"android"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/darwin-arm64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz",
"integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/darwin-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz",
"integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/freebsd-arm64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz",
"integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"freebsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/freebsd-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz",
"integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"freebsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-arm": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz",
"integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==",
"cpu": [
"arm"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-arm64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz",
"integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-ia32": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz",
"integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==",
"cpu": [
"ia32"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-loong64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz",
"integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==",
"cpu": [
"loong64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-mips64el": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz",
"integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==",
"cpu": [
"mips64el"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-ppc64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz",
"integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==",
"cpu": [
"ppc64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-riscv64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz",
"integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==",
"cpu": [
"riscv64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-s390x": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz",
"integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==",
"cpu": [
"s390x"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/linux-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz",
"integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/netbsd-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz",
"integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"netbsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/openbsd-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz",
"integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"openbsd"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/sunos-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz",
"integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"sunos"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-arm64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz",
"integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==",
"cpu": [
"arm64"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-ia32": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz",
"integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==",
"cpu": [
"ia32"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@esbuild/win32-x64": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz",
"integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=12"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.9",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
"integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
"dependencies": {
"@jridgewell/resolve-uri": "^3.0.3",
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"node_modules/@mendable/firecrawl-js": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/@mendable/firecrawl-js/-/firecrawl-js-0.0.8.tgz",
"integrity": "sha512-dD7eA5X6UT8CM3z7qCqHgA4YbCsdwmmlaT/L0/ozM6gGvb0PnJMoB+e51+n4lAW8mxXOvHGbq9nrgBT1wEhhhw==",
"version": "0.0.19",
"resolved": "https://registry.npmjs.org/@mendable/firecrawl-js/-/firecrawl-js-0.0.19.tgz",
"integrity": "sha512-u9BDVIN/bftDztxLlE2cf02Nz0si3+Vmy9cANDFHj/iriT3guzI8ITBk4uC81CyRmPzNyXrW6hSAG90g9ol4cA==",
"dependencies": {
"axios": "^1.6.8",
"dotenv": "^16.4.5"
"zod": "^3.23.8",
"zod-to-json-schema": "^3.23.0"
}
},
"node_modules/@tsconfig/node10": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz",
"integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw=="
},
"node_modules/@tsconfig/node12": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz",
"integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag=="
},
"node_modules/@tsconfig/node14": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz",
"integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow=="
},
"node_modules/@tsconfig/node16": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
"integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA=="
},
"node_modules/@types/node": {
"version": "20.12.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.11.tgz",
"integrity": "sha512-vDg9PZ/zi+Nqp6boSOT7plNuthRugEKixDv5sFTIpkE89MmNtEArAShI4mxuX2+UrLEe9pxC1vm2cjm9YlWbJw==",
"peer": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/acorn": {
"version": "8.11.3",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/acorn-walk": {
"version": "8.3.2",
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz",
"integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/arg": {
"version": "4.1.3",
"resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz",
"integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA=="
},
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
@ -48,6 +509,11 @@
"node": ">= 0.8"
}
},
"node_modules/create-require": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
"integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ=="
},
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
@ -56,15 +522,50 @@
"node": ">=0.4.0"
}
},
"node_modules/diff": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
"integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==",
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/esbuild": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz",
"integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==",
"dev": true,
"hasInstallScript": true,
"bin": {
"esbuild": "bin/esbuild"
},
"engines": {
"node": ">=12"
},
"optionalDependencies": {
"@esbuild/aix-ppc64": "0.20.2",
"@esbuild/android-arm": "0.20.2",
"@esbuild/android-arm64": "0.20.2",
"@esbuild/android-x64": "0.20.2",
"@esbuild/darwin-arm64": "0.20.2",
"@esbuild/darwin-x64": "0.20.2",
"@esbuild/freebsd-arm64": "0.20.2",
"@esbuild/freebsd-x64": "0.20.2",
"@esbuild/linux-arm": "0.20.2",
"@esbuild/linux-arm64": "0.20.2",
"@esbuild/linux-ia32": "0.20.2",
"@esbuild/linux-loong64": "0.20.2",
"@esbuild/linux-mips64el": "0.20.2",
"@esbuild/linux-ppc64": "0.20.2",
"@esbuild/linux-riscv64": "0.20.2",
"@esbuild/linux-s390x": "0.20.2",
"@esbuild/linux-x64": "0.20.2",
"@esbuild/netbsd-x64": "0.20.2",
"@esbuild/openbsd-x64": "0.20.2",
"@esbuild/sunos-x64": "0.20.2",
"@esbuild/win32-arm64": "0.20.2",
"@esbuild/win32-ia32": "0.20.2",
"@esbuild/win32-x64": "0.20.2"
}
},
"node_modules/follow-redirects": {
@ -99,6 +600,37 @@
"node": ">= 6"
}
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
"dev": true,
"hasInstallScript": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/get-tsconfig": {
"version": "4.7.4",
"resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.4.tgz",
"integrity": "sha512-ofbkKj+0pjXjhejr007J/fLf+sW+8H7K5GCm+msC8q3IpvgjobpyPqSRFemNyIMxklC0zeJpi7VDFna19FacvQ==",
"dev": true,
"dependencies": {
"resolve-pkg-maps": "^1.0.0"
},
"funding": {
"url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
}
},
"node_modules/make-error": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
"integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw=="
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
@ -122,6 +654,123 @@
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"node_modules/resolve-pkg-maps": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
"integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==",
"dev": true,
"funding": {
"url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
}
},
"node_modules/ts-node": {
"version": "10.9.2",
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
"integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
"dependencies": {
"@cspotcode/source-map-support": "^0.8.0",
"@tsconfig/node10": "^1.0.7",
"@tsconfig/node12": "^1.0.7",
"@tsconfig/node14": "^1.0.0",
"@tsconfig/node16": "^1.0.2",
"acorn": "^8.4.1",
"acorn-walk": "^8.1.1",
"arg": "^4.1.0",
"create-require": "^1.1.0",
"diff": "^4.0.1",
"make-error": "^1.1.1",
"v8-compile-cache-lib": "^3.0.1",
"yn": "3.1.1"
},
"bin": {
"ts-node": "dist/bin.js",
"ts-node-cwd": "dist/bin-cwd.js",
"ts-node-esm": "dist/bin-esm.js",
"ts-node-script": "dist/bin-script.js",
"ts-node-transpile-only": "dist/bin-transpile.js",
"ts-script": "dist/bin-script-deprecated.js"
},
"peerDependencies": {
"@swc/core": ">=1.2.50",
"@swc/wasm": ">=1.2.50",
"@types/node": "*",
"typescript": ">=2.7"
},
"peerDependenciesMeta": {
"@swc/core": {
"optional": true
},
"@swc/wasm": {
"optional": true
}
}
},
"node_modules/tsx": {
"version": "4.9.3",
"resolved": "https://registry.npmjs.org/tsx/-/tsx-4.9.3.tgz",
"integrity": "sha512-czVbetlILiyJZI5zGlj2kw9vFiSeyra9liPD4nG+Thh4pKTi0AmMEQ8zdV/L2xbIVKrIqif4sUNrsMAOksx9Zg==",
"dev": true,
"dependencies": {
"esbuild": "~0.20.2",
"get-tsconfig": "^4.7.3"
},
"bin": {
"tsx": "dist/cli.mjs"
},
"engines": {
"node": ">=18.0.0"
},
"optionalDependencies": {
"fsevents": "~2.3.3"
}
},
"node_modules/typescript": {
"version": "5.4.5",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
"integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"peer": true
},
"node_modules/v8-compile-cache-lib": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg=="
},
"node_modules/yn": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
"integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==",
"engines": {
"node": ">=6"
}
},
"node_modules/zod": {
"version": "3.23.8",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz",
"integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-to-json-schema": {
"version": "3.23.0",
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.0.tgz",
"integrity": "sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==",
"peerDependencies": {
"zod": "^3.23.3"
}
}
}
}

View File

@ -11,7 +11,13 @@
"author": "",
"license": "ISC",
"dependencies": {
"@mendable/firecrawl-js": "^0.0.19",
"axios": "^1.6.8",
"ts-node": "^10.9.2",
"typescript": "^5.4.5",
"zod": "^3.23.8"
},
"devDependencies": {
"tsx": "^4.9.3"
}
}

28
apps/js-sdk/test.ts Normal file
View File

@ -0,0 +1,28 @@
import FirecrawlApp from "@mendable/firecrawl-js";
import { z } from "zod";
async function a() {
const app = new FirecrawlApp({
apiKey: "fc-YOUR_API_KEY",
});
// Define schema to extract contents into
const schema = z.object({
top: z
.array(
z.object({
title: z.string(),
points: z.number(),
by: z.string(),
commentsURL: z.string(),
})
)
.length(5)
.describe("Top 5 stories on Hacker News"),
});
const scrapeResult = await app.scrapeUrl("https://firecrawl.dev", {
extractorOptions: { extractionSchema: schema },
});
console.log(scrapeResult.data["llm_extraction"]);
}
a();

72
apps/js-sdk/tsconfig.json Normal file
View File

@ -0,0 +1,72 @@
{
"compilerOptions": {
/* Visit https://aka.ms/tsconfig.json to read more about this file */
/* Basic Options */
// "incremental": true, /* Enable incremental compilation */
"target": "es6" /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */,
"module": "commonjs" /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */,
// "lib": [], /* Specify library files to be included in the compilation. */
// "allowJs": true, /* Allow javascript files to be compiled. */
// "checkJs": true, /* Report errors in .js files. */
// "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
"declaration": true /* Generates corresponding '.d.ts' file. */,
// "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */
// "sourceMap": true, /* Generates corresponding '.map' file. */
// "outFile": "./", /* Concatenate and emit output to single file. */
"outDir": "./build" /* Redirect output structure to the directory. */,
// "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
// "composite": true, /* Enable project compilation */
// "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */
// "removeComments": true, /* Do not emit comments to output. */
// "noEmit": true, /* Do not emit outputs. */
// "importHelpers": true, /* Import emit helpers from 'tslib'. */
// "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */
// "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
/* Strict Type-Checking Options */
"strict": false /* Enable all strict type-checking options. */,
// "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */
// "strictNullChecks": true, /* Enable strict null checks. */
// "strictFunctionTypes": true, /* Enable strict checking of function types. */
// "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
// "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */
// "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */
// "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */
/* Additional Checks */
// "noUnusedLocals": true, /* Report errors on unused locals. */
// "noUnusedParameters": true, /* Report errors on unused parameters. */
// "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
// "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
/* Module Resolution Options */
// "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
// "baseUrl": "./", /* Base directory to resolve non-absolute module names. */
// "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
// "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */
// "typeRoots": [], /* List of folders to include type definitions from. */
// "types": [], /* Type declaration files to be included in compilation. */
// "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
"resolveJsonModule": true,
"esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */,
// "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
/* Source Map Options */
// "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */
// "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
/* Experimental Options */
// "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */
// "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */
/* Advanced Options */
"skipLibCheck": true /* Skip type checking of declaration files. */,
"forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */
},
"include": ["src", "test.ts"],
"exclude": ["node_modules", "**/__tests__/*"]
}

View File

@ -1,28 +1,38 @@
from fastapi import FastAPI
from playwright.async_api import async_playwright, Browser
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import os

app = FastAPI()

class UrlModel(BaseModel):
    url: str
    wait: int = None

browser: Browser = None

@app.on_event("startup")
async def startup_event():
    global browser
    playwright = await async_playwright().start()
    browser = await playwright.chromium.launch()

@app.on_event("shutdown")
async def shutdown_event():
    await browser.close()

@app.post("/html")
async def root(body: UrlModel):
    context = await browser.new_context()
    page = await context.new_page()
    await page.goto(body.url, timeout=15000)  # Set max timeout to 15s
    if body.wait:  # Check if wait parameter is provided in the request body
        await page.wait_for_timeout(body.wait)  # wait is given in milliseconds
    page_content = await page.content()
    await context.close()
    json_compatible_item_data = {"content": page_content}
    return JSONResponse(content=json_compatible_item_data)

View File

@ -46,6 +46,40 @@ To scrape a single URL, use the `scrape_url` method. It takes the URL as a param
url = 'https://example.com'
scraped_data = app.scrape_url(url)
```
### Extracting structured data from a URL
With LLM extraction, you can easily extract structured data from any URL. We support pydantic schemas to make this easier for you. Here is how to use it:
```python
from pydantic import BaseModel, Field
from typing import List

class ArticleSchema(BaseModel):
title: str
points: int
by: str
commentsURL: str
class TopArticlesSchema(BaseModel):
top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")
data = app.scrape_url('https://news.ycombinator.com', {
'extractorOptions': {
'extractionSchema': TopArticlesSchema.model_json_schema(),
'mode': 'llm-extraction'
},
'pageOptions':{
'onlyMainContent': True
}
})
print(data["llm_extraction"])
```
### Search for a query
Used to search the web, get the most relevant results, scrape each page, and return the markdown.
```python
query = 'what is mendable?'
search_result = app.search(query)
```
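Each entry in the results is a scraped page. As a rough sketch (assuming each result exposes a `markdown` field and a `metadata` block with a `sourceURL`, which may vary by API version), the results can be iterated like this:
```python
for result in search_result:
    print(result['metadata']['sourceURL'])
    print(result['markdown'][:200])  # first 200 characters of the scraped markdown
```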
### Crawling a Website

View File

@ -1,5 +1,7 @@
import os
from typing import Any, Dict, Optional
import requests
import time
class FirecrawlApp:
def __init__(self, api_key=None):
@ -7,16 +9,61 @@ class FirecrawlApp:
if self.api_key is None:
raise ValueError('No API key provided')
def scrape_url(self, url: str, params: Optional[Dict[str, Any]] = None) -> Any:
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
# Prepare the base scrape parameters with the URL
scrape_params = {'url': url}
# If there are additional params, process them
if params:
# Initialize extractorOptions if present
extractor_options = params.get('extractorOptions', {})
# Check and convert the extractionSchema if it's a Pydantic model
if 'extractionSchema' in extractor_options:
if hasattr(extractor_options['extractionSchema'], 'schema'):
extractor_options['extractionSchema'] = extractor_options['extractionSchema'].schema()
# Ensure 'mode' is set, defaulting to 'llm-extraction' if not explicitly provided
extractor_options['mode'] = extractor_options.get('mode', 'llm-extraction')
# Update the scrape_params with the processed extractorOptions
scrape_params['extractorOptions'] = extractor_options
# Include any other params directly at the top level of scrape_params
for key, value in params.items():
if key != 'extractorOptions':
scrape_params[key] = value
# Make the POST request with the prepared headers and JSON data
response = requests.post(
'https://api.firecrawl.dev/v0/scrape',
headers=headers,
json=scrape_params
)
if response.status_code == 200:
response = response.json()
if response['success']:
return response['data']
else:
raise Exception(f'Failed to scrape URL. Error: {response["error"]}')
elif response.status_code in [402, 409, 500]:
error_message = response.json().get('error', 'Unknown error occurred')
raise Exception(f'Failed to scrape URL. Status code: {response.status_code}. Error: {error_message}')
else:
raise Exception(f'Failed to scrape URL. Status code: {response.status_code}')
def search(self, query, params=None):
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
json_data = {'query': query}
if params:
json_data.update(params)
response = requests.post(
'https://api.firecrawl.dev/v0/search',
headers=headers,
json=json_data
)
@ -25,13 +72,13 @@ class FirecrawlApp:
if response['success'] == True:
return response['data']
else:
raise Exception(f'Failed to search. Error: {response["error"]}')
elif response.status_code in [402, 409, 500]:
error_message = response.json().get('error', 'Unknown error occurred')
raise Exception(f'Failed to search. Status code: {response.status_code}. Error: {error_message}')
else:
raise Exception(f'Failed to search. Status code: {response.status_code}')
def crawl_url(self, url, params=None, wait_until_done=True, timeout=2):
headers = self._prepare_headers()
@ -62,11 +109,23 @@ class FirecrawlApp:
'Authorization': f'Bearer {self.api_key}'
}
def _post_request(self, url, data, headers, retries=3, backoff_factor=0.5):
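# Retry on 502 responses with exponential backoff; any other response is returned immediately.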
for attempt in range(retries):
response = requests.post(url, headers=headers, json=data)
if response.status_code == 502:
time.sleep(backoff_factor * (2 ** attempt))
else:
return response
return response
def _get_request(self, url, headers, retries=3, backoff_factor=0.5):
for attempt in range(retries):
response = requests.get(url, headers=headers)
if response.status_code == 502:
time.sleep(backoff_factor * (2 ** attempt))
else:
return response
return response
def _monitor_job_status(self, job_id, headers, timeout):
import time

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,13 +1,73 @@
from firecrawl import FirecrawlApp
app = FirecrawlApp(api_key="YOUR_API_KEY")
# Scrape a website:
scrape_result = app.scrape_url('firecrawl.dev')
print(scrape_result['markdown'])
# Crawl a website:
crawl_result = app.crawl_url('mendable.ai', {'crawlerOptions': {'excludes': ['blog/*']}})
print(crawl_result)
job_id = crawl_result['jobId']
print(job_id)
status = app.check_crawl_status(job_id)
print(status)

# LLM Extraction:
# Define schema to extract contents into using pydantic
from pydantic import BaseModel, Field
from typing import List
class ArticleSchema(BaseModel):
title: str
points: int
by: str
commentsURL: str
class TopArticlesSchema(BaseModel):
top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")
llm_extraction_result = app.scrape_url('https://news.ycombinator.com', {
'extractorOptions': {
'extractionSchema': TopArticlesSchema.model_json_schema(),
'mode': 'llm-extraction'
},
'pageOptions':{
'onlyMainContent': True
}
})
print(llm_extraction_result['llm_extraction'])
# Define schema to extract contents into using json schema
json_schema = {
"type": "object",
"properties": {
"top": {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"points": {"type": "number"},
"by": {"type": "string"},
"commentsURL": {"type": "string"}
},
"required": ["title", "points", "by", "commentsURL"]
},
"minItems": 5,
"maxItems": 5,
"description": "Top 5 stories on Hacker News"
}
},
"required": ["top"]
}
llm_extraction_result = app.scrape_url('https://news.ycombinator.com', {
'extractorOptions': {
'extractionSchema': json_schema,
'mode': 'llm-extraction'
},
'pageOptions':{
'onlyMainContent': True
}
})
print(llm_extraction_result['llm_extraction'])

View File

@ -1,5 +1,7 @@
import os
from typing import Any, Dict, Optional
import requests
import time
class FirecrawlApp:
def __init__(self, api_key=None):
@ -7,16 +9,61 @@ class FirecrawlApp:
if self.api_key is None:
raise ValueError('No API key provided')
def scrape_url(self, url: str, params: Optional[Dict[str, Any]] = None) -> Any:
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
# Prepare the base scrape parameters with the URL
scrape_params = {'url': url}
# If there are additional params, process them
if params:
# Initialize extractorOptions if present
extractor_options = params.get('extractorOptions', {})
# Check and convert the extractionSchema if it's a Pydantic model
if 'extractionSchema' in extractor_options:
if hasattr(extractor_options['extractionSchema'], 'schema'):
extractor_options['extractionSchema'] = extractor_options['extractionSchema'].schema()
# Ensure 'mode' is set, defaulting to 'llm-extraction' if not explicitly provided
extractor_options['mode'] = extractor_options.get('mode', 'llm-extraction')
# Update the scrape_params with the processed extractorOptions
scrape_params['extractorOptions'] = extractor_options
# Include any other params directly at the top level of scrape_params
for key, value in params.items():
if key != 'extractorOptions':
scrape_params[key] = value
# Make the POST request with the prepared headers and JSON data
response = requests.post(
'https://api.firecrawl.dev/v0/scrape',
headers=headers,
json=scrape_params
)
if response.status_code == 200:
response = response.json()
if response['success']:
return response['data']
else:
raise Exception(f'Failed to scrape URL. Error: {response["error"]}')
elif response.status_code in [402, 409, 500]:
error_message = response.json().get('error', 'Unknown error occurred')
raise Exception(f'Failed to scrape URL. Status code: {response.status_code}. Error: {error_message}')
else:
raise Exception(f'Failed to scrape URL. Status code: {response.status_code}')
def search(self, query, params=None):
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
json_data = {'query': query}
if params:
json_data.update(params)
response = requests.post(
'https://api.firecrawl.dev/v0/search',
headers=headers,
json=json_data
)
@ -25,13 +72,13 @@ class FirecrawlApp:
if response['success'] == True:
return response['data']
else:
raise Exception(f'Failed to search. Error: {response["error"]}')
elif response.status_code in [402, 409, 500]:
error_message = response.json().get('error', 'Unknown error occurred')
raise Exception(f'Failed to search. Status code: {response.status_code}. Error: {error_message}')
else:
raise Exception(f'Failed to search. Status code: {response.status_code}')
def crawl_url(self, url, params=None, wait_until_done=True, timeout=2):
headers = self._prepare_headers()
@ -62,11 +109,23 @@ class FirecrawlApp:
'Authorization': f'Bearer {self.api_key}'
}
def _post_request(self, url, data, headers, retries=3, backoff_factor=0.5):
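# Retry on 502 responses with exponential backoff; any other response is returned immediately.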
for attempt in range(retries):
response = requests.post(url, headers=headers, json=data)
if response.status_code == 502:
time.sleep(backoff_factor * (2 ** attempt))
else:
return response
return response
def _get_request(self, url, headers, retries=3, backoff_factor=0.5):
for attempt in range(retries):
response = requests.get(url, headers=headers)
if response.status_code == 502:
time.sleep(backoff_factor * (2 ** attempt))
else:
return response
return response
def _monitor_job_status(self, job_id, headers, timeout):
import time

View File

@ -1,7 +1,7 @@
Metadata-Version: 2.1
Name: firecrawl-py
Version: 0.0.8
Summary: Python SDK for Firecrawl API
Home-page: https://github.com/mendableai/firecrawl
Author: Mendable.ai
Author-email: nick@mendable.ai

View File

@ -2,12 +2,12 @@ from setuptools import setup, find_packages
setup(
name='firecrawl-py',
version='0.0.8',
url='https://github.com/mendableai/firecrawl',
author='Mendable.ai',
author_email='nick@mendable.ai',
description='Python SDK for Firecrawl API',
packages=find_packages(),
install_requires=[
'requests',
],

View File

@ -0,0 +1,5 @@
OPENAI_API_KEY=
TEST_API_KEY=
TEST_URL=http://localhost:3002
ANTHROPIC_API_KEY=
ENV=

43
apps/test-suite/README.md Normal file
View File

@ -0,0 +1,43 @@
# Test Suite for Firecrawl
This document provides an overview of the test suite for the Firecrawl project. It includes instructions on how to run the tests and interpret the results.
## Overview
The test suite is designed to ensure the reliability and performance of the Firecrawl system. It includes a series of automated tests that check various functionalities and performance metrics.
## Running the Tests
To run the tests, navigate to the `test-suite` directory and execute the following commands:
```bash
npm install
npx playwright install
npm run test
```
## Test Results
The tests are designed to cover various aspects of the system, including:
- Crawling accuracy
- Response time
- Error handling
### Example Test Case
- **Test Name**: Accuracy Test
- **Description**: This test checks the accuracy of the scraping mechanism with 100 pages and a fuzzy threshold of 0.8.
- **Expected Result**: Accuracy >= 0.9
- **Received Result**: Accuracy between 0.2 and 0.3
## Troubleshooting
If you encounter any failures or unexpected results, please check the following:
- Ensure your network connection is stable.
- Verify that all dependencies are correctly installed.
- Review the error logs for any specific error messages.
## Contributing
Contributions to the test suite are welcome. Please refer to the project's main [CONTRIBUTING.md](../CONTRIBUTING.md) file for guidelines on how to contribute.

View File

@ -0,0 +1,118 @@
[
{
"website": "https://www.anthropic.com/claude",
"prompt": "Does this website contain pricing information?",
"expected_output": "yes"
},
{
"website": "https://mendable.ai/pricing",
"prompt": "Does this website contain pricing information?",
"expected_output": "yes"
},
{
"website": "https://openai.com/news",
"prompt": "Does this website contain a list of research news?",
"expected_output": "yes"
},
{
"website": "https://agentops.ai",
"prompt": "Does this website contain a code snippets?",
"expected_output": "yes"
},
{
"website": "https://ycombinator.com/companies",
"prompt": "Does this website contain a list bigger than 5 of ycombinator companies?",
"expected_output": "yes"
},
{
"website": "https://firecrawl.dev",
"prompt": "Does this website contain a list bigger than 5 of ycombinator companies?",
"expected_output": "no"
},
{
"website": "https://en.wikipedia.org/wiki/T._N._Seshan",
"prompt": "Does this website talk about Seshan's career?",
"expected_output": "yes"
},
{
"website": "https://mendable.ai/blog",
"prompt": "Does this website contain multiple blog articles?",
"expected_output": "yes"
},
{
"website": "https://www.framer.com/pricing",
"prompt": "Is there an enterprise pricing option?",
"expected_output": "yes"
},
{
"website": "https://fly.io/docs/gpus/gpu-quickstart",
"prompt": "Is there a fly deploy command on this page?",
"expected_output": "yes"
},
{
"website": "https://news.ycombinator.com/",
"prompt": "Does this website contain a list of articles in a table markdown format?",
"expected_output": "yes"
},
{
"website": "https://www.vellum.ai/llm-leaderboard",
"prompt": "Does this website contain a model comparison table?",
"expected_output": "yes"
},
{
"website": "https://www.bigbadtoystore.com",
"prompt": "are there more than 3 toys in the new arrivals section?",
"expected_output": "yes"
},
{
"website": "https://www.instructables.com",
"prompt": "Does the site offer more than 5 links about circuits?",
"expected_output": "yes"
},
{
"website": "https://www.powells.com",
"prompt": "is there at least 10 books webpage links?",
"expected_output": "yes"
},
{
"website": "https://www.royalacademy.org.uk",
"prompt": "is there information on upcoming art exhibitions?",
"expected_output": "yes"
},
{
"website": "https://www.eastbaytimes.com",
"prompt": "Is there a Trending Nationally section that lists articles?",
"expected_output": "yes"
},
{
"website": "https://www.manchestereveningnews.co.uk",
"prompt": "is the content focused on Manchester sports news?",
"expected_output": "no"
},
{
"website": "https://physicsworld.com",
"prompt": "does the site provide at least 15 updates on the latest physics research?",
"expected_output": "yes"
},
{
"website": "https://richmondconfidential.org",
"prompt": "does the page contains more than 4 articles?",
"expected_output": "yes"
},
{
"website": "https://www.techinasia.com",
"prompt": "is there at least 10 articles of the startup scene in Asia?",
"expected_output": "yes",
"notes": "The website has a paywall and bot detectors."
},
{
"website": "https://www.boardgamegeek.com",
"prompt": "are there more than 5 board game news?",
"expected_output": "yes"
},
{
"website": "https://www.mountainproject.com",
"prompt": "Are there more than 3 climbing guides for Arizona?",
"expected_output": "yes"
}
]

View File

@ -0,0 +1,189 @@
import request from "supertest";
import dotenv from "dotenv";
import Anthropic from "@anthropic-ai/sdk";
import { numTokensFromString } from "./utils/tokens";
import OpenAI from "openai";
import { WebsiteScrapeError } from "./utils/types";
import { logErrors } from "./utils/log";
const websitesData = require("./data/websites.json");
import "dotenv/config";
const fs = require('fs');
dotenv.config();
interface WebsiteData {
website: string;
prompt: string;
expected_output: string;
}
const TEST_URL = "http://127.0.0.1:3002";
describe("Scraping/Crawling Checkup (E2E)", () => {
beforeAll(() => {
if (!process.env.TEST_API_KEY) {
throw new Error("TEST_API_KEY is not set");
}
if (!process.env.OPENAI_API_KEY) {
throw new Error("OPENAI_API_KEY is not set");
}
});
describe("Scraping website tests with a dataset", () => {
it("Should scrape the website and prompt it against OpenAI", async () => {
let passedTests = 0;
const batchSize = 15; // Adjusted to comply with the rate limit of 15 per minute
const batchPromises = [];
let totalTokens = 0;
const startTime = new Date().getTime();
const date = new Date();
const logsDir = `logs/${date.getMonth() + 1}-${date.getDate()}-${date.getFullYear()}`;
let errorLogFileName = `${logsDir}/run.log_${new Date().toTimeString().split(' ')[0]}`;
const errorLog: WebsiteScrapeError[] = [];
for (let i = 0; i < websitesData.length; i += batchSize) {
// Introducing delay to respect the rate limit of 15 requests per minute
await new Promise(resolve => setTimeout(resolve, 10000));
const batch = websitesData.slice(i, i + batchSize);
const batchPromise = Promise.all(
batch.map(async (websiteData: WebsiteData) => {
try {
const scrapedContent = await request(TEST_URL || "")
.post("/v0/scrape")
.set("Content-Type", "application/json")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.send({ url: websiteData.website, pageOptions: { onlyMainContent: true } });
if (scrapedContent.statusCode !== 200) {
console.error(`Failed to scrape ${websiteData.website} ${scrapedContent.statusCode}`);
errorLog.push({
website: websiteData.website,
prompt: websiteData.prompt,
expected_output: websiteData.expected_output,
actual_output: "",
error: `Failed to scrape website. ${scrapedContent.statusCode} ${scrapedContent.body.error}`
});
return null;
}
const anthropic = new Anthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
});
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
const prompt = `Based on this markdown extracted from a website html page, ${websiteData.prompt} Just say 'yes' or 'no' to the question.\nWebsite markdown: ${scrapedContent.body.data.markdown}\n`;
let msg = null;
const maxRetries = 3;
let attempts = 0;
while (!msg && attempts < maxRetries) {
try {
msg = await openai.chat.completions.create({
model: "gpt-4-turbo",
max_tokens: 100,
temperature: 0,
messages: [
{
role: "user",
content: prompt
},
],
});
} catch (error) {
console.error(`Attempt ${attempts + 1}: Failed to prompt for ${websiteData.website}, error: ${error}`);
attempts++;
if (attempts < maxRetries) {
console.log(`Retrying... Attempt ${attempts + 1}`);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait for 2 seconds before retrying
}
}
}
if (!msg) {
console.error(`Failed to prompt for ${websiteData.website} after ${maxRetries} attempts`);
errorLog.push({
website: websiteData.website,
prompt: websiteData.prompt,
expected_output: websiteData.expected_output,
actual_output: "",
error: "Failed to prompt... model error."
});
return null;
}
const actualOutput = (msg.choices[0].message.content ?? "").toLowerCase()
const expectedOutput = websiteData.expected_output.toLowerCase();
const numTokens = numTokensFromString(prompt,"gpt-4") + numTokensFromString(actualOutput,"gpt-4");
totalTokens += numTokens;
if (actualOutput.includes(expectedOutput)) {
passedTests++;
} else {
console.error(
`This website failed the test: ${websiteData.website}`
);
console.error(`Actual output: ${actualOutput}`);
errorLog.push({
website: websiteData.website,
prompt: websiteData.prompt,
expected_output: websiteData.expected_output,
actual_output: actualOutput,
error: "Output mismatch"
});
}
return {
website: websiteData.website,
prompt: websiteData.prompt,
expectedOutput,
actualOutput,
};
} catch (error) {
console.error(
`Error processing ${websiteData.website}: ${error}`
);
errorLog.push({
website: websiteData.website,
prompt: websiteData.prompt,
expected_output: websiteData.expected_output,
actual_output: "",
error: `Error processing ${websiteData.website}: ${error}`
});
return null;
}
})
);
batchPromises.push(batchPromise);
}
(await Promise.all(batchPromises)).flat();
const score = (passedTests / websitesData.length) * 100;
const endTime = new Date().getTime();
const timeTaken = (endTime - startTime) / 1000;
console.log(`Score: ${score}%`);
console.log(`Total tokens: ${totalTokens}`);
await logErrors(errorLog, timeTaken, totalTokens, score, websitesData.length);
if (process.env.ENV === "local" && errorLog.length > 0) {
if (!fs.existsSync(logsDir)){
fs.mkdirSync(logsDir, { recursive: true });
}
fs.writeFileSync(errorLogFileName, JSON.stringify(errorLog, null, 2));
}
expect(score).toBeGreaterThanOrEqual(75);
}, 350000); // 350 seconds timeout
});
});

View File

@ -0,0 +1,5 @@
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
setupFiles: ["./jest.setup.js"],
};

View File

View File

@ -0,0 +1,26 @@
{
"name": "test-suite",
"version": "1.0.0",
"description": "",
"scripts": {
"test": "npx jest --detectOpenHandles --forceExit --openHandlesTimeout=120000 --watchAll=false"
},
"author": "",
"license": "ISC",
"dependencies": {
"@anthropic-ai/sdk": "^0.20.8",
"@dqbd/tiktoken": "^1.0.14",
"@supabase/supabase-js": "^2.43.1",
"dotenv": "^16.4.5",
"jest": "^29.7.0",
"openai": "^4.40.2",
"playwright": "^1.43.1",
"supertest": "^7.0.0",
"ts-jest": "^29.1.2"
},
"devDependencies": {
"@types/jest": "^29.5.12",
"@types/supertest": "^6.0.2",
"typescript": "^5.4.5"
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,109 @@
{
"compilerOptions": {
/* Visit https://aka.ms/tsconfig to read more about this file */
/* Projects */
// "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
// "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
/* Language and Environment */
"target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
// "jsx": "preserve", /* Specify what JSX code is generated. */
// "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
// "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
// "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
// "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
/* Modules */
"module": "commonjs", /* Specify what module code is generated. */
// "rootDir": "./", /* Specify the root folder within your source files. */
// "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
// "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
// "types": [], /* Specify type package names to be included without being referenced in a source file. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
// "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
// "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
// "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */
// "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */
// "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
// "resolveJsonModule": true, /* Enable importing .json files. */
// "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */
// "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
/* JavaScript Support */
// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
/* Emit */
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
// "sourceMap": true, /* Create source map files for emitted JavaScript files. */
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
// "outDir": "./", /* Specify an output folder for all emitted files. */
// "removeComments": true, /* Disable emitting comments. */
// "noEmit": true, /* Disable emitting files from a compilation. */
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
// "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */
// "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
// "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
// "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
// "newLine": "crlf", /* Set the newline character for emitting files. */
// "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
// "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
// "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
/* Interop Constraints */
// "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
// "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
// "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
"esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
// "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
"forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
/* Type Checking */
"strict": true, /* Enable all strict type-checking options. */
// "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
// "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
// "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
// "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
// "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
// "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
// "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
// "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
// "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
// "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
// "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
// "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
// "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
// "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
// "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
// "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
// "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
// "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
/* Completeness */
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
"skipLibCheck": true /* Skip type checking all .d.ts files. */
}
}

View File

@ -0,0 +1,10 @@
import { supabase_service } from "./supabase";
import { WebsiteScrapeError } from "./types";
export async function logErrors(dataError: WebsiteScrapeError[], time_taken: number, num_tokens:number, score: number, num_pages_tested: number,) {
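// Writes one row per test run to the test_suite_logs table; is_error flags runs that produced at least one failure.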
try {
await supabase_service.from("test_suite_logs").insert([{log:dataError, time_taken, num_tokens, score, num_pages_tested, is_error: dataError.length > 0}]);
} catch (error) {
console.error(`Error logging to supabase: ${error}`);
}
}

Some files were not shown because too many files have changed in this diff