// Programmatically interact with the Hub
await createRepo({
repo: {type: "model", name: "my-user/nlp-model"},
accessToken: HF_TOKEN
});
await uploadFile({
repo: "my-user/nlp-model",
accessToken: HF_TOKEN,
// Can work with native File in browsers
file: {
path: "pytorch_model.bin",
content: new Blob(...)
}
});
// Use Inference API
await inference.chatCompletion({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [
{
role: "user",
content: "Hello, nice to meet you!",
},
],
max_tokens: 512,
temperature: 0.5,
});
await inference.textToImage({
model: "black-forest-labs/FLUX.1-dev",
inputs: "a picture of a green bird",
});
// and much more…
This is a collection of JS libraries to interact with the Hugging Face API, with TS types included.
- @huggingface/inference: Use Inference Endpoints (dedicated) and Inference API (serverless) to make calls to 100,000+ Machine Learning models
- @huggingface/hub: Interact with huggingface.co to create or delete repos and commit / download files
- @huggingface/agents: Interact with HF models through a natural language interface
- @huggingface/gguf: A GGUF parser that works on remotely hosted files (see the sketch after this list)
- @huggingface/tasks: The definition files and source-of-truth for the Hub's main primitives like pipeline tasks, model libraries, etc.
- @huggingface/space-header: Use the Space mini_header outside Hugging Face
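For example, here is a minimal sketch of the GGUF parser; the model URL is a placeholder, and it assumes the gguf export resolves the file's metadata and tensor descriptions:
import { gguf } from "@huggingface/gguf";
// Parse the header of a remotely hosted GGUF file without downloading the weights
// (the URL is a placeholder; point it at any .gguf file on the Hub)
const { metadata, tensorInfos } = await gguf(
"https://huggingface.co/my-user/my-model/resolve/main/model.gguf"
);
console.log(metadata, tensorInfos);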
We use modern features to avoid polyfills and dependencies, so the libraries will only work on modern browsers / Node.js >= 18 / Bun / Deno.
The libraries are still very young; please help us by opening issues!
To install via NPM, add only the libraries you need:
npm install @huggingface/inference
npm install @huggingface/hub
npm install @huggingface/agents
Then import the libraries in your code:
import { HfInference } from "@huggingface/inference";
import { HfAgent } from "@huggingface/agents";
import { createRepo, commit, deleteRepo, listFiles } from "@huggingface/hub";
import type { RepoId } from "@huggingface/hub";
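For example, a minimal sketch of the exported types in use, assuming RepoId is the structured form of a repo reference:
// RepoId pairs a repo type with its full name; most hub functions
// also accept the plain "my-user/nlp-model" string shorthand
const repo: RepoId = { type: "model", name: "my-user/nlp-model" };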
You can run our packages with vanilla JS, without any bundler, by using a CDN or static hosting. Using ES modules, i.e. <script type="module">, you can import the libraries in your code:
<script type="module">
import { HfInference } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm";
import { createRepo, commit, deleteRepo, listFiles } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm";
</script>
For Deno, import from esm.sh or via npm: specifiers:
// esm.sh
import { HfInference } from "https://esm.sh/@huggingface/inference"
import { HfAgent } from "https://esm.sh/@huggingface/agents";
import { createRepo, commit, deleteRepo, listFiles } from "https://esm.sh/@huggingface/hub"
// or npm:
import { HfInference } from "npm:@huggingface/inference"
import { HfAgent } from "npm:@huggingface/agents";
import { createRepo, commit, deleteRepo, listFiles } from "npm:@huggingface/hub"
Get your HF access token in your account settings.
import { HfInference } from "@huggingface/inference";
const HF_TOKEN = "hf_...";
const inference = new HfInference(HF_TOKEN);
// Chat completion API
const out = await inference.chatCompletion({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512
});
console.log(out.choices[0].message);
// Streaming chat completion API
for await (const chunk of inference.chatCompletionStream({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512
})) {
console.log(chunk.choices[0].delta.content);
}
// You can also omit "model" to use the recommended model for the task
await inference.translation({
inputs: "My name is Wolfgang and I live in Amsterdam",
parameters: {
src_lang: "en",
tgt_lang: "fr",
},
});
await inference.textToImage({
model: "black-forest-labs/FLUX.1-dev",
inputs: "a picture of a green bird",
});
await inference.imageToText({
data: await (await fetch("https://picsum.photos/300/300")).blob(),
model: "nlpconnect/vit-gpt2-image-captioning",
});
// Using your own dedicated inference endpoint: https://hf.co/docs/inference-endpoints/
const gpt2 = inference.endpoint("https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2");
const { generated_text } = await gpt2.textGeneration({ inputs: "The answer to the universe is" });
// Chat Completion
const llamaEndpoint = inference.endpoint(
"https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct"
);
const response = await llamaEndpoint.chatCompletion({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512,
});
console.log(response.choices[0].message);
import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub";
const HF_TOKEN = "hf_...";
await createRepo({
repo: "my-user/nlp-model", // or {type: "model", name: "my-user/nlp-test"},
accessToken: HF_TOKEN
});
await uploadFile({
repo: "my-user/nlp-model",
accessToken: HF_TOKEN,
// Can work with native File in browsers
file: {
path: "pytorch_model.bin",
content: new Blob(...)
}
});
await deleteFiles({
repo: {type: "space", name: "my-user/my-space"}, // or "spaces/my-user/my-space"
accessToken: HF_TOKEN,
paths: ["README.md", ".gitattributes"]
});
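You can also list a repo's files; a minimal sketch, assuming listFiles yields entries carrying path and size fields:
import { listFiles } from "@huggingface/hub";
// listFiles returns an async generator over the repo's file entries
for await (const fileInfo of listFiles({ repo: "my-user/nlp-model" })) {
console.log(fileInfo.path, fileInfo.size);
}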
import { HfAgent, LLMFromHub, defaultTools } from "@huggingface/agents";
const HF_TOKEN = "hf_...";
const agent = new HfAgent(
HF_TOKEN,
LLMFromHub(HF_TOKEN),
[...defaultTools]
);
// you can generate the code, inspect it and then run it
const code = await agent.generateCode("Draw a picture of a cat wearing a top hat. Then caption the picture and read it out loud.");
console.log(code);
const messages = await agent.evaluateCode(code);
console.log(messages); // contains the data

// Or you can run the code directly; however, there is no way to check that
// the generated code is safe to execute this way, so use it at your own risk
const results = await agent.run("Draw a picture of a cat wearing a top hat. Then caption the picture and read it out loud.");
console.log(results);
There are more features, of course; check each library's README!
To contribute or develop locally, enable corepack (which provides pnpm) and install the dependencies:
sudo corepack enable
pnpm install
pnpm -r format:check
pnpm -r lint:check
pnpm -r test
pnpm -r build
This will generate ESM and CJS JavaScript files in packages/*/dist, e.g. packages/inference/dist/index.mjs.
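As a quick sanity check of the dual build, a minimal sketch of consuming each output (assuming the package's exports map wires up both entry points):
// In an ESM file, the import resolves to the .mjs build:
import { HfInference } from "@huggingface/inference";
// In a CommonJS file, require() resolves to the CJS build instead:
// const { HfInference } = require("@huggingface/inference");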