@@ -7,7 +7,10 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
77 const response = await fetch(
88 "https://api-inference.huggingface.co/models/${ model . id } ",
99 {
10- headers: { Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } " },
10+ headers: {
11+ Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } ",
12+ "Content-Type": "application/json",
13+ },
1114 method: "POST",
1215 body: JSON.stringify(data),
1316 }
@@ -20,12 +23,34 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
2023 console.log(JSON.stringify(response));
2124});` ;
2225
26+ export const snippetTextGeneration = ( model : ModelDataMinimal , accessToken : string ) : string => {
27+ if ( model . config ?. tokenizer_config ?. chat_template ) {
28+ // Conversational model detected, so we display a code snippet that features the Messages API
29+ return `import { HfInference } from "@huggingface/inference";
30+
31+ const inference = new HfInference("${ accessToken || `{API_TOKEN}` } ");
32+
33+ for await (const chunk of inference.chatCompletionStream({
34+ model: "${ model . id } ",
35+ messages: [{ role: "user", content: "What is the capital of France?" }],
36+ max_tokens: 500,
37+ })) {
38+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
39+ }
40+ ` ;
41+ } else {
42+ return snippetBasic ( model , accessToken ) ;
43+ }
44+ } ;
2345export const snippetZeroShotClassification = ( model : ModelDataMinimal , accessToken : string ) : string =>
2446 `async function query(data) {
2547 const response = await fetch(
2648 "https://api-inference.huggingface.co/models/${ model . id } ",
2749 {
28- headers: { Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } " },
50+ headers: {
51+ Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } ",
52+ "Content-Type": "application/json",
53+ },
2954 method: "POST",
3055 body: JSON.stringify(data),
3156 }
@@ -45,7 +70,10 @@ export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string)
4570 const response = await fetch(
4671 "https://api-inference.huggingface.co/models/${ model . id } ",
4772 {
48- headers: { Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } " },
73+ headers: {
74+ Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } ",
75+ "Content-Type": "application/json",
76+ },
4977 method: "POST",
5078 body: JSON.stringify(data),
5179 }
@@ -62,7 +90,10 @@ export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string)
6290 const response = await fetch(
6391 "https://api-inference.huggingface.co/models/${ model . id } ",
6492 {
65- headers: { Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } " },
93+ headers: {
94+ Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } ",
95+ "Content-Type": "application/json",
96+ },
6697 method: "POST",
6798 body: JSON.stringify(data),
6899 }
@@ -99,7 +130,10 @@ export const snippetFile = (model: ModelDataMinimal, accessToken: string): strin
99130 const response = await fetch(
100131 "https://api-inference.huggingface.co/models/${ model . id } ",
101132 {
102- headers: { Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } " },
133+ headers: {
134+ Authorization: "Bearer ${ accessToken || `{API_TOKEN}` } ",
135+ "Content-Type": "application/json",
136+ },
103137 method: "POST",
104138 body: data,
105139 }
@@ -122,7 +156,7 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal,
122156 translation : snippetBasic ,
123157 summarization : snippetBasic ,
124158 "feature-extraction" : snippetBasic ,
125- "text-generation" : snippetBasic ,
159+ "text-generation" : snippetTextGeneration ,
126160 "text2text-generation" : snippetBasic ,
127161 "fill-mask" : snippetBasic ,
128162 "sentence-similarity" : snippetBasic ,
0 commit comments