This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

chore: update example
hlhr202 committed Apr 23, 2023
1 parent 9fa5b4e commit 653aa69
Showing 27 changed files with 234 additions and 27 deletions.
29 changes: 29 additions & 0 deletions example/js/langchain/langchain.js
@@ -0,0 +1,29 @@
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { LLamaEmbeddings } from "llama-node/dist/extensions/langchain.js";
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: true, // embeddings are enabled because LLamaEmbeddings uses them
    useMmap: true,
};

llama.load(config);

const run = async () => {
    // Load the docs into the vector store
    const vectorStore = await MemoryVectorStore.fromTexts(
        ["Hello world", "Bye bye", "hello nice world"],
        [{ id: 2 }, { id: 1 }, { id: 3 }],
        new LLamaEmbeddings({ maxConcurrency: 1 }, llama)
    );

    // Search for the most similar document
    const resultOne = await vectorStore.similaritySearch("hello world", 1);
    console.log(resultOne);
};

run();
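
Since LLamaEmbeddings implements langchain's Embeddings interface, it can also be used on its own, outside a vector store. A minimal sketch, assuming the standard embedQuery method from langchain's Embeddings base class:

// Hypothetical standalone use of the embedder (not part of the original example).
const embeddings = new LLamaEmbeddings({ maxConcurrency: 1 }, llama);
embeddings.embedQuery("hello world").then((vector) => {
    // vector is a plain number[] produced by the llama.cpp backend
    console.log(vector.length);
});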
30 changes: 30 additions & 0 deletions example/js/llama-cpp/embedding.js
@@ -0,0 +1,30 @@
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: true, // embeddings are enabled so getEmbedding can be called below
    useMmap: true,
};

llama.load(config);

const prompt = `Who is the president of the United States?`;

const params = {
    nThreads: 4,
    nTokPredict: 2048,
    topK: 40,
    topP: 0.1,
    temp: 0.2,
    repeatPenalty: 1,
    prompt,
};

llama.getEmbedding(params).then(console.log);
37 changes: 37 additions & 0 deletions example/js/llama-cpp/llama-cpp.js
@@ -0,0 +1,37 @@
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: false,
    useMmap: true,
};

llama.load(config);

const template = `How are you?`;
const prompt = `### Human:
${template}
### Assistant:`;

llama.createCompletion(
    {
        nThreads: 4,
        nTokPredict: 2048,
        topK: 40,
        topP: 0.1,
        temp: 0.2,
        repeatPenalty: 1,
        stopSequence: "### Human",
        prompt,
    },
    (response) => {
        process.stdout.write(response.token);
    }
);
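
The callback above receives the completion one token at a time. To collect the whole completion as a single string, a small wrapper can accumulate the streamed tokens. This is a sketch under one assumption: that createCompletion returns a promise which settles when generation finishes; if it does not, the wrapper would need to resolve from the callback instead.

// Hypothetical helper: gather streamed tokens into one string.
const complete = async (params) => {
    let text = "";
    await llama.createCompletion(params, (response) => {
        text += response.token; // append each streamed token
    });
    return text;
};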
21 changes: 21 additions & 0 deletions example/js/llama-cpp/tokenize.js
@@ -0,0 +1,21 @@
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: false,
    useMmap: true,
};

llama.load(config);

const content = "how are you?";
llama.tokenize({ content, nCtx: 2048 }).then(console.log);
30 changes: 30 additions & 0 deletions example/js/llama-rs/embedding.js
@@ -0,0 +1,30 @@
import { LLama } from "llama-node";
import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
import path from "path";
import fs from "fs";

const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");
const llama = new LLama(LLamaRS);

llama.load({ path: model });

const getWordEmbeddings = async (prompt, file) => {
    const data = await llama.getEmbedding({
        prompt,
        numPredict: 128,
        temp: 0.2,
        topP: 1,
        topK: 40,
        repeatPenalty: 1,
        repeatLastN: 64,
        seed: 0,
    });
    console.log(prompt, data);
    await fs.promises.writeFile(
        path.resolve(process.cwd(), file),
        JSON.stringify(data)
    );
};

const run = async () => {
    const dog1 = `My favourite animal is the dog`;
    await getWordEmbeddings(dog1, "./example/semantic-compare/dog1.json");

    const dog2 = `I have just adopted a cute dog`;
    await getWordEmbeddings(dog2, "./example/semantic-compare/dog2.json");

    const cat1 = `My favourite animal is the cat`;
    await getWordEmbeddings(cat1, "./example/semantic-compare/cat1.json");
};

run();
27 changes: 27 additions & 0 deletions example/js/llama-rs/llama-rs.js
@@ -0,0 +1,27 @@
import { LLama } from "llama-node";
import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");
const llama = new LLama(LLamaRS);

llama.load({ path: model });

const template = `how are you`;
const prompt = `Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
${template}
### Response:`;

llama.createCompletion(
    {
        prompt,
        numPredict: 128,
        temp: 0.2,
        topP: 1,
        topK: 40,
        repeatPenalty: 1,
        repeatLastN: 64,
        seed: 0,
        feedPrompt: true,
    },
    (response) => {
        process.stdout.write(response.token);
    }
);
1 change: 1 addition & 0 deletions example/js/llama-rs/semantic-compare/cat1.json

Large diffs are not rendered by default.

16 changes: 16 additions & 0 deletions example/js/llama-rs/semantic-compare/compare.js
@@ -0,0 +1,16 @@
import * as tf from "@tensorflow/tfjs-node";
import dog1 from "./dog1.json";
import dog2 from "./dog2.json";
import cat1 from "./cat1.json";

const dog1Tensor = tf.tensor(dog1);
const dog2Tensor = tf.tensor(dog2);
const cat1Tensor = tf.tensor(cat1);

// Cosine similarity: dot(a, b) / (|a| * |b|)
const compareCosineSimilarity = (tensor1, tensor2) => {
    const dotProduct = tensor1.dot(tensor2);
    const norm1 = tensor1.norm();
    const norm2 = tensor2.norm();
    const cosineSimilarity = dotProduct.div(norm1.mul(norm2));
    return cosineSimilarity.dataSync()[0];
};

console.log("dog1 vs dog2", compareCosineSimilarity(dog1Tensor, dog2Tensor));
console.log("dog1 vs cat1", compareCosineSimilarity(dog1Tensor, cat1Tensor));
1 change: 1 addition & 0 deletions example/js/llama-rs/semantic-compare/dog1.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions example/js/llama-rs/semantic-compare/dog2.json

Large diffs are not rendered by default.

8 changes: 8 additions & 0 deletions example/js/llama-rs/tokenize.js
@@ -0,0 +1,8 @@
import { LLama } from "llama-node";
import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");
const llama = new LLama(LLamaRS);

llama.load({ path: model });

const content = "how are you?";
llama.tokenize(content).then(console.log);
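
Note that the two backends expose different tokenize signatures: the llama.cpp binding earlier takes an options object (llama.tokenize({ content, nCtx })), while the llama-rs binding here takes the string directly.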
18 changes: 8 additions & 10 deletions example/package.json
@@ -1,27 +1,25 @@
{
  "name": "@llama-node/examples",
-  "version": "1.0.0",
+  "version": "0.0.30",
  "description": "",
  "main": "index.js",
  "type": "module",
  "scripts": {
-    "build": "tsc -p .",
-    "langchain": "tsx src/langchain/langchain.ts",
-    "llama-cpp": "tsx src/llama-cpp/llama-cpp.ts",
-    "llama-rs": "tsx src/llama-rs/llama-rs.ts",
-    "test": "echo \"Error: no test specified\" && exit 1"
+    "langchain": "node js/langchain/langchain.js",
+    "llama-cpp": "node js/llama-cpp/llama-cpp.js",
+    "llama-rs": "node js/llama-rs/llama-rs.js"
  },
  "author": "",
  "license": "MIT",
  "devDependencies": {
    "@types/node": "^18.15.11",
-    "tsx": "^3.12.6",
    "typescript": "^5.0.4",
+    "langchain": "^0.0.56"
  },
  "dependencies": {
-    "@llama-node/core": "file:../packages/core",
-    "@llama-node/llama-cpp": "file:../packages/llama-cpp",
-    "llama-node": "file:../"
+    "@llama-node/core": "0.0.30",
+    "@llama-node/llama-cpp": "0.0.30",
+    "llama-node": "0.0.30"
  }
}
7 files renamed without changes.
6 changes: 3 additions & 3 deletions example/tsconfig.json
@@ -11,7 +11,7 @@
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */

/* Language and Environment */
"target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
"target": "ESNext", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
// "jsx": "preserve", /* Specify what JSX code is generated. */
// "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
@@ -30,7 +30,7 @@
"moduleResolution": "nodenext", /* Specify how TypeScript looks up a file from a given module specifier. */
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
"rootDirs": ["src"], /* Allow multiple folders to be treated as one when resolving modules. */
"rootDirs": ["ts"], /* Allow multiple folders to be treated as one when resolving modules. */
// "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
// "types": [], /* Specify type package names to be included without being referenced in a source file. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
@@ -55,7 +55,7 @@
// "sourceMap": true, /* Create source map files for emitted JavaScript files. */
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
"outDir": "./dist", /* Specify an output folder for all emitted files. */
"outDir": "./js", /* Specify an output folder for all emitted files. */
// "removeComments": true, /* Disable emitting comments. */
// "noEmit": true, /* Disable emitting files from a compilation. */
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
23 changes: 10 additions & 13 deletions package-lock.json

Some generated files are not rendered by default.

13 changes: 12 additions & 1 deletion scripts/version.ts
@@ -2,6 +2,7 @@ import main from "../package.json";
import core from "../packages/core/package.json";
import cpp from "../packages/llama-cpp/package.json";
import cli from "../packages/cli/package.json";
+import example from "../example/package.json";
import semver from "semver";
import path from "path";
import fs from "fs";
@@ -21,7 +22,7 @@ if (!semver.valid(newVersion)) {
process.exit(1);
}

-const newVersionIsGreaterThanAll = [main, core, cli].every((pkg) => {
+const newVersionIsGreaterThanAll = [main, core, cli, example].every((pkg) => {
return semver.gt(newVersion, pkg.version);
});

@@ -35,6 +36,7 @@ console.log(`main: ${main.version}`);
console.log(`core: ${core.version}`);
console.log(`cli: ${cli.version}`);
console.log(`cpp: ${cpp.version}`);
+console.log(`example: ${example.version}`);

console.log(`New version: ${newVersion}`);

@@ -43,6 +45,7 @@ main.version = newVersion;
core.version = newVersion;
cli.version = newVersion;
cpp.version = newVersion;
+example.version = newVersion;
main.dependencies["@llama-node/cli"] = newVersion;
main.optionalDependencies["@llama-node/core"] = newVersion;
main.optionalDependencies["@llama-node/llama-cpp"] = newVersion;
@@ -53,6 +56,9 @@ main.peerDependencies["@llama-node/core"] = newVersion;
main.peerDependencies["@llama-node/llama-cpp"] = newVersion;
main.peerDependencies["@llama-node/cli"] = newVersion;
cli.dependencies["@llama-node/core"] = newVersion;
+example.dependencies["llama-node"] = newVersion;
+example.dependencies["@llama-node/core"] = newVersion;
+example.dependencies["@llama-node/llama-cpp"] = newVersion;

console.log("Writing new versions...");
fs.writeFileSync(
@@ -75,4 +81,9 @@
JSON.stringify(cli, null, 2)
);

+fs.writeFileSync(
+    path.join(__dirname, "../example/package.json"),
+    JSON.stringify(example, null, 2)
+);

console.log("Done!");
