diff --git a/models/llama-index-hack/llama2_github_repos.ipynb b/models/llama-index-hack/llama2_github_repos.ipynb
new file mode 100644
index 00000000..24a936c7
--- /dev/null
+++ b/models/llama-index-hack/llama2_github_repos.ipynb
@@ -0,0 +1,840 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from llama_hub.github_repo import GithubClient, GithubRepositoryReader\n",
+ "\n",
+ "from llama_index.llms import LlamaCPP\n",
+ "from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt\n",
+ "from llama_index import VectorStoreIndex\n",
+ "from llama_index import LLMPredictor, PromptHelper, ServiceContext\n",
+ "\n",
+ "import pandas as pd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nest_asyncio\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "gh_auth=os.getenv(\"GITHUB_AUTH\")"
+ ]
+ },
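+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The GitHub loaders below need a personal access token in the `GITHUB_AUTH` environment variable (with read access to the private `Hut23` repository). A minimal guard, added here for illustration, fails early if it is missing:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# fail fast if the token was not set (guard added for illustration)\n",
+ "if gh_auth is None:\n",
+ "    raise ValueError(\"Set the GITHUB_AUTH environment variable to a GitHub personal access token\")"
+ ]
+ },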
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from ../../../llama-2-7b-chat.Q4_K_M.gguf (version GGUF V2 (latest))\n",
+ "llama_model_loader: - tensor 0: token_embd.weight q4_K [ 4096, 32000, 1, 1 ]\n",
+ "llama_model_loader: - tensor 1: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 2: blk.0.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 3: blk.0.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 4: blk.0.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 5: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 6: blk.0.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 7: blk.0.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 8: blk.0.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 9: blk.0.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 10: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 11: blk.1.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 12: blk.1.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 13: blk.1.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 14: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 15: blk.1.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 16: blk.1.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 17: blk.1.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 18: blk.1.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 19: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 20: blk.10.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 21: blk.10.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 22: blk.10.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 23: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 24: blk.10.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 25: blk.10.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 26: blk.10.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 27: blk.10.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 28: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 29: blk.11.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 30: blk.11.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 31: blk.11.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 32: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 33: blk.11.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 34: blk.11.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 35: blk.11.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 36: blk.11.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 37: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 38: blk.12.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 39: blk.12.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 40: blk.12.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 41: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 42: blk.12.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 43: blk.12.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 44: blk.12.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 45: blk.12.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 46: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 47: blk.13.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 48: blk.13.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 49: blk.13.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 50: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 51: blk.13.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 52: blk.13.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 53: blk.13.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 54: blk.13.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 55: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 56: blk.14.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 57: blk.14.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 58: blk.14.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 59: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 60: blk.14.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 61: blk.14.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 62: blk.14.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 63: blk.14.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 64: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 65: blk.15.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 66: blk.15.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 67: blk.15.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 68: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 69: blk.15.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 70: blk.15.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 71: blk.15.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 72: blk.15.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 73: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 74: blk.16.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 75: blk.16.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 76: blk.16.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 77: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 78: blk.16.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 79: blk.16.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 80: blk.16.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 81: blk.16.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 82: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 83: blk.17.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 84: blk.17.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 85: blk.17.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 86: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 87: blk.17.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 88: blk.17.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 89: blk.17.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 90: blk.17.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 91: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 92: blk.18.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 93: blk.18.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 94: blk.18.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 95: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 96: blk.18.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 97: blk.18.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 98: blk.18.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 99: blk.18.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 100: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 101: blk.19.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 102: blk.19.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 103: blk.19.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 104: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 105: blk.19.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 106: blk.19.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 107: blk.19.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 108: blk.19.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 109: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 110: blk.2.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 111: blk.2.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 112: blk.2.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 113: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 114: blk.2.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 115: blk.2.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 116: blk.2.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 117: blk.2.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 118: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 119: blk.20.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 120: blk.20.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 121: blk.20.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 122: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 123: blk.20.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 124: blk.20.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 125: blk.20.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 126: blk.20.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 127: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 128: blk.21.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 129: blk.21.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 130: blk.21.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 131: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 132: blk.21.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 133: blk.21.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 134: blk.21.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 135: blk.21.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 136: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 137: blk.22.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 138: blk.22.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 139: blk.22.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 140: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 141: blk.22.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 142: blk.22.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 143: blk.22.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 144: blk.22.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 145: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 146: blk.23.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 147: blk.23.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 148: blk.23.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 149: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 150: blk.23.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 151: blk.23.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 152: blk.23.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 153: blk.23.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 154: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 155: blk.3.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 156: blk.3.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 157: blk.3.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 158: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 159: blk.3.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 160: blk.3.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 161: blk.3.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 162: blk.3.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 163: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 164: blk.4.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 165: blk.4.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 166: blk.4.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 167: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 168: blk.4.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 169: blk.4.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 170: blk.4.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 171: blk.4.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 172: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 173: blk.5.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 174: blk.5.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 175: blk.5.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 176: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 177: blk.5.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 178: blk.5.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 179: blk.5.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 180: blk.5.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 181: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 182: blk.6.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 183: blk.6.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 184: blk.6.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 185: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 186: blk.6.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 187: blk.6.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 188: blk.6.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 189: blk.6.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 190: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 191: blk.7.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 192: blk.7.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 193: blk.7.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 194: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 195: blk.7.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 196: blk.7.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 197: blk.7.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 198: blk.7.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 199: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 200: blk.8.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 201: blk.8.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 202: blk.8.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 203: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 204: blk.8.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 205: blk.8.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 206: blk.8.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 207: blk.8.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 208: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 209: blk.9.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 210: blk.9.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 211: blk.9.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 212: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 213: blk.9.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 214: blk.9.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 215: blk.9.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 216: blk.9.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 217: output.weight q6_K [ 4096, 32000, 1, 1 ]\n",
+ "llama_model_loader: - tensor 218: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 219: blk.24.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 220: blk.24.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 221: blk.24.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 222: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 223: blk.24.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 224: blk.24.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 225: blk.24.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 226: blk.24.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 227: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 228: blk.25.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 229: blk.25.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 230: blk.25.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 231: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 232: blk.25.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 233: blk.25.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 234: blk.25.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 235: blk.25.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 236: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 237: blk.26.ffn_down.weight q4_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 238: blk.26.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 239: blk.26.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 240: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 241: blk.26.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 242: blk.26.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 243: blk.26.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 244: blk.26.attn_v.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 245: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 246: blk.27.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 247: blk.27.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 248: blk.27.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 249: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 250: blk.27.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 251: blk.27.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 252: blk.27.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 253: blk.27.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 254: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 255: blk.28.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 256: blk.28.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 257: blk.28.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 258: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 259: blk.28.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 260: blk.28.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 261: blk.28.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 262: blk.28.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 263: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 264: blk.29.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 265: blk.29.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 266: blk.29.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 267: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 268: blk.29.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 269: blk.29.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 270: blk.29.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 271: blk.29.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 272: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 273: blk.30.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 274: blk.30.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 275: blk.30.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 276: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 277: blk.30.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 278: blk.30.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 279: blk.30.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 280: blk.30.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 281: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 282: blk.31.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 283: blk.31.ffn_gate.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 284: blk.31.ffn_up.weight q4_K [ 4096, 11008, 1, 1 ]\n",
+ "llama_model_loader: - tensor 285: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - tensor 286: blk.31.attn_k.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 287: blk.31.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 288: blk.31.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 289: blk.31.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]\n",
+ "llama_model_loader: - tensor 290: output_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
+ "llama_model_loader: - kv 0: general.architecture str \n",
+ "llama_model_loader: - kv 1: general.name str \n",
+ "llama_model_loader: - kv 2: llama.context_length u32 \n",
+ "llama_model_loader: - kv 3: llama.embedding_length u32 \n",
+ "llama_model_loader: - kv 4: llama.block_count u32 \n",
+ "llama_model_loader: - kv 5: llama.feed_forward_length u32 \n",
+ "llama_model_loader: - kv 6: llama.rope.dimension_count u32 \n",
+ "llama_model_loader: - kv 7: llama.attention.head_count u32 \n",
+ "llama_model_loader: - kv 8: llama.attention.head_count_kv u32 \n",
+ "llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 \n",
+ "llama_model_loader: - kv 10: general.file_type u32 \n",
+ "llama_model_loader: - kv 11: tokenizer.ggml.model str \n",
+ "llama_model_loader: - kv 12: tokenizer.ggml.tokens arr \n",
+ "llama_model_loader: - kv 13: tokenizer.ggml.scores arr \n",
+ "llama_model_loader: - kv 14: tokenizer.ggml.token_type arr \n",
+ "llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 \n",
+ "llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 \n",
+ "llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 \n",
+ "llama_model_loader: - kv 18: general.quantization_version u32 \n",
+ "llama_model_loader: - type f32: 65 tensors\n",
+ "llama_model_loader: - type q4_K: 193 tensors\n",
+ "llama_model_loader: - type q6_K: 33 tensors\n",
+ "llm_load_print_meta: format = GGUF V2 (latest)\n",
+ "llm_load_print_meta: arch = llama\n",
+ "llm_load_print_meta: vocab type = SPM\n",
+ "llm_load_print_meta: n_vocab = 32000\n",
+ "llm_load_print_meta: n_merges = 0\n",
+ "llm_load_print_meta: n_ctx_train = 4096\n",
+ "llm_load_print_meta: n_ctx = 3900\n",
+ "llm_load_print_meta: n_embd = 4096\n",
+ "llm_load_print_meta: n_head = 32\n",
+ "llm_load_print_meta: n_head_kv = 32\n",
+ "llm_load_print_meta: n_layer = 32\n",
+ "llm_load_print_meta: n_rot = 128\n",
+ "llm_load_print_meta: n_gqa = 1\n",
+ "llm_load_print_meta: f_norm_eps = 1.0e-05\n",
+ "llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
+ "llm_load_print_meta: n_ff = 11008\n",
+ "llm_load_print_meta: freq_base = 10000.0\n",
+ "llm_load_print_meta: freq_scale = 1\n",
+ "llm_load_print_meta: model type = 7B\n",
+ "llm_load_print_meta: model ftype = mostly Q4_K - Medium\n",
+ "llm_load_print_meta: model size = 6.74 B\n",
+ "llm_load_print_meta: general.name = LLaMA v2\n",
+ "llm_load_print_meta: BOS token = 1 ''\n",
+ "llm_load_print_meta: EOS token = 2 ''\n",
+ "llm_load_print_meta: UNK token = 0 ''\n",
+ "llm_load_print_meta: LF token = 13 '<0x0A>'\n",
+ "llm_load_tensors: ggml ctx size = 0.09 MB\n",
+ "llm_load_tensors: mem required = 3891.34 MB (+ 1950.00 MB per state)\n",
+ "..................................................................................................\n",
+ "llama_new_context_with_model: kv self size = 1950.00 MB\n",
+ "llama_new_context_with_model: compute buffer total size = 269.22 MB\n",
+ "AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | \n"
+ ]
+ }
+ ],
+ "source": [
+ "llm = LlamaCPP(\n",
+ " model_path=\"../../../llama-2-7b-chat.Q4_K_M.gguf\",\n",
+ " temperature=0.1,\n",
+ " max_new_tokens=256,\n",
+ " # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room\n",
+ " context_window=3900,\n",
+ " # kwargs to pass to __call__()\n",
+ " generate_kwargs={},\n",
+ " # kwargs to pass to __init__()\n",
+ " # set to at least 1 to use GPU\n",
+ " model_kwargs={\"n_gpu_layers\": 1},\n",
+ " # transform inputs into Llama2 format\n",
+ " messages_to_prompt=messages_to_prompt,\n",
+ " completion_to_prompt=completion_to_prompt,\n",
+ " verbose=True,\n",
+ ")"
+ ]
+ },
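+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick sanity check before building an index, the model can be queried directly (an optional step; the prompt is arbitrary and output will vary):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# one-off completion to confirm the model loaded and responds\n",
+ "response = llm.complete(\"What does a research software engineer do?\")\n",
+ "print(response.text)"
+ ]
+ },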
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_Hut23():\n",
+ " loader = GithubRepositoryReader(\n",
+ " GithubClient(gh_auth),\n",
+ " owner=\"alan-turing-institute\",\n",
+ " repo=\"Hut23\",\n",
+ " verbose=False,\n",
+ " filter_file_extensions=([\".md\",\".ipynb\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " filter_directories=([\"JDs\",\"development\",\"newsletters\",\"objectives\",\"rfc\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " )\n",
+ " documents = loader.load_data(branch=\"master\")\n",
+ " return documents"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_REG_handbook():\n",
+ " loader = GithubRepositoryReader(\n",
+ " GithubClient(gh_auth),\n",
+ " owner=\"alan-turing-institute\",\n",
+ " repo=\"REG-handbook\",\n",
+ " verbose=False,\n",
+ " filter_file_extensions=([\".md\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " filter_directories=([\"content\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " )\n",
+ " documents = loader.load_data(branch=\"main\")\n",
+ " return documents"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_rse_course():\n",
+ " loader = GithubRepositoryReader(\n",
+ " GithubClient(gh_auth),\n",
+ " owner=\"alan-turing-institute\",\n",
+ " repo=\"rse-course\",\n",
+ " verbose=False,\n",
+ " filter_file_extensions=([\".md\",\".ipynb\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " )\n",
+ " documents = loader.load_data(branch=\"main\")\n",
+ " return documents"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_rds_course():\n",
+ " loader = GithubRepositoryReader(\n",
+ " GithubClient(gh_auth),\n",
+ " owner=\"alan-turing-institute\",\n",
+ " repo=\"rds-course\",\n",
+ " verbose=False,\n",
+ " filter_file_extensions=([\".md\",\".ipynb\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " )\n",
+ " documents = loader.load_data(branch=\"develop\")\n",
+ " return documents"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# could also add:\n",
+ "# - https://github.com/alan-turing-institute/TuringDataStories\n",
+ "# - https://github.com/alan-turing-institute/DataScienceSkills/tree/master"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_turing_way():\n",
+ " loader = GithubRepositoryReader(\n",
+ " GithubClient(gh_auth),\n",
+ " owner=\"the-turing-way\",\n",
+ " repo=\"the-turing-way\",\n",
+ " verbose=False,\n",
+ " filter_file_extensions=([\".md\"], GithubRepositoryReader.FilterType.INCLUDE),\n",
+ " )\n",
+ " documents = loader.load_data(branch=\"main\")\n",
+ " return documents"
+ ]
+ },
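+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The five loaders above differ only in owner, repo, branch and filters, so they could be collapsed into a single parametrized helper (a sketch, not part of the original runs; the argument defaults are assumptions):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def load_repo(owner, repo, branch, extensions=(\".md\", \".ipynb\"), directories=None):\n",
+ "    # generic wrapper around GithubRepositoryReader using INCLUDE filters throughout\n",
+ "    loader = GithubRepositoryReader(\n",
+ "        GithubClient(gh_auth),\n",
+ "        owner=owner,\n",
+ "        repo=repo,\n",
+ "        verbose=False,\n",
+ "        filter_file_extensions=(list(extensions), GithubRepositoryReader.FilterType.INCLUDE),\n",
+ "        filter_directories=(\n",
+ "            (list(directories), GithubRepositoryReader.FilterType.INCLUDE) if directories else None\n",
+ "        ),\n",
+ "    )\n",
+ "    return loader.load_data(branch=branch)\n",
+ "\n",
+ "# e.g. load_repo(\"alan-turing-institute\", \"REG-handbook\", \"main\", extensions=(\".md\",), directories=(\"content\",))"
+ ]
+ },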
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "documents = []\n",
+ "documents.extend(load_Hut23())\n",
+ "documents.extend(load_REG_handbook())\n",
+ "documents.extend(load_rse_course())\n",
+ "documents.extend(load_rds_course())\n",
+ "documents.extend(load_turing_way())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "877"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(documents)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_docs = documents[::87] # too many documents makes making index really slow (obviously can fix for actual slack bot)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_service_context(\n",
+ " model, \n",
+ " max_input_size=2048,\n",
+ " num_output=256,\n",
+ " chunk_size_lim=512,\n",
+ " overlap_ratio=0.1\n",
+ " ):\n",
+ " llm_predictor=LLMPredictor(llm=model)\n",
+ " prompt_helper=PromptHelper(max_input_size,num_output,overlap_ratio,chunk_size_lim)\n",
+ " service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=\"local\")\n",
+ " return service_context"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "635ae570bb134ca6bd00e5fc3a01bb88",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Parsing documents into nodes: 0%| | 0/11 [00:00, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "5ff91d4cb3b741c99bdd42b178a5d863",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Generating embeddings: 0%| | 0/1101 [00:00, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "service_context = create_service_context(llm)\n",
+ "index = VectorStoreIndex.from_documents(test_docs, service_context=service_context, show_progress=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# index.storage_context.persist()"
+ ]
+ },
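+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If the `persist()` call above is uncommented, the index is written to `./storage` (the default directory) and can later be reloaded without recomputing embeddings. A sketch:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# rebuild the index from disk instead of re-embedding the documents\n",
+ "from llama_index import StorageContext, load_index_from_storage\n",
+ "\n",
+ "storage_context = StorageContext.from_defaults(persist_dir=\"./storage\")\n",
+ "index = load_index_from_storage(storage_context, service_context=service_context)"
+ ]
+ },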
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_prompt = \"\"\"\\\n",
+ "You are a helpful assistant. \\\n",
+ "Always answer as helpfully as possible and follow ALL given instructions. \\\n",
+ "Do not speculate or make up information - use the information you are provided. \\\n",
+ "Do not reference any given instructions or context. \\\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "query_engine = index.as_query_engine(system_prompt=system_prompt)"
+ ]
+ },
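+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Retrieval depth can be tuned when constructing the query engine; the `similarity_top_k` value below is arbitrary and only for illustration:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# retrieve more context chunks per query (value chosen for illustration)\n",
+ "query_engine = index.as_query_engine(\n",
+ "    system_prompt=system_prompt,\n",
+ "    similarity_top_k=3,\n",
+ ")"
+ ]
+ },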
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 115.49 ms / 162 runs ( 0.71 ms per token, 1402.71 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 19476.84 ms / 610 tokens ( 31.93 ms per token, 31.32 tokens per second)\n",
+ "llama_print_timings: eval time = 10027.50 ms / 161 runs ( 62.28 ms per token, 16.06 tokens per second)\n",
+ "llama_print_timings: total time = 29839.87 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 182.41 ms / 256 runs ( 0.71 ms per token, 1403.46 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 23971.30 ms / 812 tokens ( 29.52 ms per token, 33.87 tokens per second)\n",
+ "llama_print_timings: eval time = 16405.28 ms / 255 runs ( 64.33 ms per token, 15.54 tokens per second)\n",
+ "llama_print_timings: total time = 40914.94 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 181.36 ms / 256 runs ( 0.71 ms per token, 1411.55 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 25865.74 ms / 865 tokens ( 29.90 ms per token, 33.44 tokens per second)\n",
+ "llama_print_timings: eval time = 16184.72 ms / 255 runs ( 63.47 ms per token, 15.76 tokens per second)\n",
+ "llama_print_timings: total time = 42584.34 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 183.00 ms / 256 runs ( 0.71 ms per token, 1398.93 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 15966.28 ms / 523 tokens ( 30.53 ms per token, 32.76 tokens per second)\n",
+ "llama_print_timings: eval time = 16870.72 ms / 255 runs ( 66.16 ms per token, 15.11 tokens per second)\n",
+ "llama_print_timings: total time = 33379.47 ms\n",
+ "Llama.generate: prefix-match hit\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Thank you for providing additional context! Based on the updated information, I can now provide a revised answer to your original query.\n",
+ "As an honest and respectful assistant, I must inform you that the query you provided is still not a valid or relevant question in the context of the REG handbook. The REG handbook is a document that provides information and guidelines for researchers at the Alan Turing Institute, and it does not contain any sections or areas where contributions are solicited or accepted.\n",
+ "However, I understand that you may be interested in contributing to the REG community in some way. In that case, there are several ways you can get involved:\n",
+ "1. Participate in Tech Talks: The REG handbook mentions \"Tech Talks\" (Tuesdays 12:30pm) as a way for researchers to share their knowledge and expertise with others. You can sign up to give a talk or simply attend and learn from others.\n",
+ "2. Join the Society of Research Software Engineering: The REG handbook recommends joining the Society of Research Software Engineering (RSE), including its Slack workspace. This is a great way to connect with other researchers\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 182.57 ms / 256 runs ( 0.71 ms per token, 1402.17 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 4969.07 ms / 72 tokens ( 69.01 ms per token, 14.49 tokens per second)\n",
+ "llama_print_timings: eval time = 14999.77 ms / 255 runs ( 58.82 ms per token, 17.00 tokens per second)\n",
+ "llama_print_timings: total time = 20514.78 ms\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = query_engine.query(\"How can I contribute to the REG handbook?\")\n",
+ "print(response.response)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 77.61 ms / 109 runs ( 0.71 ms per token, 1404.42 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 18501.31 ms / 563 tokens ( 32.86 ms per token, 30.43 tokens per second)\n",
+ "llama_print_timings: eval time = 6730.49 ms / 108 runs ( 62.32 ms per token, 16.05 tokens per second)\n",
+ "llama_print_timings: total time = 25459.36 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 181.92 ms / 256 runs ( 0.71 ms per token, 1407.22 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 20821.02 ms / 679 tokens ( 30.66 ms per token, 32.61 tokens per second)\n",
+ "llama_print_timings: eval time = 16030.69 ms / 255 runs ( 62.87 ms per token, 15.91 tokens per second)\n",
+ "llama_print_timings: total time = 37397.61 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 181.33 ms / 256 runs ( 0.71 ms per token, 1411.78 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 22551.23 ms / 766 tokens ( 29.44 ms per token, 33.97 tokens per second)\n",
+ "llama_print_timings: eval time = 16028.97 ms / 255 runs ( 62.86 ms per token, 15.91 tokens per second)\n",
+ "llama_print_timings: total time = 39119.85 ms\n",
+ "Llama.generate: prefix-match hit\n",
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 180.98 ms / 256 runs ( 0.71 ms per token, 1414.50 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 12869.07 ms / 467 tokens ( 27.56 ms per token, 36.29 tokens per second)\n",
+ "llama_print_timings: eval time = 15807.03 ms / 255 runs ( 61.99 ms per token, 16.13 tokens per second)\n",
+ "llama_print_timings: total time = 29213.77 ms\n",
+ "Llama.generate: prefix-match hit\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Thank you for providing additional context! Based on the information provided, a lay summary is a brief explanation of a complex topic or document, written in simple language that can be easily understood by a non-expert audience. It is often used to summarize scientific research papers, technical reports, or other documents that contain specialized knowledge or jargon that may be difficult for non-experts to understand. The purpose of a lay summary is to provide a clear and concise overview of the main points and key findings of the original document, making it accessible to a wider audience.\n",
+ "In light of the new context provided, here are some additional details that could be included in a lay summary:\n",
+ "* A brief description of the purpose of the document, such as why it was created and what it covers\n",
+ "* An overview of the main points and key findings of the document, including any notable trends or insights\n",
+ "* Any relevant examples or case studies that help to illustrate the points being made in the document\n",
+ "* A summary of any recommendations or actions that the reader should take as a result of reading the document\n",
+ "* Any additional information that could be helpful for non-experts to know, such as definitions of key terms\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "llama_print_timings: load time = 19297.75 ms\n",
+ "llama_print_timings: sample time = 181.83 ms / 256 runs ( 0.71 ms per token, 1407.89 tokens per second)\n",
+ "llama_print_timings: prompt eval time = 5578.25 ms / 122 tokens ( 45.72 ms per token, 21.87 tokens per second)\n",
+ "llama_print_timings: eval time = 14821.13 ms / 255 runs ( 58.12 ms per token, 17.21 tokens per second)\n",
+ "llama_print_timings: total time = 20941.35 ms\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = query_engine.query(\"What is a lay summary?\")\n",
+ "print(response.response)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "reginald-slack-ZVq5BSHv-py3.11",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/poetry.lock b/poetry.lock
index bee4578e..fb28e53f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2030,13 +2030,13 @@ retrying = "*"
[[package]]
name = "llama-index"
-version = "0.8.24.post1"
+version = "0.8.24"
description = "Interface between LLMs and your data"
optional = false
python-versions = "*"
files = [
- {file = "llama_index-0.8.24.post1-py3-none-any.whl", hash = "sha256:4b7645a445d394640bad8c66a67483df29f7f0af25c53360cb382075be0c6c34"},
- {file = "llama_index-0.8.24.post1.tar.gz", hash = "sha256:7cd47cf6ba64d24dbc6db712bcd4834767e0d35890559feee139bd4fa90ad916"},
+ {file = "llama_index-0.8.24-py3-none-any.whl", hash = "sha256:c7bcf11ced7cd7a1a30d2f7d75b96c6b36dd815892e12e1a0642d7d20b76846e"},
+ {file = "llama_index-0.8.24.tar.gz", hash = "sha256:6a70dd010377be8170389721b0c5521e767f41fd676cf35d16214d80e532ec00"},
]
[package.dependencies]