Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update mongodb_connector.py #10

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 37 additions & 22 deletions maps/mongodb_connector.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,12 @@
import pymongo as pm
from nomic import AtlasProject
from nomic import AtlasDataset
from sentence_transformers import SentenceTransformer
import numpy as np
import pandas as pd
from pathlib import Path
import nomic

# Replace with your MongoDB connection string and certificate file path.
# NOTE(review): the URI uses X.509 client-certificate auth (MONGODB-X509 with
# authSource=$external), so the <username>:<password> placeholder is likely
# redundant — identity comes from the certificate; confirm before shipping.
client = pm.MongoClient(
    'mongodb+srv://<username>:<password>@cluster0.l3jhqfs.mongodb.net/testdb'
    '?authSource=%24external&authMechanism=MONGODB-X509&retryWrites=true&w=majority',
    tls=True,
    tlsCertificateKeyFile='mongocert.pem',
)
Expand All @@ -17,42 +16,58 @@
# Delete current content of collection so the run is idempotent
collection.delete_many({})

# Load the Stack Overflow question data (titles, bodies, ...) from a local
# parquet file into a DataFrame for embedding and insertion into MongoDB.
mongo_so = pd.read_parquet(Path.cwd() / 'data' / 'mongo-so.parquet')

# Initialize SentenceTransformer model (downloads weights on first use)
model = SentenceTransformer('all-MiniLM-L6-v2')

# Encode question titles into dense embeddings; .tolist() because
# SentenceTransformer.encode expects a list of strings, not a pandas Series
title_embeds = model.encode(mongo_so['title'].tolist())

# Attach one row-aligned embedding vector per question to the DataFrame
mso_te = mongo_so.assign(title_embedding=list(title_embeds))

# Convert DataFrame rows to a list of dicts for MongoDB insertion.
# NOTE: to_dict(orient='records') does not include the DataFrame index, so
# there is no 'Index' key to delete (that was an itertuples()._asdict()
# artifact; deleting it here would raise KeyError). Only the numpy embedding
# needs converting to a plain list so it is BSON-serializable.
data = mso_te.to_dict(orient='records')
for d in data:
    d['title_embedding'] = d['title_embedding'].tolist()

# Insert all records into the MongoDB collection
collection.insert_many(data)

# Create an Atlas dataset to map the MongoDB collection's embeddings.
# NOTE(review): confirm against the nomic client docs that AtlasDataset
# accepts `modality` at construction time in the installed version.
dataset = AtlasDataset(
    "MongoDB_StackOverflow_Questions",
    unique_id_field="mongo_id",  # filled in below from each document's _id
    is_public=True,
    modality='embedding',
)

# Retrieve all documents from the MongoDB collection
all_items = list(collection.find())

# Stack the stored embeddings into a (num_docs, embedding_dim) numpy array,
# ordered the same as all_items so rows stay aligned with their metadata
embs = np.array([d['title_embedding'] for d in all_items])

# Prepare documents for Atlas: expose the Mongo ObjectId as the string
# unique-id field, and drop what Atlas should not receive as metadata —
# the raw ObjectId (not JSON-serializable) and the embedding (passed
# separately via the embeddings= argument).
for d in all_items:
    d['mongo_id'] = str(d['_id'])
    del d['title_embedding']
    del d['_id']

# Upload the metadata together with the precomputed embeddings
dataset.add_data(data=all_items, embeddings=embs)

# Build the Atlas map index over the uploaded data.
# NOTE(review): since the embeddings were precomputed and uploaded above,
# check whether `embedding_model` should be set at all for
# modality='embedding' (it may trigger re-embedding), and whether
# `indexed_field` applies to embedding-modality indexes — confirm against
# the nomic create_index documentation.
index_options = {
    "indexed_field": "title",  # Replace with the appropriate field for indexing
    "modality": "embedding",
    "topic_model": True,
    "duplicate_detection": True,
    "embedding_model": "NomicEmbed",
}
dataset.create_index(**index_options)

# Print summary information about the AtlasDataset (includes the map URL)
print(dataset)