
remove examples gateway. #1243

Merged · 10 commits · Dec 13, 2024
ChatQnA/chatqna.py (13 additions, 7 deletions)
@@ -6,7 +6,8 @@
 import os
 import re
 
-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -187,15 +188,15 @@ def align_generator(self, gen, **kwargs):
     yield "data: [DONE]\n\n"
 
 
-class ChatQnAService(Gateway):
+class ChatQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         ServiceOrchestrator.align_inputs = align_inputs
         ServiceOrchestrator.align_outputs = align_outputs
         ServiceOrchestrator.align_generator = align_generator
 
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.CHAT_QNA)
 
     def add_remote_service(self):
 
@@ -332,7 +333,7 @@ async def handle_request(self, request: Request):
         data = await request.json()
         stream_opt = data.get("stream", True)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -379,15 +380,20 @@ async def handle_request(self, request: Request):
 
     def start(self):
 
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.CHAT_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+
+        self.service.start()
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
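Reviewer note: the refactor above is the heart of this PR. Every example used to inherit its HTTP layer from `Gateway`; now each one composes a `MicroService` in the megaservice role directly. Distilled from the diff, the new wiring pattern looks like the sketch below (`ExampleMegaService` and the placeholder handler are illustrative, not code from the PR):

```python
from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType
from comps.cores.proto.api_protocol import ChatCompletionRequest, ChatCompletionResponse


class ExampleMegaService:
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        self.megaservice = ServiceOrchestrator()
        self.endpoint = str(MegaServiceEndpoint.CHAT_QNA)

    async def handle_request(self, request):
        # Placeholder: the real handler parses the ChatCompletionRequest
        # and schedules self.megaservice.
        ...

    def start(self):
        # Compose a MicroService in the MEGASERVICE role instead of calling
        # Gateway.__init__, then register the route and serve.
        self.service = MicroService(
            self.__class__.__name__,
            service_role=ServiceRoleType.MEGASERVICE,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )
        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
        self.service.start()
```

The same three steps (construct, `add_route`, `start`) repeat in every service this PR touches.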
ChatQnA/chatqna_wrapper.py (11 additions, 7 deletions)
@@ -3,7 +3,8 @@

 import os
 
-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -27,11 +28,12 @@
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
 
 
-class ChatQnAService(Gateway):
+class ChatQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.CHAT_QNA)
 
     def add_remote_service(self):
         embedding = MicroService(
@@ -75,7 +77,7 @@ async def handle_request(self, request: Request):
         data = await request.json()
         stream_opt = data.get("stream", True)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -121,15 +123,17 @@ async def handle_request(self, request: Request):
         return ChatCompletionResponse(model="chatqna", choices=choices, usage=usage)
 
     def start(self):
 
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.CHAT_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()
 
 
 if __name__ == "__main__":
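The other recurring change is that the inherited `self._handle_message` helper is replaced by the free function `handle_message` from `comps.cores.mega.utils`. A rough usage sketch, assuming it keeps the old helper's behavior of accepting either a plain string or OpenAI-style role/content messages and returning a single prompt string:

```python
from comps.cores.mega.utils import handle_message

# A bare string is assumed to pass through as the prompt directly.
prompt = handle_message("What is OPEA?")

# OpenAI-style chat messages are assumed to be flattened into one prompt string.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the ChatQnA architecture."},
]
prompt = handle_message(messages)
```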
DocSum/docsum.py (49 additions, 9 deletions)
@@ -5,8 +5,8 @@
 import os
 from typing import List
 
-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
-from comps.cores.mega.gateway import read_text_from_file
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -27,11 +27,47 @@
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
 
 
-class DocSumService(Gateway):
+def read_pdf(file):
+    from langchain.document_loaders import PyPDFLoader
+
+    loader = PyPDFLoader(file)
+    docs = loader.load_and_split()
+    return docs
+
+
+def read_text_from_file(file, save_file_name):
+    import docx2txt
+    from langchain.text_splitter import CharacterTextSplitter
+
+    # read text file
+    if file.headers["content-type"] == "text/plain":
+        file.file.seek(0)
+        content = file.file.read().decode("utf-8")
+        # Split text
+        text_splitter = CharacterTextSplitter()
+        texts = text_splitter.split_text(content)
+        # Create multiple documents
+        file_content = texts
+    # read pdf file
+    elif file.headers["content-type"] == "application/pdf":
+        documents = read_pdf(save_file_name)
+        file_content = [doc.page_content for doc in documents]
+    # read docx file
+    elif (
+        file.headers["content-type"] == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+        or file.headers["content-type"] == "application/octet-stream"
+    ):
+        file_content = docx2txt.process(save_file_name)
+
+    return file_content
+
+
+class DocSumService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.DOC_SUMMARY)
 
     def add_remote_service(self):
 
@@ -62,7 +98,7 @@ async def handle_request(self, request: Request, files: List[UploadFile] = File(
             data = await request.json()
             stream_opt = data.get("stream", True)
             chat_request = ChatCompletionRequest.model_validate(data)
-            prompt = self._handle_message(chat_request.messages)
+            prompt = handle_message(chat_request.messages)
 
             initial_inputs_data = {data["type"]: prompt}
 
@@ -98,9 +134,9 @@ async def handle_request(self, request: Request, files: List[UploadFile] = File(
                     file_summaries.append(docs)
 
             if file_summaries:
-                prompt = self._handle_message(chat_request.messages) + "\n".join(file_summaries)
+                prompt = handle_message(chat_request.messages) + "\n".join(file_summaries)
             else:
-                prompt = self._handle_message(chat_request.messages)
+                prompt = handle_message(chat_request.messages)
 
             data_type = data.get("type")
             if data_type is not None:
@@ -151,14 +187,18 @@ async def handle_request(self, request: Request, files: List[UploadFile] = File(
         return ChatCompletionResponse(model="docsum", choices=choices, usage=usage)
 
     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.DOC_SUMMARY),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()
 
 
 if __name__ == "__main__":
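With `comps.cores.mega.gateway` going away, the `read_pdf`/`read_text_from_file` helpers are copied into the examples that need them. They dispatch on the upload's `content-type` header and read PDF/DOCX content from a saved file path. A hedged caller sketch (`extract_file_content` and the `/tmp` naming are illustrative, not part of the PR):

```python
import uuid

from fastapi import UploadFile


async def extract_file_content(file: UploadFile):
    # Non-plain-text uploads are spilled to disk first, since PyPDFLoader
    # and docx2txt read from a file path rather than a stream.
    save_path = f"/tmp/{uuid.uuid4()}_{file.filename}"
    if file.headers["content-type"] != "text/plain":
        with open(save_path, "wb") as f:
            f.write(await file.read())
    return read_text_from_file(file, save_path)
```

One thing to watch: the helper returns a list of chunks for text and PDF but a single string for DOCX, and `file_content` is left unbound for any other content type, so callers have to normalize and guard the result.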
FaqGen/faqgen.py (47 additions, 8 deletions)
@@ -5,8 +5,8 @@
 import os
 from typing import List
 
-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
-from comps.cores.mega.gateway import read_text_from_file
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -23,11 +23,47 @@
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
 
 
-class FaqGenService(Gateway):
+def read_pdf(file):
+    from langchain.document_loaders import PyPDFLoader
+
+    loader = PyPDFLoader(file)
+    docs = loader.load_and_split()
+    return docs
+
+
+def read_text_from_file(file, save_file_name):
+    import docx2txt
+    from langchain.text_splitter import CharacterTextSplitter
+
+    # read text file
+    if file.headers["content-type"] == "text/plain":
+        file.file.seek(0)
+        content = file.file.read().decode("utf-8")
+        # Split text
+        text_splitter = CharacterTextSplitter()
+        texts = text_splitter.split_text(content)
+        # Create multiple documents
+        file_content = texts
+    # read pdf file
+    elif file.headers["content-type"] == "application/pdf":
+        documents = read_pdf(save_file_name)
+        file_content = [doc.page_content for doc in documents]
+    # read docx file
+    elif (
+        file.headers["content-type"] == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+        or file.headers["content-type"] == "application/octet-stream"
+    ):
+        file_content = docx2txt.process(save_file_name)
+
+    return file_content
+
+
+class FaqGenService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.FAQ_GEN)
 
     def add_remote_service(self):
         llm = MicroService(
@@ -61,9 +97,9 @@ async def handle_request(self, request: Request, files: List[UploadFile] = File(
                 file_summaries.append(docs)
 
         if file_summaries:
-            prompt = self._handle_message(chat_request.messages) + "\n".join(file_summaries)
+            prompt = handle_message(chat_request.messages) + "\n".join(file_summaries)
         else:
-            prompt = self._handle_message(chat_request.messages)
+            prompt = handle_message(chat_request.messages)
 
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
@@ -101,14 +137,17 @@ async def handle_request(self, request: Request, files: List[UploadFile] = File(
         return ChatCompletionResponse(model="faqgen", choices=choices, usage=usage)
 
     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.FAQ_GEN),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
        )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()
 
 
 if __name__ == "__main__":
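Because the refactor keeps each service's endpoint and request/response types, the external HTTP contract should be unchanged. A quick smoke test against a locally running FaqGen megaservice, assuming `MegaServiceEndpoint.FAQ_GEN` resolves to `/v1/faqgen` and the constructor's default port of 8000:

```python
import requests

# Endpoint path and port are assumptions based on the defaults in the diff;
# no file upload is attached, so only the JSON branch is exercised.
url = "http://localhost:8000/v1/faqgen"
payload = {
    "messages": "Text to generate FAQs from ...",
    "max_tokens": 128,
    "stream": False,
}

resp = requests.post(url, json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```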