refactor: change naming of RnExecutorchModule to LLM (#50)
## Description
The Turbo Module that handles running LLaMA models was named
RnExecutorchModule, which was unclear and clashed with the name of the
library itself. It has been renamed to LLM, matching the naming style of
StyleTransfer, ObjectDetection, etc.
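
For context, a minimal consumer-side sketch of the hook whose internals this PR renames. The option names follow the `useLLM` signature visible in `src/LLM.ts` below; the package name, the URLs, and the returned fields are assumptions, since the hook's return shape is not shown in this diff.

```typescript
// Hypothetical usage sketch; URLs are placeholders and the returned fields
// are assumed from the state the hook manages in src/LLM.ts below.
import { useLLM } from 'react-native-executorch'; // assumed package name

export function useAssistant() {
  const { response, downloadProgress, isModelReady, isModelGenerating } =
    useLLM({
      modelSource: 'https://example.com/llama.pte', // placeholder
      tokenizerSource: 'https://example.com/tokenizer.bin', // placeholder
      systemPrompt: 'You are a helpful assistant.',
      contextWindowLength: 5,
    });

  return { response, downloadProgress, isModelReady, isModelGenerating };
}
```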

### Type of change
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to not work as expected)
- [ ] Documentation update (improves or adds clarity to existing
documentation)

### Tested on
- [x] iOS
- [x] Android

### Testing instructions
<!-- Provide step-by-step instructions on how to test your changes.
Include setup details if necessary. -->

### Screenshots
<!-- Add screenshots here, if applicable -->

### Related issues
<!-- Link related issues here using #issue-number -->

### Checklist
- [x] I have performed a self-review of my code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have updated the documentation accordingly
- [ ] My changes generate no new warnings

### Additional notes
<!-- Include any additional information, assumptions, or context that
reviewers might need to understand this PR. -->
NorbertKlockiewicz authored Dec 12, 2024
1 parent 73a97d6 commit 5a21d36
Showing 10 changed files with 36 additions and 32 deletions.
@@ -12,8 +12,8 @@ import com.swmansion.rnexecutorch.utils.llms.END_OF_TEXT_TOKEN
import org.pytorch.executorch.LlamaCallback
import org.pytorch.executorch.LlamaModule

-class RnExecutorchModule(reactContext: ReactApplicationContext) :
-  NativeRnExecutorchSpec(reactContext), LlamaCallback {
+class LLM(reactContext: ReactApplicationContext) :
+  NativeLLMSpec(reactContext), LlamaCallback {

private var llamaModule: LlamaModule? = null
private var tempLlamaResponse = StringBuilder()
@@ -94,7 +94,11 @@ class RnExecutorchModule(reactContext: ReactApplicationContext) :
return@tokenizerDownload
}

-downloadResource(modelSource, ResourceType.MODEL, isLargeFile = true) modelDownload@{ modelPath, modelError ->
+downloadResource(
+  modelSource,
+  ResourceType.MODEL,
+  isLargeFile = true
+) modelDownload@{ modelPath, modelError ->
if (modelError != null) {
promise.reject(
"Download Error",
@@ -149,6 +153,6 @@ class RnExecutorchModule(reactContext: ReactApplicationContext) :
}

companion object {
-const val NAME = "RnExecutorch"
+const val NAME = "LLM"
}
}
@@ -13,11 +13,11 @@ class RnExecutorchPackage : TurboReactPackage() {
}

override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? =
-if (name == RnExecutorchModule.NAME) {
-  RnExecutorchModule(reactContext)
+if (name == LLM.NAME) {
+  LLM(reactContext)
} else if (name == ETModule.NAME) {
ETModule(reactContext)
-} else if(name == StyleTransfer.NAME){
+} else if (name == StyleTransfer.NAME) {
StyleTransfer(reactContext)
} else {
null
@@ -26,9 +26,9 @@ override fun getReactModuleInfoProvider(): ReactModuleInfoProvider {
override fun getReactModuleInfoProvider(): ReactModuleInfoProvider {
return ReactModuleInfoProvider {
val moduleInfos: MutableMap<String, ReactModuleInfo> = HashMap()
-moduleInfos[RnExecutorchModule.NAME] = ReactModuleInfo(
-  RnExecutorchModule.NAME,
-  RnExecutorchModule.NAME,
+moduleInfos[LLM.NAME] = ReactModuleInfo(
+  LLM.NAME,
+  LLM.NAME,
false, // canOverrideExistingModule
false, // needsEagerInit
false, // isCxxModule
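
As an aside on how the registration above is consumed: the name string passed to `ReactModuleInfo` and exported as `LLM.NAME` is the contract with JavaScript, which looks the module up by the same string. A minimal sketch of that lookup, mirroring `src/native/NativeLLM.ts` further down; the import path assumes the sketch sits next to that file:

```typescript
// Sketch (not part of this PR): the string passed to TurboModuleRegistry.get
// must equal the NAME registered by the native module above ("LLM");
// a mismatch yields null and trips the Proxy linking error in
// src/native/RnExecutorchModules.ts further down.
import { TurboModuleRegistry } from 'react-native';
import type { Spec } from './NativeLLM'; // assumed relative path

const llm = TurboModuleRegistry.get<Spec>('LLM');
```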
2 changes: 1 addition & 1 deletion ios/RnExecutorch.xcodeproj/project.pbxproj
@@ -27,7 +27,7 @@
isa = PBXFileSystemSynchronizedGroupBuildPhaseMembershipExceptionSet;
buildPhase = 550986872CEF541900FECBB8 /* CopyFiles */;
membershipExceptions = (
-RnExecutorch.h,
+LLM.h,
);
};
/* End PBXFileSystemSynchronizedGroupBuildPhaseMembershipExceptionSet section */
5 changes: 5 additions & 0 deletions ios/RnExecutorch/LLM.h
@@ -0,0 +1,5 @@
+#import <RnExecutorchSpec/RnExecutorchSpec.h>
+
+@interface LLM : NativeLLMSpecBase <NativeLLMSpec>
+
+@end
6 changes: 3 additions & 3 deletions ios/RnExecutorch/RnExecutorch.mm → ios/RnExecutorch/LLM.mm
@@ -1,4 +1,4 @@
#import "RnExecutorch.h"
#import "LLM.h"
#import <ExecutorchLib/LLaMARunner.h>
#import "utils/ConversationManager.h"
#import "utils/Constants.h"
@@ -15,7 +15,7 @@
#import <react/renderer/uimanager/primitives.h>


-@implementation RnExecutorch {
+@implementation LLM {
LLaMARunner *runner;
ConversationManager *conversationManager;
NSMutableString *tempLlamaResponse;
@@ -131,7 +131,7 @@ -(void)deleteModule {

- (std::shared_ptr<facebook::react::TurboModule>)getTurboModule:(const facebook::react::ObjCTurboModule::InitParams &)params
{
-return std::make_shared<facebook::react::NativeRnExecutorchSpecJSI>(params);
+return std::make_shared<facebook::react::NativeLLMSpecJSI>(params);
}

@end
5 changes: 0 additions & 5 deletions ios/RnExecutorch/RnExecutorch.h

This file was deleted.

14 changes: 7 additions & 7 deletions src/RnExecutorch.ts → src/LLM.ts
@@ -6,10 +6,10 @@ import {
DEFAULT_SYSTEM_PROMPT,
EOT_TOKEN,
} from './constants/llamaDefaults';
-import { RnExecutorch } from './native/RnExecutorchModules';
+import { LLM } from './native/RnExecutorchModules';

const interrupt = () => {
-RnExecutorch.interrupt();
+LLM.interrupt();
};

export const useLLM = ({
@@ -50,15 +50,15 @@ export const useLLM = ({
tokenizerUrl = Image.resolveAssetSource(tokenizerSource).uri;
}

-downloadProgressListener.current = RnExecutorch.onDownloadProgress(
+downloadProgressListener.current = LLM.onDownloadProgress(
(data: number) => {
if (data) {
setDownloadProgress(data);
}
}
);

-await RnExecutorch.loadLLM(
+await LLM.loadLLM(
modelUrl as string,
tokenizerUrl as string,
systemPrompt,
@@ -67,7 +67,7 @@

setIsModelReady(true);

-tokenGeneratedListener.current = RnExecutorch.onToken(
+tokenGeneratedListener.current = LLM.onToken(
(data: string | undefined) => {
if (!data) {
return;
@@ -93,7 +93,7 @@
downloadProgressListener.current = null;
tokenGeneratedListener.current?.remove();
tokenGeneratedListener.current = null;
-RnExecutorch.deleteModule();
+LLM.deleteModule();
};
}, [contextWindowLength, modelSource, systemPrompt, tokenizerSource]);

@@ -109,7 +109,7 @@
try {
setResponse('');
setIsModelGenerating(true);
-await RnExecutorch.runInference(input);
+await LLM.runInference(input);
} catch (err) {
setIsModelGenerating(false);
throw new Error((err as Error).message);
2 changes: 1 addition & 1 deletion src/index.tsx
@@ -1,4 +1,4 @@
export * from './ETModule';
-export * from './RnExecutorch';
+export * from './LLM';
export * from './StyleTransfer';
export * from './constants/modelUrls';
@@ -17,4 +17,4 @@ export interface Spec extends TurboModule {
readonly onDownloadProgress: EventEmitter<number>;
}

-export default TurboModuleRegistry.get<Spec>('RnExecutorch');
+export default TurboModuleRegistry.get<Spec>('LLM');
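
The spec's `onDownloadProgress` is an `EventEmitter<number>`, and, as the `src/LLM.ts` hunks above show, subscribing returns a listener whose `remove()` must be called on cleanup. A minimal subscription sketch, assuming the module linked successfully and that the sketch sits next to `RnExecutorchModules.ts`; the progress value's scale is not shown in this diff:

```typescript
// Sketch: subscribing to the renamed module's download-progress events,
// mirroring the hook code in src/LLM.ts above.
import { LLM } from './RnExecutorchModules'; // assumed relative path

const subscription = LLM.onDownloadProgress((progress: number) => {
  console.log('model download progress:', progress); // scale not shown here
});

// On cleanup (e.g. a React effect teardown), as the hook above does:
subscription.remove();
```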
8 changes: 4 additions & 4 deletions src/native/RnExecutorchModules.ts
@@ -6,9 +6,9 @@ const LINKING_ERROR =
'- You rebuilt the app after installing the package\n' +
'- You are not using Expo Go\n';

-const RnExecutorchSpec = require('./NativeRnExecutorch').default;
-const RnExecutorch = RnExecutorchSpec
-  ? RnExecutorchSpec
+const LLMSpec = require('./NativeLLM').default;
+const LLM = LLMSpec
+  ? LLMSpec
: new Proxy(
{},
{
@@ -44,4 +44,4 @@ const StyleTransfer = StyleTransferSpec
}
);

-export { RnExecutorch, ETModule, StyleTransfer };
+export { LLM, ETModule, StyleTransfer };
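
For completeness, the fallback pattern this file uses, written out as a self-contained sketch. The opening lines of `LINKING_ERROR` and the `Proxy` get trap are truncated in the hunks above, so those parts are reconstructed from standard React Native linking-error boilerplate and are assumptions:

```typescript
// Sketch of the fallback in RnExecutorchModules.ts: if the codegen'd spec
// failed to resolve (app not rebuilt, Expo Go), the export becomes a Proxy
// whose every property access throws the linking error.
const LINKING_ERROR =
  // Leading lines of the message are truncated in the diff above.
  '- You rebuilt the app after installing the package\n' +
  '- You are not using Expo Go\n';

const LLMSpec = require('./NativeLLM').default;

export const LLM = LLMSpec
  ? LLMSpec
  : new Proxy(
      {},
      {
        get() {
          throw new Error(LINKING_ERROR); // assumed trap body
        },
      }
    );
```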
