Hugging Face basic LLM endpoint is implemented #598
Open
huzaifansari54 wants to merge 4 commits into davidmigloz:main from huzaifansari54:resolved_branch (base: main)
34 changes: 32 additions & 2 deletions
packages/langchain_huggingface/example/langchain_huggingface_example.dart
```diff
@@ -1,3 +1,33 @@
-void main() {
-  // TODO
-}
+// ignore_for_file: avoid_print, unused_element
+
+import 'package:langchain_core/chat_models.dart';
+import 'package:langchain_core/prompts.dart';
+
+import 'package:langchain_huggingface/src/llm/huggingface_inference.dart';
+
+void main() async {
+  // Uncomment the example you want to run:
+  await _example1();
+  await _example2();
+}
+
+/// The most basic building block of LangChain is calling an LLM on some input.
+Future<void> _example1() async {
+  final huggingFace = HuggingfaceInference.call(
+    model: 'gpt2',
+    apiKey: '....API_KEY...',
+  );
+  final result = await huggingFace('Who are you?');
+  print(result);
+}
+
+Future<void> _example2() async {
+  final huggingFace = HuggingfaceInference.call(
+    model: 'gpt2',
+    apiKey: '....API_KEY...',
+  );
+
+  final str = huggingFace.stream(PromptValue.string('Who are you?'));
+
+  str.listen(print);
+}
```
2 changes: 2 additions & 0 deletions
packages/langchain_huggingface/lib/langchain_huggingface.dart
```diff
@@ -1,2 +1,4 @@
 /// Hugging Face module for LangChain.dart.
 library;
+
+export 'src/llm/llm.dart';
```
79 changes: 79 additions & 0 deletions
packages/langchain_huggingface/lib/src/llm/huggingface_inference.dart
```dart
import 'package:huggingface_client/huggingface_client.dart';
import 'package:langchain_core/llms.dart';
import 'package:langchain_core/prompts.dart';
import 'package:meta/meta.dart';

import 'mappers.dart';
import 'types.dart';

/// Wrapper around the Hugging Face Inference API text-generation endpoint.
@immutable
class HuggingfaceInference extends BaseLLM<HuggingFaceOptions> {
  const HuggingfaceInference._({
    required this.model,
    required this.apiKey,
    required this.apiClient,
    super.defaultOptions = const HuggingFaceOptions(),
  });

  factory HuggingfaceInference.call({
    required String apiKey,
    required String model,
  }) {
    final apiClient = InferenceApi(
      HuggingFaceClient.getInferenceClient(
        apiKey,
        HuggingFaceClient.inferenceBasePath,
      ),
    );
    return HuggingfaceInference._(
      model: model,
      apiKey: apiKey,
      apiClient: apiClient,
    );
  }

  final InferenceApi apiClient;
  final String apiKey;
  final String model;

  @override
  Future<LLMResult> invoke(
    PromptValue input, {
    HuggingFaceOptions? options,
  }) async {
    final parameters = ApiQueryNLPTextGeneration(
      inputs: input.toString(),
      temperature: options?.temperature ?? 1.0,
      topK: options?.topK ?? 0,
      topP: options?.topP ?? 0.0,
      maxTime: options?.maxTime ?? -1.0,
      returnFullText: options?.returnFullText ?? true,
      repetitionPenalty: options?.repetitionPenalty ?? -1,
      doSample: options?.doSample ?? true,
      maxNewTokens: options?.maxNewTokens ?? -1,
      options: InferenceOptions(
        useCache: options?.useCache ?? true,
        waitForModel: options?.waitForModel ?? false,
      ),
    );
    final result = await apiClient.queryNLPTextGeneration(
      taskParameters: parameters,
      model: model,
    );
    return result![0]!.toLLMResult();
  }

  @override
  Stream<LLMResult> stream(PromptValue input, {HuggingFaceOptions? options}) {
    final query = ApiQueryNLPTextGeneration(
      inputs: input.toString(),
      temperature: options?.temperature ?? 1.0,
      topK: options?.topK ?? 0,
      topP: options?.topP ?? 0.0,
      maxTime: options?.maxTime ?? -1.0,
      returnFullText: options?.returnFullText ?? true,
      repetitionPenalty: options?.repetitionPenalty ?? -1,
      doSample: options?.doSample ?? true,
      maxNewTokens: options?.maxNewTokens ?? -1,
      options: InferenceOptions(
        useCache: options?.useCache ?? true,
        waitForModel: options?.waitForModel ?? false,
      ),
    );
    final stream = apiClient.textStreamGeneration(query: query, model: model);
    return stream.map((response) => response.toLLMResult());
  }

  @override
  String get modelType => 'llm';

  @override
  Future<List<int>> tokenize(
    PromptValue promptValue, {
    HuggingFaceOptions? options,
  }) async {
    // TODO: implement tokenize
    throw UnimplementedError();
  }
}
```
2 changes: 2 additions & 0 deletions
packages/langchain_huggingface/lib/src/llm/llm.dart

```dart
export 'huggingface_inference.dart';
export 'types.dart';
```
27 changes: 27 additions & 0 deletions
packages/langchain_huggingface/lib/src/llm/mappers.dart

```dart
import 'package:huggingface_client/huggingface_client.dart';
import 'package:langchain_core/language_models.dart';
import 'package:langchain_core/llms.dart';

/// Maps a Hugging Face text-generation response to a LangChain [LLMResult].
extension HuggingFaceResponseMapper on ApiResponseNLPTextGeneration {
  LLMResult toLLMResult() {
    return LLMResult(
      id: 'id', // TODO: the response carries no id; this placeholder is hard-coded.
      output: generatedText,
      finishReason: FinishReason.unspecified,
      metadata: {},
      usage: const LanguageModelUsage(),
    );
  }
}

/// Maps a streaming text-generation chunk to a LangChain [LLMResult].
extension HuggingFaceStreamResponseMapper on TextGenerationStreamResponse {
  LLMResult toLLMResult() {
    return LLMResult(
      id: id.toString(),
      output: text,
      finishReason: FinishReason.unspecified,
      metadata: {},
      usage: const LanguageModelUsage(),
    );
  }
}
```
95 changes: 95 additions & 0 deletions
packages/langchain_huggingface/lib/src/llm/types.dart

```dart
import 'package:langchain_core/llms.dart';
import 'package:meta/meta.dart';

/// Generation options for the Hugging Face Inference API.
@immutable
class HuggingFaceOptions extends LLMOptions {
  const HuggingFaceOptions({
    this.topK,
    this.topP,
    super.model,
    this.temperature,
    this.repetitionPenalty,
    this.maxNewTokens,
    this.maxTime,
    this.returnFullText,
    this.numReturnSequences,
    this.useCache,
    this.waitForModel,
    this.doSample,
    super.concurrencyLimit,
  });

  /// (Default: true). There is a cache layer on the Inference API to speed up
  /// requests we have already seen. Most models can use those results as-is,
  /// as models are deterministic (meaning the results will be the same
  /// anyway). However, if you use a non-deterministic model, you can set this
  /// parameter to prevent the caching mechanism from being used, resulting in
  /// a real new query.
  final bool? useCache;

  /// (Default: false). If the model is not ready, wait for it instead of
  /// receiving a 503. This limits the number of requests required to get your
  /// inference done. It is advised to only set this flag to true after
  /// receiving a 503 error, as it will limit hanging in your application to
  /// known places.
  final bool? waitForModel;

  /// (Default: none). Integer defining the top tokens considered within the
  /// sample operation to create new text.
  final int? topK;

  /// (Default: none). Float defining the tokens that are within the sample
  /// operation of text generation. Tokens are added to the sample, from most
  /// probable to least probable, until the sum of their probabilities is
  /// greater than top_p.
  final double? topP;

  /// (Default: 1.0). Float (0.0-100.0). The temperature of the sampling
  /// operation. 1 means regular sampling, 0 means always take the highest
  /// score, and 100.0 gets closer to a uniform probability.
  final double? temperature;

  /// (Default: none). Float (0.0-100.0). The more a token is used within
  /// generation, the more it is penalized so as not to be picked in
  /// successive generation passes.
  final double? repetitionPenalty;

  /// (Default: none). Int (0-250). The number of new tokens to generate; this
  /// does not include the input length, it is an estimate of the size of the
  /// generated text you want. Each new token slows down the request, so look
  /// for a balance between response time and length of generated text.
  final int? maxNewTokens;

  /// (Default: none). Float (0-120.0). The maximum amount of time, in
  /// seconds, that the query should take. Network overhead means this is a
  /// soft limit. Use it in combination with [maxNewTokens] for best results.
  final double? maxTime;

  /// (Default: true). If set to false, the returned results will not contain
  /// the original query, making it easier to use for prompting.
  final bool? returnFullText;

  /// (Default: 1). The number of propositions you want to be returned.
  final int? numReturnSequences;

  /// (Optional: true). Whether or not to use sampling; uses greedy decoding
  /// otherwise.
  final bool? doSample;

  @override
  HuggingFaceOptions copyWith({
    final String? model,
    final int? concurrencyLimit,
    final int? topK,
    final double? topP,
    final double? temperature,
    final double? repetitionPenalty,
    final int? maxNewTokens,
    final double? maxTime,
    final bool? returnFullText,
    final int? numReturnSequences,
    final bool? useCache,
    final bool? waitForModel,
    final bool? doSample,
  }) {
    return HuggingFaceOptions(
      model: model ?? this.model,
      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,
      topK: topK ?? this.topK,
      topP: topP ?? this.topP,
      temperature: temperature ?? this.temperature,
      repetitionPenalty: repetitionPenalty ?? this.repetitionPenalty,
      maxNewTokens: maxNewTokens ?? this.maxNewTokens,
      maxTime: maxTime ?? this.maxTime,
      returnFullText: returnFullText ?? this.returnFullText,
      numReturnSequences: numReturnSequences ?? this.numReturnSequences,
      useCache: useCache ?? this.useCache,
      waitForModel: waitForModel ?? this.waitForModel,
      doSample: doSample ?? this.doSample,
    );
  }
}
```
3 changes: 3 additions & 0 deletions
packages/langchain_huggingface/pubspec.yaml

```diff
@@ -15,3 +15,6 @@ topics:

 environment:
   sdk: ">=3.4.0 <4.0.0"
+dependencies:
+  huggingface_client: ^1.6.0
+  langchain_core: ^0.3.6
```
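With these dependencies declared, downstream code would import the package through its public entrypoint rather than the `src/` path used in the example file; a sketch:

```dart
// The barrel file re-exports HuggingfaceInference and HuggingFaceOptions
// via src/llm/llm.dart, so no src/ imports are needed.
import 'package:langchain_huggingface/langchain_huggingface.dart';

void main() {
  const options = HuggingFaceOptions(temperature: 0.5);
  print(options.temperature);
}
```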