repo: remove bindings that have no maintainer (#2429)

The C#, Java, and Go bindings are now removed from the repo.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Author: Jared Van Bortel <jared@nomic.ai>
Committed: 2024-06-11 18:11:25 -04:00 (via GitHub)
Parent: 41c9013fa4
Commit: beaede03fb
75 changed files with 22 additions and 5192 deletions

View File

@@ -15,6 +15,5 @@ workflows:
             gpt4all-backend/.* run-all-workflows true
             gpt4all-bindings/python/.* run-python-workflow true
             gpt4all-bindings/typescript/.* run-ts-workflow true
-            gpt4all-bindings/csharp/.* run-csharp-workflow true
             gpt4all-chat/.* run-chat-workflow true
             .* run-default-workflow true

View File

@@ -20,9 +20,6 @@ parameters:
   run-ts-workflow:
     type: boolean
     default: false
-  run-csharp-workflow:
-    type: boolean
-    default: false
 
 jobs:
   default-job:
@@ -620,57 +617,6 @@ jobs:
             - runtimes/osx-x64/*.metal
 
   build-bindings-backend-windows:
-    executor:
-      name: win/default
-      size: large
-      shell: powershell.exe -ExecutionPolicy Bypass
-    steps:
-      - checkout
-      - run:
-          name: Update Submodules
-          command: |
-            git submodule sync
-            git submodule update --init --recursive
-      - run:
-          name: Install MinGW64
-          command: choco install -y mingw --force --no-progress
-      - run:
-          name: Install VulkanSDK
-          command: |
-            Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe
-            .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install
-      - run:
-          name: Install CUDA Toolkit
-          command: |
-            Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe
-            .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4
-      - run:
-          name: Install dependencies
-          command: |
-            choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System'
-      - run:
-          name: Build Libraries
-          command: |
-            $MinGWBin = "C:\ProgramData\mingw64\mingw64\bin"
-            $Env:Path += ";$MinGwBin"
-            $Env:Path += ";C:\Program Files\CMake\bin"
-            $Env:Path += ";C:\VulkanSDK\1.3.261.1\bin"
-            $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1"
-            cd gpt4all-backend
-            mkdir runtimes/win-x64
-            cd runtimes/win-x64
-            cmake -G "MinGW Makefiles" -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON ../..
-            cmake --build . --parallel --config Release
-            cp "$MinGWBin\libgcc*.dll" .
-            cp "$MinGWBin\libstdc++*.dll" .
-            cp "$MinGWBin\libwinpthread*.dll" .
-            cp bin/*.dll .
-      - persist_to_workspace:
-          root: gpt4all-backend
-          paths:
-            - runtimes/win-x64/*.dll
-
-  build-bindings-backend-windows-msvc:
     machine:
       image: 'windows-server-2022-gui:2023.03.1'
     resource_class: windows.large
@@ -713,182 +659,6 @@ jobs:
       paths:
         - runtimes/win-x64_msvc/*.dll
 
-  build-csharp-linux:
-    docker:
-      - image: mcr.microsoft.com/dotnet/sdk:8.0
-    steps:
-      - checkout
-      - attach_workspace:
-          at: /tmp/workspace
-      - run:
-          name: "Prepare Native Libs"
-          command: |
-            cd gpt4all-bindings/csharp
-            mkdir -p runtimes/linux-x64/native
-            cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/
-            ls -R runtimes
-      - restore_cache:
-          keys:
-            - gpt4all-csharp-nuget-packages-nix
-      - run:
-          name: "Install project dependencies"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet restore Gpt4All
-      - save_cache:
-          paths:
-            - ~/.nuget/packages
-          key: gpt4all-csharp-nuget-packages-nix
-      - run:
-          name: Build C# Project
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet build Gpt4All --configuration Release --nologo
-      - run:
-          name: "Run C# Tests"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx"
-      - run:
-          name: Test results
-          command: |
-            cd gpt4all-bindings/csharp/Gpt4All.Tests
-            dotnet tool install -g trx2junit
-            export PATH="$PATH:$HOME/.dotnet/tools"
-            trx2junit TestResults/*.trx
-      - store_test_results:
-          path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults
-
-  build-csharp-windows:
-    executor:
-      name: win/default
-      size: large
-      shell: powershell.exe -ExecutionPolicy Bypass
-    steps:
-      - checkout
-      - restore_cache:
-          keys:
-            - gpt4all-csharp-nuget-packages-win
-      - attach_workspace:
-          at: C:\Users\circleci\workspace
-      - run:
-          name: "Install .NET"
-          command: |
-            choco install -y dotnet-8.0-sdk
-      - run:
-          name: "Prepare Native Libs"
-          command: |
-            cd gpt4all-bindings/csharp
-            mkdir -p runtimes\win-x64\native
-            cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\
-            ls -R runtimes
-      - run:
-          name: "Install project dependencies"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet.exe restore Gpt4All
-      - save_cache:
-          paths:
-            - C:\Users\circleci\.nuget\packages
-          key: gpt4all-csharp-nuget-packages-win
-      - run:
-          name: Build C# Project
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet.exe build Gpt4All --configuration Release --nologo
-      - run:
-          name: "Run C# Tests"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx"
-      - run:
-          name: Test results
-          command: |
-            cd gpt4all-bindings/csharp/Gpt4All.Tests
-            dotnet tool install -g trx2junit
-            $Env:Path += ";$Env:USERPROFILE\.dotnet\tools"
-            trx2junit TestResults/*.trx
-      - store_test_results:
-          path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults
-
-  build-csharp-macos:
-    macos:
-      xcode: "14.0.0"
-    steps:
-      - checkout
-      - restore_cache:
-          keys:
-            - gpt4all-csharp-nuget-packages-nix
-      - run:
-          name: Install dependencies
-          command: |
-            brew tap isen-ng/dotnet-sdk-versions
-            brew install --cask dotnet-sdk8-0-100
-      - attach_workspace:
-          at: /tmp/workspace
-      - run:
-          name: "Prepare Native Libs"
-          command: |
-            cd gpt4all-bindings/csharp
-            mkdir -p runtimes/osx/native
-            cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/
-            cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/
-            ls -R runtimes
-      - run:
-          name: "Install project dependencies"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet restore Gpt4All
-      - save_cache:
-          paths:
-            - ~/.nuget/packages
-          key: gpt4all-csharp-nuget-packages-nix
-      - run:
-          name: Build C# Project
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet build Gpt4All --configuration Release --nologo
-      - run:
-          name: "Run C# Tests"
-          command: |
-            cd gpt4all-bindings/csharp
-            dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx"
-      - run:
-          name: Test results
-          command: |
-            cd gpt4all-bindings/csharp/Gpt4All.Tests
-            dotnet tool install -g trx2junit
-            export PATH="$PATH:$HOME/.dotnet/tools"
-            trx2junit TestResults/*.trx
-      - store_test_results:
-          path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults
-
-  store-and-upload-nupkgs:
-    docker:
-      - image: mcr.microsoft.com/dotnet/sdk:8.0
-    steps:
-      - attach_workspace:
-          at: /tmp/workspace
-      - checkout
-      - restore_cache:
-          keys:
-            - gpt4all-csharp-nuget-packages-nix
-      - run:
-          name: NuGet Pack
-          command: |
-            cd gpt4all-bindings/csharp
-            mkdir -p runtimes/linux-x64/native
-            cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/
-            mkdir -p runtimes/win-x64/native
-            cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/
-            #mkdir -p runtimes/osx/native
-            #cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/
-            #cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/
-            dotnet pack ./Gpt4All/Gpt4All.csproj -p:IncludeSymbols=true -p:SymbolPackageFormat=snupkg -c Release
-            dotnet nuget push ./Gpt4All/bin/Release/Gpt4All.*.nupkg -s $NUGET_URL -k $NUGET_TOKEN --skip-duplicate
-      - store_artifacts:
-          path: gpt4all-bindings/csharp/Gpt4All/bin/Release
-
   build-nodejs-linux:
     docker:
       - image: cimg/base:stable
@@ -1153,13 +923,10 @@ workflows:
       or:
         - << pipeline.parameters.run-all-workflows >>
         - << pipeline.parameters.run-python-workflow >>
-        - << pipeline.parameters.run-csharp-workflow >>
         - << pipeline.parameters.run-ts-workflow >>
     jobs:
       - hold:
           type: approval
-      - csharp-hold:
-          type: approval
       - nuget-hold:
           type: approval
       - nodejs-hold:
@@ -1184,12 +951,6 @@ workflows:
               only:
           requires:
             - hold
-      - build-bindings-backend-windows-msvc:
-          filters:
-            branches:
-              only:
-          requires:
-            - hold
 
       # NodeJs Jobs
       - prepare-npm-pkg:
@@ -1214,7 +975,7 @@ workflows:
               only:
           requires:
             - nodejs-hold
-            - build-bindings-backend-windows-msvc
+            - build-bindings-backend-windows
       - build-nodejs-macos:
           filters:
             branches:
@@ -1222,36 +983,3 @@ workflows:
           requires:
             - nodejs-hold
             - build-bindings-backend-macos
-
-      # CSharp Jobs
-      - build-csharp-linux:
-          filters:
-            branches:
-              only:
-          requires:
-            - csharp-hold
-            - build-bindings-backend-linux
-      - build-csharp-windows:
-          filters:
-            branches:
-              only:
-          requires:
-            - csharp-hold
-            - build-bindings-backend-windows
-      - build-csharp-macos:
-          filters:
-            branches:
-              only:
-          requires:
-            - csharp-hold
-            - build-bindings-backend-macos
-      - store-and-upload-nupkgs:
-          filters:
-            branches:
-              only:
-          requires:
-            - nuget-hold
-            - build-csharp-windows
-            - build-csharp-linux
-            #- build-csharp-macos

View File

@@ -1,3 +1,21 @@
-# GPT4All Bindings
+# GPT4All Language Bindings
-This directory will contain language specific bindings on top of the C/C++ model backends.
-We will have one directory per language binding (e.g. Python, Typescript, Golang, etc.).
+These are the language bindings for the GPT4All backend. They provide functionality to load GPT4All models (and other llama.cpp models), generate text, and (in the case of the Python bindings) embed text as a vector representation.
+
+See their respective folders for language-specific documentation.
+
+### Languages
+- [Python](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python) (Nomic official, maintained by [@cebtenzzre](https://github.com/cebtenzzre))
+- [Node.js/Typescript](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/typescript) (community, maintained by [@jacoobes](https://github.com/jacoobes) and [@iimez](https://github.com/iimez))
+
+<br/>
+<br/>
+
+<details><summary><b>Archived Bindings</b></summary>
+<br/>
+The following bindings have been removed from this repository due to lack of maintenance. If adopted, they can be brought back; feel free to message a developer on Discord if you are interested in maintaining one of them. Below are links to their last available version (not necessarily the last working version).
+
+- C#: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/csharp)
+- Java: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/java)
+- Go: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/golang)
+
+</details>

View File

@@ -1,348 +0,0 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
# Don't use tabs for indentation.
[*]
indent_style = space
# (Please don't specify an indent_size here; that has too many unintended consequences.)
# Code files
[*.{cs,csx,vb,vbx}]
indent_size = 4
insert_final_newline = true
charset = utf-8-bom
# XML project files
[*.{csproj,vbproj,vcxproj,vcxproj.filters,proj,projitems,shproj}]
indent_size = 4
# XML config files
[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}]
indent_size = 2
# JSON files
[*.json]
indent_size = 2
# Powershell files
[*.ps1]
indent_size = 2
# Shell script files
[*.sh]
end_of_line = lf
indent_size = 2
insert_final_newline = true
# Dotnet code style settings:
[*.{cs,vb}]
# IDE0055: Fix formatting
dotnet_diagnostic.IDE0055.severity = error
dotnet_diagnostic.CS1573.severity = suggestion
dotnet_diagnostic.CS1591.severity = suggestion
# Sort using and Import directives with System.* appearing first
dotnet_sort_system_directives_first = true
dotnet_separate_import_directive_groups = false
# Avoid "this." and "Me." if not necessary
dotnet_style_qualification_for_field = false:suggestion
dotnet_style_qualification_for_property = false:suggestion
dotnet_style_qualification_for_method = false:suggestion
dotnet_style_qualification_for_event = false:suggestion
# Use language keywords instead of framework type names for type references
dotnet_style_predefined_type_for_locals_parameters_members = true:warning
dotnet_style_predefined_type_for_member_access = true:warning
# Suggest more modern language features when available
dotnet_style_object_initializer = true:suggestion
dotnet_style_collection_initializer = true:suggestion
dotnet_style_coalesce_expression = true:suggestion
dotnet_style_null_propagation = true:suggestion
dotnet_style_explicit_tuple_names = true:suggestion
# Whitespace options
dotnet_style_allow_multiple_blank_lines_experimental = false
# Private fields are camelCase with '_' prefix
dotnet_naming_rule.private_members_with_underscore.symbols = private_fields
dotnet_naming_rule.private_members_with_underscore.style = prefix_underscore
dotnet_naming_rule.private_members_with_underscore.severity = error
dotnet_naming_symbols.private_fields.applicable_kinds = field
dotnet_naming_symbols.private_fields.applicable_accessibilities = private
dotnet_naming_style.prefix_underscore.capitalization = camel_case
dotnet_naming_style.prefix_underscore.required_prefix = _
# Non-private static fields are PascalCase
dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.severity = suggestion
dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.symbols = non_private_static_fields
dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.style = non_private_static_field_style
dotnet_naming_symbols.non_private_static_fields.applicable_kinds = field
dotnet_naming_symbols.non_private_static_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected
dotnet_naming_symbols.non_private_static_fields.required_modifiers = static
dotnet_naming_style.non_private_static_field_style.capitalization = pascal_case
# Non-private readonly fields are PascalCase
dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.severity = suggestion
dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.symbols = non_private_readonly_fields
dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.style = non_private_static_field_style
dotnet_naming_symbols.non_private_readonly_fields.applicable_kinds = field
dotnet_naming_symbols.non_private_readonly_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected
dotnet_naming_symbols.non_private_readonly_fields.required_modifiers = readonly
dotnet_naming_style.non_private_readonly_field_style.capitalization = pascal_case
# Constants are PascalCase
dotnet_naming_rule.constants_should_be_pascal_case.severity = suggestion
dotnet_naming_rule.constants_should_be_pascal_case.symbols = constants
dotnet_naming_rule.constants_should_be_pascal_case.style = non_private_static_field_style
dotnet_naming_symbols.constants.applicable_kinds = field, local
dotnet_naming_symbols.constants.required_modifiers = const
dotnet_naming_style.constant_style.capitalization = pascal_case
# Static fields are camelCase and start with s_
dotnet_naming_rule.static_fields_should_be_camel_case.severity = none
dotnet_naming_rule.static_fields_should_be_camel_case.symbols = static_fields
dotnet_naming_rule.static_fields_should_be_camel_case.style = static_field_style
dotnet_naming_symbols.static_fields.applicable_kinds = field
dotnet_naming_symbols.static_fields.required_modifiers = static
dotnet_naming_style.static_field_style.capitalization = camel_case
dotnet_naming_style.static_field_style.required_prefix = s_
# Instance fields are camelCase and start with _
dotnet_naming_rule.instance_fields_should_be_camel_case.severity = none
dotnet_naming_rule.instance_fields_should_be_camel_case.symbols = instance_fields
dotnet_naming_rule.instance_fields_should_be_camel_case.style = instance_field_style
dotnet_naming_symbols.instance_fields.applicable_kinds = field
dotnet_naming_style.instance_field_style.capitalization = camel_case
dotnet_naming_style.instance_field_style.required_prefix = _
# Locals and parameters are camelCase
dotnet_naming_rule.locals_should_be_camel_case.severity = suggestion
dotnet_naming_rule.locals_should_be_camel_case.symbols = locals_and_parameters
dotnet_naming_rule.locals_should_be_camel_case.style = camel_case_style
dotnet_naming_symbols.locals_and_parameters.applicable_kinds = parameter, local
dotnet_naming_style.camel_case_style.capitalization = camel_case
# Local functions are PascalCase
dotnet_naming_rule.local_functions_should_be_pascal_case.severity = suggestion
dotnet_naming_rule.local_functions_should_be_pascal_case.symbols = local_functions
dotnet_naming_rule.local_functions_should_be_pascal_case.style = non_private_static_field_style
dotnet_naming_symbols.local_functions.applicable_kinds = local_function
dotnet_naming_style.local_function_style.capitalization = pascal_case
# By default, name items with PascalCase
dotnet_naming_rule.members_should_be_pascal_case.severity = suggestion
dotnet_naming_rule.members_should_be_pascal_case.symbols = all_members
dotnet_naming_rule.members_should_be_pascal_case.style = non_private_static_field_style
dotnet_naming_symbols.all_members.applicable_kinds = *
dotnet_naming_style.pascal_case_style.capitalization = pascal_case
# error RS2008: Enable analyzer release tracking for the analyzer project containing rule '{0}'
dotnet_diagnostic.RS2008.severity = none
# IDE0073: File header
dotnet_diagnostic.IDE0073.severity = none
#file_header_template = Licensed to the .NET Foundation under one or more agreements.\nThe .NET Foundation licenses this file to you under the MIT license.\nSee the LICENSE file in the project root for more information.
# IDE0035: Remove unreachable code
dotnet_diagnostic.IDE0035.severity = warning
# IDE0036: Order modifiers
dotnet_diagnostic.IDE0036.severity = warning
# IDE0043: Format string contains invalid placeholder
dotnet_diagnostic.IDE0043.severity = warning
# IDE0044: Make field readonly
dotnet_diagnostic.IDE0044.severity = warning
# IDE1006: Naming rule violation
#dotnet_diagnostic.IDE1006.severity = none
# RS0016: Only enable if API files are present
dotnet_public_api_analyzer.require_api_files = true
dotnet_style_operator_placement_when_wrapping = beginning_of_line
tab_width = 4
end_of_line = crlf
dotnet_style_prefer_is_null_check_over_reference_equality_method = true:suggestion
dotnet_style_prefer_auto_properties = true:silent
dotnet_style_prefer_simplified_boolean_expressions = true:suggestion
dotnet_style_prefer_conditional_expression_over_assignment = true:silent
dotnet_style_prefer_conditional_expression_over_return = true:silent
dotnet_style_prefer_inferred_tuple_names = true:suggestion
dotnet_style_prefer_inferred_anonymous_type_member_names = true:suggestion
dotnet_style_prefer_compound_assignment = true:suggestion
dotnet_style_prefer_simplified_interpolation = true:suggestion
dotnet_style_namespace_match_folder = true:suggestion
# CSharp code style settings:
[*.cs]
# Newline settings
csharp_new_line_before_open_brace = all
csharp_new_line_before_else = true
csharp_new_line_before_catch = true
csharp_new_line_before_finally = true
csharp_new_line_before_members_in_object_initializers = true
csharp_new_line_before_members_in_anonymous_types = true
csharp_new_line_between_query_expression_clauses = true
# Indentation preferences
csharp_indent_block_contents = true
csharp_indent_braces = false
csharp_indent_case_contents = true
csharp_indent_case_contents_when_block = true
csharp_indent_switch_labels = true
csharp_indent_labels = flush_left
# Whitespace options
csharp_style_allow_embedded_statements_on_same_line_experimental = false
csharp_style_allow_blank_lines_between_consecutive_braces_experimental = false
csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental = false
# Prefer "var" everywhere
csharp_style_var_for_built_in_types = true:suggestion
csharp_style_var_when_type_is_apparent = true:suggestion
csharp_style_var_elsewhere = true:suggestion
# Prefer method-like constructs to have a block body
csharp_style_expression_bodied_methods = false:none
csharp_style_expression_bodied_constructors = false:none
csharp_style_expression_bodied_operators = false:none
# Prefer property-like constructs to have an expression-body
csharp_style_expression_bodied_properties = true:none
csharp_style_expression_bodied_indexers = true:none
csharp_style_expression_bodied_accessors = true:none
# Suggest more modern language features when available
csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion
csharp_style_pattern_matching_over_as_with_null_check = true:suggestion
csharp_style_inlined_variable_declaration = true:suggestion
csharp_style_throw_expression = true:suggestion
csharp_style_conditional_delegate_call = true:suggestion
# Space preferences
csharp_space_after_cast = false
csharp_space_after_colon_in_inheritance_clause = true
csharp_space_after_comma = true
csharp_space_after_dot = false
csharp_space_after_keywords_in_control_flow_statements = true
csharp_space_after_semicolon_in_for_statement = true
csharp_space_around_binary_operators = before_and_after
csharp_space_around_declaration_statements = do_not_ignore
csharp_space_before_colon_in_inheritance_clause = true
csharp_space_before_comma = false
csharp_space_before_dot = false
csharp_space_before_open_square_brackets = false
csharp_space_before_semicolon_in_for_statement = false
csharp_space_between_empty_square_brackets = false
csharp_space_between_method_call_empty_parameter_list_parentheses = false
csharp_space_between_method_call_name_and_opening_parenthesis = false
csharp_space_between_method_call_parameter_list_parentheses = false
csharp_space_between_method_declaration_empty_parameter_list_parentheses = false
csharp_space_between_method_declaration_name_and_open_parenthesis = false
csharp_space_between_method_declaration_parameter_list_parentheses = false
csharp_space_between_parentheses = false
csharp_space_between_square_brackets = false
# Blocks are allowed
csharp_prefer_braces = true:silent
csharp_preserve_single_line_blocks = true
csharp_preserve_single_line_statements = true
# Target-typed new expressions
csharp_style_implicit_object_creation_when_type_is_apparent = true:suggestion
# Currently only enabled for C# due to crash in VB analyzer. VB can be enabled once
# https://github.com/dotnet/roslyn/pull/54259 has been published.
dotnet_style_allow_statement_immediately_after_block_experimental = false
dotnet_diagnostic.RCS0003.severity=warning
dotnet_diagnostic.RCS1036.severity=error
dotnet_diagnostic.IDE0005.severity=warning
dotnet_diagnostic.IDE0007.severity=error
csharp_using_directive_placement = outside_namespace:silent
csharp_prefer_simple_using_statement = true:suggestion
csharp_style_namespace_declarations = block_scoped:silent
csharp_style_expression_bodied_lambdas = true:silent
csharp_style_expression_bodied_local_functions = false:silent
csharp_style_prefer_null_check_over_type_check = true:suggestion
dotnet_diagnostic.RCS1075.severity = suggestion
[src/CodeStyle/**.{cs,vb}]
# warning RS0005: Do not use generic CodeAction.Create to create CodeAction
dotnet_diagnostic.RS0005.severity = none
[src/{Analyzers,CodeStyle,Features,Workspaces,EditorFeatures,VisualStudio}/**/*.{cs,vb}]
# IDE0011: Add braces
csharp_prefer_braces = when_multiline:warning
# NOTE: We need the below severity entry for Add Braces due to https://github.com/dotnet/roslyn/issues/44201
dotnet_diagnostic.IDE0011.severity = warning
# IDE0040: Add accessibility modifiers
dotnet_diagnostic.IDE0040.severity = warning
# CONSIDER: Are IDE0051 and IDE0052 too noisy to be warnings for IDE editing scenarios? Should they be made build-only warnings?
# IDE0051: Remove unused private member
dotnet_diagnostic.IDE0051.severity = warning
# IDE0052: Remove unread private member
dotnet_diagnostic.IDE0052.severity = warning
# IDE0059: Unnecessary assignment to a value
dotnet_diagnostic.IDE0059.severity = warning
# IDE0060: Remove unused parameter
dotnet_diagnostic.IDE0060.severity = warning
# CA1012: Abstract types should not have public constructors
dotnet_diagnostic.CA1012.severity = warning
# CA1822: Make member static
dotnet_diagnostic.CA1822.severity = warning
# Prefer "var" everywhere
dotnet_diagnostic.IDE0007.severity = warning
csharp_style_var_for_built_in_types = true:warning
csharp_style_var_when_type_is_apparent = true:warning
csharp_style_var_elsewhere = true:warning
# dotnet_style_allow_multiple_blank_lines_experimental
dotnet_diagnostic.IDE2000.severity = warning
# csharp_style_allow_embedded_statements_on_same_line_experimental
dotnet_diagnostic.IDE2001.severity = warning
# csharp_style_allow_blank_lines_between_consecutive_braces_experimental
dotnet_diagnostic.IDE2002.severity = warning
# dotnet_style_allow_statement_immediately_after_block_experimental
dotnet_diagnostic.IDE2003.severity = warning
# csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental
dotnet_diagnostic.IDE2004.severity = warning
[src/{VisualStudio}/**/*.{cs,vb}]
# CA1822: Make member static
# There is a risk of accidentally breaking an internal API that partners rely on though IVT.
dotnet_code_quality.CA1822.api_surface = private

View File

@@ -1,379 +0,0 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
runtimes
**/*nuget
*.zip
include/
*.exp
*.lib
*.dll
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
Tests/**/launchSettings.json
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
# JetBrains Rider
.idea
# Visual Studio Code
.vscode

View File

@@ -1,44 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project>
<PropertyGroup>
<Company></Company>
<Copyright></Copyright>
<NeutralLanguage>en-US</NeutralLanguage>
<Version>0.6.4-alpha</Version>
<VersionSuffix>$(VersionSuffix)</VersionSuffix>
<Version Condition=" '$(VersionSuffix)' != '' ">$(Version)$(VersionSuffix)</Version>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<RepositoryUrl></RepositoryUrl>
<RepositoryType>git</RepositoryType>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<AnalysisLevel>latest-minimum</AnalysisLevel>
<EnforceCodeStyleInBuild>true</EnforceCodeStyleInBuild>
</PropertyGroup>
<ItemGroup>
<Using Include="System"/>
</ItemGroup>
<PropertyGroup>
<LangVersion>preview</LangVersion>
<Features>strict</Features>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Roslynator.Analyzers" Version="4.2.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
<PackageReference Include="Roslynator.CodeAnalysis.Analyzers" Version="4.2.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
<PackageReference Include="Roslynator.Formatting.Analyzers" Version="4.2.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
</ItemGroup>
</Project>
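
Note the version stanza above: Version is pinned to 0.6.4-alpha, and when a VersionSuffix is supplied it is concatenated onto it directly, so a hypothetical -p:VersionSuffix=.2 at build time would have produced 0.6.4-alpha.2.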

View File

@@ -1,33 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\Gpt4All\Gpt4All.csproj" />
</ItemGroup>
<ItemGroup>
<!-- Windows -->
<None Include="..\runtimes\win-x64\native\*.dll" Pack="true" PackagePath="runtimes\win-x64\native\%(Filename)%(Extension)" />
<!-- Linux -->
<None Include="..\runtimes\linux-x64\native\*.so" Pack="true" PackagePath="runtimes\linux-x64\native\%(Filename)%(Extension)" />
<!-- MacOS -->
<None Include="..\runtimes\osx\native\*.dylib" Pack="true" PackagePath="runtimes\osx\native\%(Filename)%(Extension)" />
</ItemGroup>
<ItemGroup>
<!-- Windows -->
<None Condition="$([MSBuild]::IsOSPlatform('Windows'))" Include="..\runtimes\win-x64\native\*.dll" Visible="False" CopyToOutputDirectory="PreserveNewest" />
<!-- Linux -->
<None Condition="$([MSBuild]::IsOSPlatform('Linux'))" Include="..\runtimes\linux-x64\native\*.so" Visible="False" CopyToOutputDirectory="PreserveNewest" />
<!-- MacOS -->
<None Condition="$([MSBuild]::IsOSPlatform('OSX'))" Include="..\runtimes\osx\native\*.dylib" Visible="False" CopyToOutputDirectory="PreserveNewest" />
<Content Condition="$([MSBuild]::IsOSPlatform('OSX'))" Include="..\runtimes\osx\native\*.metal" Visible="False" CopyToOutputDirectory="PreserveNewest" />
</ItemGroup>
</Project>

View File

@@ -1,22 +0,0 @@
using Gpt4All;
var modelFactory = new Gpt4AllModelFactory();
if (args.Length < 2)
{
Console.WriteLine($"Usage: Gpt4All.Samples <model-path> <prompt>");
return;
}
var modelPath = args[0];
var prompt = args[1];
using var model = modelFactory.LoadModel(modelPath);
var result = await model.GetStreamingPredictionAsync(
prompt,
PredictRequestOptions.Defaults);
await foreach (var token in result.GetPredictionStreamingAsync())
{
Console.Write(token);
}
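
Assuming the native libraries were in place under runtimes/, this sample could be invoked with something like dotnet run --project Gpt4All.Samples -- /path/to/model.bin "What is a dog?" (the model path here is a placeholder).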

View File

@@ -1,9 +0,0 @@
namespace Gpt4All.Tests;
public static class Constants
{
public const string MODELS_BASE_DIR = "../../../models";
public const string LLAMA_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-l13b-snoozy.bin";
public const string GPTJ_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-j-v1.3-groovy.bin";
public const string MPT_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-mpt-7b-chat.bin";
}

View File

@@ -1,60 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net8.0</TargetFramework>
<Nullable>enable</Nullable>
<IsPackable>false</IsPackable>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.6.2" />
<PackageReference Include="xunit" Version="2.4.2" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.5">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="coverlet.collector" Version="6.0.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Gpt4All\Gpt4All.csproj" />
</ItemGroup>
<ItemGroup>
<!-- Windows -->
<None Include="..\runtimes\win-x64\native\*.dll" Pack="true" PackagePath="runtimes\win-x64\native\%(Filename)%(Extension)" />
<!-- Linux -->
<None Include="..\runtimes\linux-x64\native\*.so" Pack="true" PackagePath="runtimes\linux-x64\native\%(Filename)%(Extension)" />
<!-- MacOS -->
<None Include="..\runtimes\osx\native\*.dylib" Pack="true" PackagePath="runtimes\osx\native\%(Filename)%(Extension)" />
</ItemGroup>
<ItemGroup>
<!-- Windows -->
<None Condition="$([MSBuild]::IsOSPlatform('Windows'))" Include="..\runtimes\win-x64\native\*.dll" Visible="False" CopyToOutputDirectory="PreserveNewest" />
<!-- Linux -->
<None Condition="$([MSBuild]::IsOSPlatform('Linux'))" Include="..\runtimes\linux-x64\native\*.so" Visible="False" CopyToOutputDirectory="PreserveNewest" />
<!-- MacOS -->
<None Condition="$([MSBuild]::IsOSPlatform('OSX'))" Include="..\runtimes\osx\native\*.dylib" Visible="False" CopyToOutputDirectory="PreserveNewest" />
</ItemGroup>
<ItemGroup>
<PackageReference Update="Roslynator.Analyzers" Version="4.3.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
<PackageReference Update="Roslynator.CodeAnalysis.Analyzers" Version="4.3.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
<PackageReference Update="Roslynator.Formatting.Analyzers" Version="4.3.0">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers</IncludeAssets>
</PackageReference>
</ItemGroup>
</Project>

View File

@@ -1,34 +0,0 @@
using Xunit;
namespace Gpt4All.Tests;
public class ModelFactoryTests
{
private readonly Gpt4AllModelFactory _modelFactory;
public ModelFactoryTests()
{
_modelFactory = new Gpt4AllModelFactory();
}
[Fact]
[Trait(Traits.SkipOnCI, "True")]
public void CanLoadLlamaModel()
{
using var model = _modelFactory.LoadModel(Constants.LLAMA_MODEL_PATH);
}
[Fact]
[Trait(Traits.SkipOnCI, "True")]
public void CanLoadGptjModel()
{
using var model = _modelFactory.LoadModel(Constants.GPTJ_MODEL_PATH);
}
[Fact]
[Trait(Traits.SkipOnCI, "True")]
public void CanLoadMptModel()
{
using var model = _modelFactory.LoadModel(Constants.MPT_MODEL_PATH);
}
}

View File

@@ -1,56 +0,0 @@
using System.IO;
using Gpt4All.LibraryLoader;
using Xunit;
namespace Gpt4All.Tests;
public class NativeLibraryLoaderTests
{
[Fact]
public void NativeLibraryShouldLoad()
{
var result = NativeLibraryLoader.LoadNativeLibrary(bypassLoading: false);
Assert.True(result.IsSuccess);
}
private const string LLModelLib = "libllmodel.{0}";
[PlatformSpecificFact(Platforms.Windows)]
public void NativeLibraryShouldLoad_Windows()
{
var libraryLoader = new WindowsLibraryLoader();
var libraryPath = Path.Combine(
Environment.CurrentDirectory,
string.Format(LLModelLib, "dll"));
var result = libraryLoader.OpenLibrary(libraryPath);
Assert.True(result.IsSuccess);
}
[PlatformSpecificFact(Platforms.Linux)]
public void NativeLibraryShouldLoad_Linux()
{
var libraryLoader = new LinuxLibraryLoader();
var libraryPath = Path.Combine(
Environment.CurrentDirectory,
string.Format(LLModelLib, "so"));
var result = libraryLoader.OpenLibrary(libraryPath);
Assert.True(result.IsSuccess);
}
[PlatformSpecificFact(Platforms.MacOS)]
public void NativeLibraryShouldLoad_MacOS()
{
var libraryLoader = new MacOsLibraryLoader();
var libraryPath = Path.Combine(
Environment.CurrentDirectory,
string.Format(LLModelLib, "dylib"));
var result = libraryLoader.OpenLibrary(libraryPath);
Assert.True(result.IsSuccess);
}
}

View File

@@ -1,27 +0,0 @@
using Xunit;
namespace Gpt4All.Tests;
public static class Platforms
{
public const string Windows = "windows";
public const string Linux = "linux";
public const string MacOS = "macOS";
}
/// <summary>
/// This attribute ensures the Fact is only run on the specified platform.
/// </summary>
/// <remarks>
/// <see cref="OperatingSystem.IsOSPlatform(string)"/> for info about the platform string.
/// </remarks>
public class PlatformSpecificFactAttribute : FactAttribute
{
public PlatformSpecificFactAttribute(string platform)
{
if (!OperatingSystem.IsOSPlatform(platform))
{
Skip = $"Test only runs on {platform}.";
}
}
}

View File

@@ -1,6 +0,0 @@
namespace Gpt4All.Tests;
public static class Traits
{
public const string SkipOnCI = "SKIP_ON_CI";
}

View File

@@ -1,47 +0,0 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.5.33516.290
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All.Samples", "Gpt4All.Samples\Gpt4All.Samples.csproj", "{59864AE8-E45D-42F7-A7C0-1308EF185F39}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{DA396C11-CEAD-4368-8234-FB12255A30D2}"
ProjectSection(SolutionItems) = preProject
.gitignore = .gitignore
build_linux.sh = build_linux.sh
build_win-mingw.ps1 = build_win-mingw.ps1
build_win-msvc.ps1 = build_win-msvc.ps1
docs\gpt4all_csharp.md = docs\gpt4all_csharp.md
README.md = README.md
EndProjectSection
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All", "Gpt4All\Gpt4All.csproj", "{6015C62B-2008-426B-A334-740D6F1FE38B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Gpt4All.Tests", "Gpt4All.Tests\Gpt4All.Tests.csproj", "{33A72341-52C1-4EAE-878B-A98BC77F686A}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.Build.0 = Debug|Any CPU
{59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.ActiveCfg = Release|Any CPU
{59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.Build.0 = Release|Any CPU
{6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.Build.0 = Release|Any CPU
{33A72341-52C1-4EAE-878B-A98BC77F686A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{33A72341-52C1-4EAE-878B-A98BC77F686A}.Debug|Any CPU.Build.0 = Debug|Any CPU
{33A72341-52C1-4EAE-878B-A98BC77F686A}.Release|Any CPU.ActiveCfg = Release|Any CPU
{33A72341-52C1-4EAE-878B-A98BC77F686A}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {17632027-F4C2-4903-B88F-310CE3DE386B}
EndGlobalSection
EndGlobal

View File

@@ -1,29 +0,0 @@
namespace Gpt4All.Bindings;
/// <summary>
/// Represents the interface exposed by the universal wrapper for GPT4All language models built around llmodel C-API.
/// </summary>
public interface ILLModel : IDisposable
{
ulong GetStateSizeBytes();
int GetThreadCount();
void SetThreadCount(int threadCount);
bool IsLoaded();
bool Load(string modelPath);
void Prompt(
string text,
LLModelPromptContext context,
Func<ModelPromptEventArgs, bool>? promptCallback = null,
Func<ModelResponseEventArgs, bool>? responseCallback = null,
Func<ModelRecalculatingEventArgs, bool>? recalculateCallback = null,
CancellationToken cancellationToken = default);
unsafe ulong RestoreStateData(byte* destination);
unsafe ulong SaveStateData(byte* source);
}
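
LLModel, defined in the next file, is the concrete implementation of this interface; the interface presumably exists to allow substitution and mocking in Gpt4All.Tests, which is granted internals access via InternalsVisibleTo in a later file.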

View File

@@ -1,212 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace Gpt4All.Bindings;
/// <summary>
/// Arguments for the response processing callback
/// </summary>
/// <param name="TokenId">The token id of the response</param>
/// <param name="Response"> The response string. NOTE: a token_id of -1 indicates the string is an error string</param>
/// <return>
/// A bool indicating whether the model should keep generating
/// </return>
public record ModelResponseEventArgs(int TokenId, string Response)
{
public bool IsError => TokenId == -1;
}
/// <summary>
/// Arguments for the prompt processing callback
/// </summary>
/// <param name="TokenId">The token id of the prompt</param>
/// <return>
/// A bool indicating whether the model should keep processing
/// </return>
public record ModelPromptEventArgs(int TokenId)
{
}
/// <summary>
/// Arguments for the recalculating callback
/// </summary>
/// <param name="IsRecalculating"> whether the model is recalculating the context.</param>
/// <return>
/// A bool indicating whether the model should keep generating
/// </return>
public record ModelRecalculatingEventArgs(bool IsRecalculating);
/// <summary>
/// Base class and universal wrapper for GPT4All language models built around llmodel C-API.
/// </summary>
public class LLModel : ILLModel
{
protected readonly IntPtr _handle;
private readonly ILogger _logger;
private bool _disposed;
internal LLModel(IntPtr handle, ILogger? logger = null)
{
_handle = handle;
_logger = logger ?? NullLogger.Instance;
}
/// <summary>
/// Create a new model from a pointer
/// </summary>
/// <param name="handle">Pointer to underlying model</param>
public static LLModel Create(IntPtr handle, ILogger? logger = null)
{
return new LLModel(handle, logger: logger);
}
/// <summary>
/// Generate a response using the model
/// </summary>
/// <param name="text">The input promp</param>
/// <param name="context">The context</param>
/// <param name="promptCallback">A callback function for handling the processing of prompt</param>
/// <param name="responseCallback">A callback function for handling the generated response</param>
/// <param name="recalculateCallback">A callback function for handling recalculation requests</param>
/// <param name="cancellationToken"></param>
public void Prompt(
string text,
LLModelPromptContext context,
Func<ModelPromptEventArgs, bool>? promptCallback = null,
Func<ModelResponseEventArgs, bool>? responseCallback = null,
Func<ModelRecalculatingEventArgs, bool>? recalculateCallback = null,
CancellationToken cancellationToken = default)
{
GC.KeepAlive(promptCallback);
GC.KeepAlive(responseCallback);
GC.KeepAlive(recalculateCallback);
GC.KeepAlive(cancellationToken);
_logger.LogInformation("Prompt input='{Prompt}' ctx={Context}", text, context.Dump());
NativeMethods.llmodel_prompt(
_handle,
text,
(tokenId) =>
{
if (cancellationToken.IsCancellationRequested) return false;
if (promptCallback == null) return true;
var args = new ModelPromptEventArgs(tokenId);
return promptCallback(args);
},
(tokenId, response) =>
{
if (cancellationToken.IsCancellationRequested)
{
_logger.LogDebug("ResponseCallback evt=CancellationRequested");
return false;
}
if (responseCallback == null) return true;
var args = new ModelResponseEventArgs(tokenId, response);
return responseCallback(args);
},
(isRecalculating) =>
{
if (cancellationToken.IsCancellationRequested) return false;
if (recalculateCallback == null) return true;
var args = new ModelRecalculatingEventArgs(isRecalculating);
return recalculateCallback(args);
},
ref context.UnderlyingContext
);
}
/// <summary>
/// Set the number of threads to be used by the model.
/// </summary>
/// <param name="threadCount">The new thread count</param>
public void SetThreadCount(int threadCount)
{
NativeMethods.llmodel_setThreadCount(_handle, threadCount);
}
/// <summary>
/// Get the number of threads used by the model.
/// </summary>
/// <returns>the number of threads used by the model</returns>
public int GetThreadCount()
{
return NativeMethods.llmodel_threadCount(_handle);
}
/// <summary>
/// Get the size of the internal state of the model.
/// </summary>
/// <remarks>
/// This state data is specific to the type of model you have created.
/// </remarks>
/// <returns>the size in bytes of the internal state of the model</returns>
public ulong GetStateSizeBytes()
{
return NativeMethods.llmodel_get_state_size(_handle);
}
/// <summary>
/// Saves the internal state of the model to the specified destination address.
/// </summary>
/// <param name="source">A pointer to the src</param>
/// <returns>The number of bytes copied</returns>
public unsafe ulong SaveStateData(byte* source)
{
return NativeMethods.llmodel_save_state_data(_handle, source);
}
/// <summary>
/// Restores the internal state of the model using data from the specified address.
/// </summary>
/// <param name="destination">A pointer to destination</param>
/// <returns>the number of bytes read</returns>
public unsafe ulong RestoreStateData(byte* destination)
{
return NativeMethods.llmodel_restore_state_data(_handle, destination);
}
/// <summary>
/// Check if the model is loaded.
/// </summary>
/// <returns>true if the model was loaded successfully, false otherwise.</returns>
public bool IsLoaded()
{
return NativeMethods.llmodel_isModelLoaded(_handle);
}
/// <summary>
/// Load the model from a file.
/// </summary>
/// <param name="modelPath">The path to the model file.</param>
/// <returns>true if the model was loaded successfully, false otherwise.</returns>
public bool Load(string modelPath)
{
return NativeMethods.llmodel_loadModel(_handle, modelPath, 2048, 100);
}
protected void Destroy()
{
NativeMethods.llmodel_model_destroy(_handle);
}
protected virtual void Dispose(bool disposing)
{
if (_disposed) return;
if (disposing)
{
// dispose managed state
}
Destroy();
_disposed = true;
}
public void Dispose()
{
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
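
A minimal usage sketch (not from the original sources): assume handle is a native model handle obtained through the C API declared in a later file, and that the placeholder model path points at a real file.

using var model = LLModel.Create(handle);
if (!model.Load("/path/to/model.bin")) // placeholder path
    throw new InvalidOperationException("model failed to load");

model.Prompt(
    "Name three colors.",
    new LLModelPromptContext { TokensToPredict = 64 },
    responseCallback: args =>
    {
        if (args.IsError) return false; // token id -1 carries an error string
        Console.Write(args.Response);   // stream tokens as they arrive
        return true;                    // keep generating
    });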

View File

@@ -1,147 +0,0 @@
namespace Gpt4All.Bindings;
/// <summary>
/// Wrapper around the llmodel_prompt_context structure for holding the prompt context.
/// </summary>
/// <remarks>
/// The implementation takes care of all the memory handling of the raw logits pointer and the
/// raw tokens pointer. Attempting to resize them or modify them in any way can lead to undefined behavior.
/// </remarks>
public unsafe class LLModelPromptContext
{
private llmodel_prompt_context _ctx;
internal ref llmodel_prompt_context UnderlyingContext => ref _ctx;
public LLModelPromptContext()
{
_ctx = new();
}
/// <summary>
/// logits of current context
/// </summary>
public Span<float> Logits => new(_ctx.logits, (int)_ctx.logits_size);
/// <summary>
/// the size of the raw logits vector
/// </summary>
public nuint LogitsSize
{
get => _ctx.logits_size;
set => _ctx.logits_size = value;
}
/// <summary>
/// current tokens in the context window
/// </summary>
public Span<int> Tokens => new(_ctx.tokens, (int)_ctx.tokens_size);
/// <summary>
/// the size of the raw tokens vector
/// </summary>
public nuint TokensSize
{
get => _ctx.tokens_size;
set => _ctx.tokens_size = value;
}
/// <summary>
/// top k logits to sample from
/// </summary>
public int TopK
{
get => _ctx.top_k;
set => _ctx.top_k = value;
}
/// <summary>
/// nucleus sampling probability threshold
/// </summary>
public float TopP
{
get => _ctx.top_p;
set => _ctx.top_p = value;
}
/// <summary>
/// min p sampling probability threshold
/// </summary>
public float MinP
{
get => _ctx.min_p;
set => _ctx.min_p = value;
}
/// <summary>
/// temperature to adjust model's output distribution
/// </summary>
public float Temperature
{
get => _ctx.temp;
set => _ctx.temp = value;
}
/// <summary>
/// number of tokens in past conversation
/// </summary>
public int PastNum
{
get => _ctx.n_past;
set => _ctx.n_past = value;
}
/// <summary>
/// number of predictions to generate in parallel
/// </summary>
public int Batches
{
get => _ctx.n_batch;
set => _ctx.n_batch = value;
}
/// <summary>
/// number of tokens to predict
/// </summary>
public int TokensToPredict
{
get => _ctx.n_predict;
set => _ctx.n_predict = value;
}
/// <summary>
/// penalty factor for repeated tokens
/// </summary>
public float RepeatPenalty
{
get => _ctx.repeat_penalty;
set => _ctx.repeat_penalty = value;
}
/// <summary>
/// last n tokens to penalize
/// </summary>
public int RepeatLastN
{
get => _ctx.repeat_last_n;
set => _ctx.repeat_last_n = value;
}
/// <summary>
/// number of tokens possible in context window
/// </summary>
public int ContextSize
{
get => _ctx.n_ctx;
set => _ctx.n_ctx = value;
}
/// <summary>
/// percent of context to erase if we exceed the context window
/// </summary>
public float ContextErase
{
get => _ctx.context_erase;
set => _ctx.context_erase = value;
}
}
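
As an illustrative sketch of how these knobs combine (the values below are arbitrary examples, not recommended defaults):

var ctx = new LLModelPromptContext
{
    TopK = 40,             // sample from the 40 most likely tokens
    TopP = 0.9f,           // nucleus sampling threshold
    Temperature = 0.7f,    // soften the output distribution
    TokensToPredict = 256, // generation budget
    RepeatPenalty = 1.1f,  // discourage repetition...
    RepeatLastN = 64,      // ...within the last 64 tokens
    ContextSize = 2048,    // tokens possible in the context window
    ContextErase = 0.5f,   // erase half the context on overflow
};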

View File

@@ -1,112 +0,0 @@
using System.Runtime.InteropServices;
namespace Gpt4All.Bindings;
public unsafe partial struct llmodel_prompt_context
{
public float* logits;
[NativeTypeName("size_t")]
public nuint logits_size;
[NativeTypeName("int32_t *")]
public int* tokens;
[NativeTypeName("size_t")]
public nuint tokens_size;
[NativeTypeName("int32_t")]
public int n_past;
[NativeTypeName("int32_t")]
public int n_ctx;
[NativeTypeName("int32_t")]
public int n_predict;
[NativeTypeName("int32_t")]
public int top_k;
public float top_p;
public float min_p;
public float temp;
[NativeTypeName("int32_t")]
public int n_batch;
public float repeat_penalty;
[NativeTypeName("int32_t")]
public int repeat_last_n;
public float context_erase;
}
#pragma warning disable CA2101
internal static unsafe partial class NativeMethods
{
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
[return: MarshalAs(UnmanagedType.I1)]
public delegate bool LlmodelResponseCallback(int token_id, [MarshalAs(UnmanagedType.LPUTF8Str)] string response);
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
[return: MarshalAs(UnmanagedType.I1)]
public delegate bool LlmodelPromptCallback(int token_id);
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
[return: MarshalAs(UnmanagedType.I1)]
public delegate bool LlmodelRecalculateCallback(bool isRecalculating);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)]
[return: NativeTypeName("llmodel_model")]
public static extern IntPtr llmodel_model_create2(
[NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path,
[NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string build_variant,
out IntPtr error);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
public static extern void llmodel_model_destroy([NativeTypeName("llmodel_model")] IntPtr model);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)]
[return: MarshalAs(UnmanagedType.I1)]
public static extern bool llmodel_loadModel(
[NativeTypeName("llmodel_model")] IntPtr model,
[NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path,
[NativeTypeName("int32_t")] int n_ctx,
[NativeTypeName("int32_t")] int ngl);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
[return: MarshalAs(UnmanagedType.I1)]
public static extern bool llmodel_isModelLoaded([NativeTypeName("llmodel_model")] IntPtr model);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
[return: NativeTypeName("uint64_t")]
public static extern ulong llmodel_get_state_size([NativeTypeName("llmodel_model")] IntPtr model);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
[return: NativeTypeName("uint64_t")]
public static extern ulong llmodel_save_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("uint8_t *")] byte* dest);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
[return: NativeTypeName("uint64_t")]
public static extern ulong llmodel_restore_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("const uint8_t *")] byte* src);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)]
public static extern void llmodel_prompt(
[NativeTypeName("llmodel_model")] IntPtr model,
[NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string prompt,
LlmodelPromptCallback prompt_callback,
LlmodelResponseCallback response_callback,
LlmodelRecalculateCallback recalculate_callback,
ref llmodel_prompt_context ctx);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
public static extern void llmodel_setThreadCount([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("int32_t")] int n_threads);
[DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)]
[return: NativeTypeName("int32_t")]
public static extern int llmodel_threadCount([NativeTypeName("llmodel_model")] IntPtr model);
}
#pragma warning restore CA2101

View File

@ -1,21 +0,0 @@
using System.Diagnostics;
namespace Gpt4All.Bindings;
/// <summary>Defines the type of a member as it was used in the native signature.</summary>
[AttributeUsage(AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.ReturnValue, AllowMultiple = false, Inherited = true)]
[Conditional("DEBUG")]
internal sealed partial class NativeTypeNameAttribute : Attribute
{
private readonly string _name;
/// <summary>Initializes a new instance of the <see cref="NativeTypeNameAttribute" /> class.</summary>
/// <param name="name">The name of the type that was used in the native signature.</param>
public NativeTypeNameAttribute(string name)
{
_name = name;
}
/// <summary>Gets the name of the type that was used in the native signature.</summary>
public string Name => _name;
}

View File

@ -1,27 +0,0 @@
using Gpt4All.Bindings;
namespace Gpt4All;
internal static class LLPromptContextExtensions
{
public static string Dump(this LLModelPromptContext context)
{
var ctx = context.UnderlyingContext;
return @$"
{{
logits_size = {ctx.logits_size}
tokens_size = {ctx.tokens_size}
n_past = {ctx.n_past}
n_ctx = {ctx.n_ctx}
n_predict = {ctx.n_predict}
top_k = {ctx.top_k}
top_p = {ctx.top_p}
min_p = {ctx.min_p}
temp = {ctx.temp}
n_batch = {ctx.n_batch}
repeat_penalty = {ctx.repeat_penalty}
repeat_last_n = {ctx.repeat_last_n}
context_erase = {ctx.context_erase}
}}";
}
}

View File

@ -1,26 +0,0 @@
using Gpt4All.Bindings;
namespace Gpt4All;
public static class PredictRequestOptionsExtensions
{
public static LLModelPromptContext ToPromptContext(this PredictRequestOptions opts)
{
return new LLModelPromptContext
{
LogitsSize = opts.LogitsSize,
TokensSize = opts.TokensSize,
TopK = opts.TopK,
TopP = opts.TopP,
MinP = opts.MinP,
PastNum = opts.PastConversationTokensNum,
RepeatPenalty = opts.RepeatPenalty,
Temperature = opts.Temperature,
RepeatLastN = opts.RepeatLastN,
Batches = opts.Batches,
ContextErase = opts.ContextErase,
ContextSize = opts.ContextSize,
TokensToPredict = opts.TokensToPredict
};
}
}

View File

@ -1,21 +0,0 @@
--config
exclude-funcs-with-body
--with-access-specifier
*=Public
--include-directory
..\..\..\gpt4all-backend\
--file
..\..\..\gpt4all-backend\llmodel_c.h
--libraryPath
libllmodel
--remap
sbyte*=IntPtr
void*=IntPtr
--namespace
Gpt4All.Bindings
--methodClassName
NativeMethods
--output
.\Bindings\NativeMethods.cs
--output-mode
CSharp

View File

@ -1,135 +0,0 @@
using System.Diagnostics;
using System.Runtime.CompilerServices;
using Gpt4All.Bindings;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
[assembly: InternalsVisibleTo("Gpt4All.Tests")]
namespace Gpt4All;
public class Gpt4All : IGpt4AllModel
{
private readonly ILLModel _model;
private readonly ILogger _logger;
private const string ResponseErrorMessage =
"The model reported an error during token generation error={ResponseError}";
/// <inheritdoc/>
public IPromptFormatter? PromptFormatter { get; set; }
internal Gpt4All(ILLModel model, ILogger? logger = null)
{
_model = model;
_logger = logger ?? NullLogger.Instance;
PromptFormatter = new DefaultPromptFormatter();
}
private string FormatPrompt(string prompt)
{
if (PromptFormatter == null) return prompt;
return PromptFormatter.FormatPrompt(prompt);
}
public Task<ITextPredictionResult> GetPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(text);
return Task.Run(() =>
{
_logger.LogInformation("Start prediction task");
var sw = Stopwatch.StartNew();
var result = new TextPredictionResult();
var context = opts.ToPromptContext();
var prompt = FormatPrompt(text);
try
{
_model.Prompt(prompt, context, responseCallback: e =>
{
if (e.IsError)
{
_logger.LogWarning(ResponseErrorMessage, e.Response);
result.Success = false;
result.ErrorMessage = e.Response;
return false;
}
result.Append(e.Response);
return true;
}, cancellationToken: cancellationToken);
}
catch (Exception e)
{
_logger.LogError(e, "Prompt error");
result.Success = false;
}
sw.Stop();
_logger.LogInformation("Prediction task completed elapsed={Elapsed}s", sw.Elapsed.TotalSeconds);
return (ITextPredictionResult)result;
}, CancellationToken.None);
}
public Task<ITextPredictionStreamingResult> GetStreamingPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default)
{
ArgumentNullException.ThrowIfNull(text);
var result = new TextPredictionStreamingResult();
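// Fire-and-forget: the channel-backed result is handed back to the caller immediately,
// while the background task streams tokens into it and completes the channel when done.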
_ = Task.Run(() =>
{
_logger.LogInformation("Start streaming prediction task");
var sw = Stopwatch.StartNew();
try
{
var context = opts.ToPromptContext();
var prompt = FormatPrompt(text);
_model.Prompt(prompt, context, responseCallback: e =>
{
if (e.IsError)
{
_logger.LogWarning(ResponseErrorMessage, e.Response);
result.Success = false;
result.ErrorMessage = e.Response;
return false;
}
result.Append(e.Response);
return true;
}, cancellationToken: cancellationToken);
}
catch (Exception e)
{
_logger.LogError(e, "Prompt error");
result.Success = false;
}
finally
{
result.Complete();
sw.Stop();
_logger.LogInformation("Prediction task completed elapsed={Elapsed}s", sw.Elapsed.TotalSeconds);
}
}, CancellationToken.None);
return Task.FromResult((ITextPredictionStreamingResult)result);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
_model.Dispose();
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
}

View File

@ -1,23 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TargetFramework>net8.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<!-- Windows -->
<None Include="..\runtimes\win-x64\native\*.dll" Pack="true" PackagePath="runtimes\win-x64\native\%(Filename)%(Extension)" />
<!-- Linux -->
<None Include="..\runtimes\linux-x64\native\*.so" Pack="true" PackagePath="runtimes\linux-x64\native\%(Filename)%(Extension)" />
<!-- MacOS -->
<None Include="..\runtimes\osx\native\*.dylib" Pack="true" PackagePath="runtimes\osx\native\%(Filename)%(Extension)" />
<Content Include="..\runtimes\osx\native\*.metal" Pack="true" PackagePath="contentFiles\any\any;content">
<PackageCopyToOutput>true</PackageCopyToOutput>
</Content>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,6 +0,0 @@
namespace Gpt4All.LibraryLoader;
public interface ILibraryLoader
{
LoadResult OpenLibrary(string? fileName);
}

View File

@ -1,53 +0,0 @@
using System.Runtime.InteropServices;
namespace Gpt4All.LibraryLoader;
internal class LinuxLibraryLoader : ILibraryLoader
{
#pragma warning disable CA2101
[DllImport("libdl.so", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
#pragma warning restore CA2101
public static extern IntPtr NativeOpenLibraryLibdl(string? filename, int flags);
#pragma warning disable CA2101
[DllImport("libdl.so.2", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
#pragma warning restore CA2101
public static extern IntPtr NativeOpenLibraryLibdl2(string? filename, int flags);
[DllImport("libdl.so", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
public static extern IntPtr GetLoadError();
[DllImport("libdl.so.2", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
public static extern IntPtr GetLoadError2();
public LoadResult OpenLibrary(string? fileName)
{
IntPtr loadedLib;
try
{
// open with the RTLD_LAZY flag (0x00001)
loadedLib = NativeOpenLibraryLibdl2(fileName, 0x00001);
}
catch (DllNotFoundException)
{
loadedLib = NativeOpenLibraryLibdl(fileName, 0x00001);
}
if (loadedLib == IntPtr.Zero)
{
string errorMessage;
try
{
errorMessage = Marshal.PtrToStringAnsi(GetLoadError2()) ?? "Unknown error";
}
catch (DllNotFoundException)
{
errorMessage = Marshal.PtrToStringAnsi(GetLoadError()) ?? "Unknown error";
}
return LoadResult.Failure(errorMessage);
}
return LoadResult.Success;
}
}

View File

@ -1,20 +0,0 @@
namespace Gpt4All.LibraryLoader;
public class LoadResult
{
private LoadResult(bool isSuccess, string? errorMessage)
{
IsSuccess = isSuccess;
ErrorMessage = errorMessage;
}
public static LoadResult Success { get; } = new(true, null);
public static LoadResult Failure(string errorMessage)
{
return new(false, errorMessage);
}
public bool IsSuccess { get; }
public string? ErrorMessage { get; }
}

View File

@ -1,28 +0,0 @@
using System.Runtime.InteropServices;
namespace Gpt4All.LibraryLoader;
internal class MacOsLibraryLoader : ILibraryLoader
{
#pragma warning disable CA2101
[DllImport("libdl.dylib", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
#pragma warning restore CA2101
public static extern IntPtr NativeOpenLibraryLibdl(string? filename, int flags);
[DllImport("libdl.dylib", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
public static extern IntPtr GetLoadError();
public LoadResult OpenLibrary(string? fileName)
{
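// 0x00001 is RTLD_LAZY: resolve symbols lazily on first use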
var loadedLib = NativeOpenLibraryLibdl(fileName, 0x00001);
if (loadedLib == IntPtr.Zero)
{
var errorMessage = Marshal.PtrToStringAnsi(GetLoadError()) ?? "Unknown error";
return LoadResult.Failure(errorMessage);
}
return LoadResult.Success;
}
}

View File

@ -1,81 +0,0 @@
#if !IOS && !MACCATALYST && !TVOS && !ANDROID
using System.Runtime.InteropServices;
#endif
namespace Gpt4All.LibraryLoader;
public static class NativeLibraryLoader
{
private static ILibraryLoader? defaultLibraryLoader;
/// <summary>
/// Sets the library loader used to load the native libraries. Overwrite this only if you want some custom loading.
/// </summary>
/// <param name="libraryLoader">The library loader to be used.</param>
public static void SetLibraryLoader(ILibraryLoader libraryLoader)
{
defaultLibraryLoader = libraryLoader;
}
internal static LoadResult LoadNativeLibrary(string? path = default, bool bypassLoading = true)
{
// If the user has handled loading the library themselves, we don't need to do anything.
if (bypassLoading)
{
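// The default: assume the caller (or the runtime's DllImport probing)
// takes care of resolving the native library.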
return LoadResult.Success;
}
var architecture = RuntimeInformation.OSArchitecture switch
{
Architecture.X64 => "x64",
Architecture.X86 => "x86",
Architecture.Arm => "arm",
Architecture.Arm64 => "arm64",
_ => throw new PlatformNotSupportedException(
$"Unsupported process architecture: {RuntimeInformation.OSArchitecture}")
};
var (platform, extension) = Environment.OSVersion.Platform switch
{
_ when RuntimeInformation.IsOSPlatform(OSPlatform.Windows) => ("win", "dll"),
_ when RuntimeInformation.IsOSPlatform(OSPlatform.Linux) => ("linux", "so"),
_ when RuntimeInformation.IsOSPlatform(OSPlatform.OSX) => ("osx", "dylib"),
_ => throw new PlatformNotSupportedException(
$"Unsupported OS platform: {Environment.OSVersion.Platform}")
};
// If the user hasn't set the path, we'll try to find it ourselves.
if (string.IsNullOrEmpty(path))
{
var libraryName = "libllmodel";
var assemblySearchPath = new[]
{
AppDomain.CurrentDomain.RelativeSearchPath,
Path.GetDirectoryName(typeof(NativeLibraryLoader).Assembly.Location),
Path.GetDirectoryName(Environment.GetCommandLineArgs()[0])
}.FirstOrDefault(it => !string.IsNullOrEmpty(it));
// Search for the library dll within the assembly search path. If it doesn't exist, for whatever reason, use the default path.
path = Directory.EnumerateFiles(assemblySearchPath ?? string.Empty, $"{libraryName}.{extension}", SearchOption.AllDirectories).FirstOrDefault() ?? Path.Combine("runtimes", $"{platform}-{architecture}", $"{libraryName}.{extension}");
}
if (defaultLibraryLoader != null)
{
return defaultLibraryLoader.OpenLibrary(path);
}
if (!File.Exists(path))
{
throw new FileNotFoundException($"Native Library not found in path {path}. " +
$"Verify you have included the native Gpt4All library in your application.");
}
ILibraryLoader libraryLoader = platform switch
{
"win" => new WindowsLibraryLoader(),
"osx" => new MacOsLibraryLoader(),
"linux" => new LinuxLibraryLoader(),
_ => throw new PlatformNotSupportedException($"Currently {platform} platform is not supported")
};
return libraryLoader.OpenLibrary(path);
}
}

View File

@ -1,24 +0,0 @@
using System.ComponentModel;
using System.Runtime.InteropServices;
namespace Gpt4All.LibraryLoader;
internal class WindowsLibraryLoader : ILibraryLoader
{
public LoadResult OpenLibrary(string? fileName)
{
var loadedLib = LoadLibrary(fileName);
if (loadedLib == IntPtr.Zero)
{
var errorCode = Marshal.GetLastWin32Error();
var errorMessage = new Win32Exception(errorCode).Message;
return LoadResult.Failure(errorMessage);
}
return LoadResult.Success;
}
[DllImport("kernel32", SetLastError = true, CharSet = CharSet.Auto)]
private static extern IntPtr LoadLibrary([MarshalAs(UnmanagedType.LPWStr)] string? lpFileName);
}

View File

@ -1,16 +0,0 @@
namespace Gpt4All;
public class DefaultPromptFormatter : IPromptFormatter
{
public string FormatPrompt(string prompt)
{
return $"""
### Instruction:
The prompt below is a question to answer, a task to complete, or a conversation
to respond to; decide which and write an appropriate response.
### Prompt:
{prompt}
### Response:
""";
}
}

View File

@ -1,62 +0,0 @@
using System.Diagnostics;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Logging;
using Gpt4All.Bindings;
using Gpt4All.LibraryLoader;
using System.Runtime.InteropServices;
namespace Gpt4All;
public class Gpt4AllModelFactory : IGpt4AllModelFactory
{
private readonly ILoggerFactory _loggerFactory;
private readonly ILogger _logger;
private static bool bypassLoading;
private static string? libraryPath;
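// The native library is loaded at most once per process; Lazy<T> with
// isThreadSafe: true guards concurrent factory construction.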
private static readonly Lazy<LoadResult> libraryLoaded = new(() =>
{
return NativeLibraryLoader.LoadNativeLibrary(Gpt4AllModelFactory.libraryPath, Gpt4AllModelFactory.bypassLoading);
}, true);
public Gpt4AllModelFactory(string? libraryPath = default, bool bypassLoading = true, ILoggerFactory? loggerFactory = null)
{
_loggerFactory = loggerFactory ?? NullLoggerFactory.Instance;
_logger = _loggerFactory.CreateLogger<Gpt4AllModelFactory>();
Gpt4AllModelFactory.libraryPath = libraryPath;
Gpt4AllModelFactory.bypassLoading = bypassLoading;
if (!libraryLoaded.Value.IsSuccess)
{
throw new Exception($"Failed to load native gpt4all library. Error: {libraryLoaded.Value.ErrorMessage}");
}
}
private Gpt4All CreateModel(string modelPath)
{
_logger.LogInformation("Creating model path={ModelPath}", modelPath);
IntPtr error;
var handle = NativeMethods.llmodel_model_create2(modelPath, "auto", out error);
if (error != IntPtr.Zero)
{
throw new Exception(Marshal.PtrToStringAnsi(error));
}
_logger.LogDebug("Model created handle=0x{ModelHandle:X8}", handle);
_logger.LogInformation("Model loading started");
var loadedSuccessfully = NativeMethods.llmodel_loadModel(handle, modelPath, 2048, 100);
_logger.LogInformation("Model loading completed success={ModelLoadSuccess}", loadedSuccessfully);
if (!loadedSuccessfully)
{
throw new Exception($"Failed to load model: '{modelPath}'");
}
var logger = _loggerFactory.CreateLogger<LLModel>();
var underlyingModel = LLModel.Create(handle, logger: logger);
Debug.Assert(underlyingModel.IsLoaded());
return new Gpt4All(underlyingModel, logger: logger);
}
public IGpt4AllModel LoadModel(string modelPath) => CreateModel(modelPath);
}

View File

@ -1,10 +0,0 @@
namespace Gpt4All;
public interface IGpt4AllModel : ITextPrediction, IDisposable
{
/// <summary>
/// The prompt formatter used to format the prompt before
/// feeding it to the model, if null no transformation is applied
/// </summary>
IPromptFormatter? PromptFormatter { get; set; }
}

View File

@ -1,6 +0,0 @@
namespace Gpt4All;
public interface IGpt4AllModelFactory
{
IGpt4AllModel LoadModel(string modelPath);
}

View File

@ -1,14 +0,0 @@
namespace Gpt4All;
/// <summary>
/// Formats a prompt
/// </summary>
public interface IPromptFormatter
{
/// <summary>
/// Format the provided prompt
/// </summary>
/// <param name="prompt">the input prompt</param>
/// <returns>The formatted prompt</returns>
string FormatPrompt(string prompt);
}

View File

@ -1,6 +0,0 @@
namespace Gpt4All;
public record ModelOptions
{
public int Threads { get; init; } = 4;
}

View File

@ -1,31 +0,0 @@
namespace Gpt4All;
/// <summary>
/// Interface for text prediction services
/// </summary>
public interface ITextPrediction
{
/// <summary>
/// Get prediction results for the prompt and provided options.
/// </summary>
/// <param name="text">The text to complete</param>
/// <param name="opts">The prediction settings</param>
/// <param name="cancellation">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
/// <returns>The prediction result generated by the model</returns>
Task<ITextPredictionResult> GetPredictionAsync(
string text,
PredictRequestOptions opts,
CancellationToken cancellation = default);
/// <summary>
/// Get streaming prediction results for the prompt and provided options.
/// </summary>
/// <param name="text">The text to complete</param>
/// <param name="opts">The prediction settings</param>
/// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
/// <returns>The prediction result generated by the model</returns>
Task<ITextPredictionStreamingResult> GetStreamingPredictionAsync(
string text,
PredictRequestOptions opts,
CancellationToken cancellationToken = default);
}

View File

@ -1,10 +0,0 @@
namespace Gpt4All;
public interface ITextPredictionResult
{
bool Success { get; }
string? ErrorMessage { get; }
Task<string> GetPredictionAsync(CancellationToken cancellationToken = default);
}

View File

@ -1,6 +0,0 @@
namespace Gpt4All;
public interface ITextPredictionStreamingResult : ITextPredictionResult
{
IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default);
}

View File

@ -1,32 +0,0 @@
namespace Gpt4All;
public record PredictRequestOptions
{
public nuint LogitsSize { get; init; } = 0;
public nuint TokensSize { get; init; } = 0;
public int PastConversationTokensNum { get; init; } = 0;
public int ContextSize { get; init; } = 1024;
public int TokensToPredict { get; init; } = 128;
public int TopK { get; init; } = 40;
public float TopP { get; init; } = 0.9f;
public float MinP { get; init; } = 0.0f;
public float Temperature { get; init; } = 0.1f;
public int Batches { get; init; } = 8;
public float RepeatPenalty { get; init; } = 1.2f;
public int RepeatLastN { get; init; } = 10;
public float ContextErase { get; init; } = 0.5f;
public static readonly PredictRequestOptions Defaults = new();
}

View File

@ -1,27 +0,0 @@
using System.Text;
namespace Gpt4All;
public record TextPredictionResult : ITextPredictionResult
{
private readonly StringBuilder _result;
public bool Success { get; internal set; } = true;
public string? ErrorMessage { get; internal set; }
internal TextPredictionResult()
{
_result = new StringBuilder();
}
internal void Append(string token)
{
_result.Append(token);
}
public Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
{
return Task.FromResult(_result.ToString());
}
}

View File

@ -1,49 +0,0 @@
using System.Text;
using System.Threading.Channels;
namespace Gpt4All;
public record TextPredictionStreamingResult : ITextPredictionStreamingResult
{
private readonly Channel<string> _channel;
public bool Success { get; internal set; } = true;
public string? ErrorMessage { get; internal set; }
public Task Completion => _channel.Reader.Completion;
internal TextPredictionStreamingResult()
{
_channel = Channel.CreateUnbounded<string>();
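// Unbounded channel: tokens written by the native callback are buffered until
// the consumer drains them via GetPredictionStreamingAsync.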
}
internal bool Append(string token)
{
return _channel.Writer.TryWrite(token);
}
internal void Complete()
{
_channel.Writer.Complete();
}
public async Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
{
var sb = new StringBuilder();
var tokens = GetPredictionStreamingAsync(cancellationToken).ConfigureAwait(false);
await foreach (var token in tokens)
{
sb.Append(token);
}
return sb.ToString();
}
public IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default)
{
return _channel.Reader.ReadAllAsync(cancellationToken);
}
}

View File

@ -1 +0,0 @@
ClangSharpPInvokeGenerator @(Get-Content .\GenLLModelBindings.rsp)

View File

@ -1,124 +0,0 @@
# C# GPT4All
This package contains a set of C# bindings around the `llmodel` C-API.
## Documentation
TBD
## Installation
Windows and Linux builds are available on NuGet: https://www.nuget.org/packages/Gpt4All
macOS support is WIP due to code signing issues; contributions are welcome.
## Project Structure
```
gpt4all-bindings/
└── csharp
   ├── Gpt4All // .NET Bindings
   ├── Gpt4All.Samples // Sample project
├── build_win-msvc.ps1 // Native build scripts
├── build_win-mingw.ps1
├── build_linux.sh
└── runtimes // [POST-BUILD] Platform-specific native libraries
├── win-x64
├── ...
└── linux-x64
```
## Prerequisites
On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home
macOS users do not need Vulkan, as GPT4All will use Metal instead.
## Local Build Instructions
> **Note**
> Tested On:
> - Windows 11 22H2 + VS2022 (CE) x64
> - Linux Ubuntu x64
> - Linux Ubuntu (WSL2) x64
1. Setup the repository
2. Build the native libraries for the platform of choice (see below)
3. Build the C# Bindings (.NET 6+ SDK is required)
```
git clone --recurse-submodules https://github.com/nomic-ai/gpt4all
cd gpt4all/gpt4all-bindings/csharp
```
### Linux
1. Set up the build environment and install the .NET 6+ SDK with the appropriate procedure for your distribution
```
sudo apt-get update
sudo apt-get install -y cmake build-essential
chmod +x ./build_linux.sh
```
2. `./build_linux.sh`
3. The native libraries should be present at `./runtimes/linux-x64/native`
### Windows - MinGW64
#### Additional requirements
- [MinGW64](https://www.mingw-w64.org/)
- CMake
1. Setup
```
choco install mingw
$env:Path += ";C:\ProgramData\mingw64\mingw64\bin"
choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System'
```
2. Run the `./build_win-mingw.ps1` build script
3. The native libraries should be present at `.\runtimes\win-x64\native`
### Windows - MSVC
#### Additional requirements
- Visual Studio 2022
1. Open a terminal using the `x64 Native Tools Command Prompt for VS 2022` (`vcvars64.bat`)
2. Run the `./build_win-msvc.ps1` build script
3. `libllmodel.dll` and `libllama.dll` should be present at `.\runtimes\win-x64`
> **Warning**
> If the build fails with: '**error C7555: use of designated initializers requires at least '/std:c++20'**'
>
> Modify `gpt4all/gpt4all-backend/CMakeLists.txt`, adding `CXX_STANDARD 20` to the `llmodel` target properties.
> ```cmake
> set_target_properties(llmodel PROPERTIES
> VERSION ${PROJECT_VERSION}
> CXX_STANDARD 20 # <---- ADD THIS -----------------------
> SOVERSION ${PROJECT_VERSION_MAJOR})
> ```
## C# Bindings Build Instructions
Build the `Gpt4All` (or `Gpt4All.Samples`) projects from within Visual Studio, or run `dotnet build` from the command line.
### Try the bindings
```csharp
using Gpt4All;
// load the model
var modelFactory = new Gpt4AllModelFactory();
using var model = modelFactory.LoadModel("./path/to/ggml-gpt4all-j-v1.3-groovy.bin");
var input = "Name 3 Colors";
// request a prediction
var result = await model.GetStreamingPredictionAsync(
input,
PredictRequestOptions.Defaults);
// asynchronously print the tokens as soon as they are produced by the model
await foreach(var token in result.GetPredictionStreamingAsync())
{
Console.Write(token);
}
```
Output:
```
gptj_model_load: loading model from 'ggml-gpt4all-j-v1.3-groovy.bin' - please wait ...
gptj_model_load: n_vocab = 50400
[...TRUNCATED...]
gptj_model_load: ggml ctx size = 5401.45 MB
gptj_model_load: kv self size = 896.00 MB
gptj_model_load: ................................... done
gptj_model_load: model size = 3609.38 MB / num tensors = 285
Black, Blue and White
```
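If you don't need token-by-token streaming, the same interfaces also support a blocking call. A minimal sketch, using the same placeholder model path as above:
```csharp
using Gpt4All;

var modelFactory = new Gpt4AllModelFactory();
using var model = modelFactory.LoadModel("./path/to/ggml-gpt4all-j-v1.3-groovy.bin");

// request the complete prediction in a single call
var result = await model.GetPredictionAsync("Name 3 Colors", PredictRequestOptions.Defaults);

if (result.Success)
    Console.WriteLine(await result.GetPredictionAsync());
else
    Console.WriteLine($"Prediction failed: {result.ErrorMessage}");
```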

View File

@ -1,10 +0,0 @@
#!/bin/sh
mkdir -p runtimes
rm -rf runtimes/linux-x64
mkdir -p runtimes/linux-x64/native
mkdir runtimes/linux-x64/build
cmake -S ../../gpt4all-backend -B runtimes/linux-x64/build
cmake --build runtimes/linux-x64/build --parallel --config Release
cp runtimes/linux-x64/build/libllmodel.so runtimes/linux-x64/native/libllmodel.so
cp runtimes/linux-x64/build/libgptj*.so runtimes/linux-x64/native/
cp runtimes/linux-x64/build/libllama*.so runtimes/linux-x64/native/

View File

@ -1,16 +0,0 @@
$ROOT_DIR = '.\runtimes\win-x64'
$BUILD_DIR = '.\runtimes\win-x64\build\mingw'
$LIBS_DIR = '.\runtimes\win-x64\native'
# cleanup env
Remove-Item -Force -Recurse $ROOT_DIR -ErrorAction SilentlyContinue | Out-Null
mkdir $BUILD_DIR | Out-Null
mkdir $LIBS_DIR | Out-Null
# build
cmake -G "MinGW Makefiles" -S ..\..\gpt4all-backend -B $BUILD_DIR
cmake --build $BUILD_DIR --parallel --config Release
# copy native dlls
cp "C:\ProgramData\mingw64\mingw64\bin\*dll" $LIBS_DIR
cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR

View File

@ -1,6 +0,0 @@
Remove-Item -Force -Recurse .\runtimes\win-x64\msvc -ErrorAction SilentlyContinue
mkdir .\runtimes\win-x64\msvc\build | Out-Null
cmake -G "Visual Studio 17 2022" -A X64 -S ..\..\gpt4all-backend -B .\runtimes\win-x64\msvc\build
cmake --build .\runtimes\win-x64\msvc\build --parallel --config Release
cp .\runtimes\win-x64\msvc\build\bin\Release\*.dll .\runtimes\win-x64
mv .\runtimes\win-x64\llmodel.dll .\runtimes\win-x64\libllmodel.dll

View File

@ -1 +0,0 @@
# GPT4All C# API

View File

@ -1,163 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
CMAKEFLAGS=
ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif
ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif
ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif
CCV := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)
# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif
#
# Compile flags
#
# keep the C standard at C11 and the C++ standard at C++17
CFLAGS = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c11 -fPIC
CXXFLAGS = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS =
# warnings
CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
CFLAGS += -pthread
CXXFLAGS += -pthread
endif
# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
# feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native
endif
ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Require c++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif
ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost in predict time.
ifeq ($(UNAME_S),Darwin)
CFLAGS += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif
ifdef LLAMA_OPENBLAS
CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas
LDFLAGS += -lopenblas
endif
ifdef LLAMA_GPROF
CFLAGS += -pg
CXXFLAGS += -pg
endif
ifneq ($(filter aarch64%,$(UNAME_M)),)
CFLAGS += -mcpu=native
CXXFLAGS += -mcpu=native
endif
ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, 2, 3
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif
ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 4
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif
ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 4
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif
#
# Print build information
#
$(info I go-gpt4all build info: )
$(info I UNAME_S: $(UNAME_S))
$(info I UNAME_P: $(UNAME_P))
$(info I UNAME_M: $(UNAME_M))
$(info I CFLAGS: $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS: $(LDFLAGS))
$(info I CMAKEFLAGS: $(CMAKEFLAGS))
$(info I CC: $(CCV))
$(info I CXX: $(CXXV))
$(info )
llmodel.o:
	[ -e buildllm ] || mkdir buildllm
	cd buildllm && cmake ../../../gpt4all-backend/ $(CMAKEFLAGS) && make
	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel_c.cpp.o ../llmodel_c.o
	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel.cpp.o ../llmodel.o
clean:
	rm -f *.o
	rm -f *.a
	rm -rf buildllm
	rm -rf example/main
binding.o: binding.cpp binding.h
	$(CXX) $(CXXFLAGS) binding.cpp -o binding.o -c $(LDFLAGS)
libgpt4all.a: binding.o llmodel.o
	ar src libgpt4all.a llmodel.o binding.o
test: libgpt4all.a
	@C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go test -v ./...
example/main: libgpt4all.a
	C_INCLUDE_PATH=$(INCLUDE_PATH) LIBRARY_PATH=$(LIBRARY_PATH) go build -o example/main ./example/

View File

@ -1,59 +0,0 @@
# GPT4All Golang bindings
The golang bindings have been tested on:
- MacOS
- Linux
### Usage
```
package main

import (
	"fmt"

	gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
)

func main() {
	// Load the model (the model type is auto-detected by the backend)
	model, err := gpt4all.New("model.bin", gpt4all.SetThreads(4))
	if err != nil {
		panic(err)
	}
	defer model.Free()

	// Print each token as it is generated; return false to stop generation early
	model.SetTokenCallback(func(s string) bool {
		fmt.Print(s)
		return true
	})

	_, err = model.Predict("Here are 4 steps to create a website:", "", "", gpt4all.SetTemperature(0.1))
	if err != nil {
		panic(err)
	}
}
```
## Building
In order to use the bindings you will need to build `libgpt4all.a`:
```
git clone --recurse-submodules https://github.com/nomic-ai/gpt4all
cd gpt4all/gpt4all-bindings/golang
make libgpt4all.a
```
To use the bindings in your own software:
- Import `github.com/nomic-ai/gpt4all/gpt4all-bindings/golang`;
- Compile `libgpt4all.a` (you can run `make libgpt4all.a` in the `gpt4all-bindings/golang` directory);
- Link your Go binary by setting the environment variables `C_INCLUDE_PATH` and `LIBRARY_PATH` to point to the directory containing `binding.h` and the directory containing `libgpt4all.a`, respectively.
- Note: the backend implementation `*.so`/`*.dylib`/`*.dll` files must be located near the binary produced with the bindings for this to work.
## Testing
To run tests, run `make test`:
```
git clone https://github.com/nomic-ai/gpt4all
cd gpt4all/gpt4all-bindings/golang
make test
```

View File

@ -1,107 +0,0 @@
#include "../../gpt4all-backend/llmodel_c.h"
#include "../../gpt4all-backend/llmodel.h"
#include "../../gpt4all-backend/llmodel_c.cpp"
#include "binding.h"
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <iostream>
#include <unistd.h>
void* load_model(const char *fname, int n_threads) {
// load the model
const char *new_error;
auto model = llmodel_model_create2(fname, "auto", &new_error);
if (model == nullptr) {
fprintf(stderr, "%s: error '%s'\n", __func__, new_error);
return nullptr;
}
if (!llmodel_loadModel(model, fname, 2048, 100)) {
llmodel_model_destroy(model);
return nullptr;
}
llmodel_setThreadCount(model, n_threads);
return model;
}
std::string res = "";
void * mm;
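// res and mm are globals because the callbacks below are captureless lambdas
// (required to convert them to C function pointers); as a consequence the
// binding keeps per-prompt state globally and is not safe for concurrent prompts.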
void model_prompt(const char *prompt, const char *prompt_template, int special, const char *fake_reply,
void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens,
int top_k, float top_p, float min_p, float temp, int n_batch,float ctx_erase)
{
llmodel_model* model = (llmodel_model*) m;
// std::string res = "";
auto lambda_prompt = [](int token_id) {
return true;
};
mm=model;
res="";
auto lambda_response = [](int token_id, const char *responsechars) {
res.append((char*)responsechars);
return !!getTokenCallback(mm, (char*)responsechars);
};
auto lambda_recalculate = [](bool is_recalculating) {
// You can handle recalculation requests here if needed
return is_recalculating;
};
llmodel_prompt_context* prompt_context = new llmodel_prompt_context{
.logits = NULL,
.logits_size = 0,
.tokens = NULL,
.tokens_size = 0,
.n_past = 0,
.n_ctx = 1024,
.n_predict = 50,
.top_k = 10,
.top_p = 0.9,
.min_p = 0.0,
.temp = 1.0,
.n_batch = 1,
.repeat_penalty = 1.2,
.repeat_last_n = 10,
.context_erase = 0.5
};
prompt_context->n_predict = tokens;
prompt_context->repeat_last_n = repeat_last_n;
prompt_context->repeat_penalty = repeat_penalty;
prompt_context->n_ctx = n_ctx;
prompt_context->top_k = top_k;
prompt_context->context_erase = ctx_erase;
prompt_context->top_p = top_p;
prompt_context->min_p = min_p;
prompt_context->temp = temp;
prompt_context->n_batch = n_batch;
llmodel_prompt(model, prompt, prompt_template,
lambda_prompt,
lambda_response,
lambda_recalculate,
prompt_context, special, fake_reply);
strcpy(result, res.c_str());
delete prompt_context;
}
void free_model(void *state_ptr) {
// state_ptr is the llmodel_model handle itself, as returned by load_model
llmodel_model_destroy(state_ptr);
}

View File

@ -1,19 +0,0 @@
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
void* load_model(const char *fname, int n_threads);
void model_prompt(const char *prompt, const char *prompt_template, int special, const char *fake_reply,
void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens,
int top_k, float top_p, float min_p, float temp, int n_batch,float ctx_erase);
void free_model(void *state_ptr);
extern unsigned char getTokenCallback(void *, char *);
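/* getTokenCallback is implemented on the Go side and exported via cgo; it is
   invoked once per generated token, and a zero return stops generation. */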
#ifdef __cplusplus
}
#endif

View File

@ -1,82 +0,0 @@
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"runtime"
"strings"
gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
)
var (
threads = 4
tokens = 128
)
func main() {
var model string
flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
flags.StringVar(&model, "m", "./models/7B/ggml-model-q4_0.bin", "path to q4_0.bin model file to load")
flags.IntVar(&threads, "t", runtime.NumCPU(), "number of threads to use during computation")
flags.IntVar(&tokens, "n", 512, "number of tokens to predict")
err := flags.Parse(os.Args[1:])
if err != nil {
fmt.Printf("Parsing program arguments failed: %s", err)
os.Exit(1)
}
l, err := gpt4all.New(model, gpt4all.SetThreads(threads))
if err != nil {
fmt.Println("Loading the model failed:", err.Error())
os.Exit(1)
}
fmt.Printf("Model loaded successfully.\n")
l.SetTokenCallback(func(token string) bool {
fmt.Print(token)
return true
})
reader := bufio.NewReader(os.Stdin)
for {
text := readMultiLineInput(reader)
_, err := l.Predict(text, "", "", gpt4all.SetTokens(tokens), gpt4all.SetTopK(90), gpt4all.SetTopP(0.86))
if err != nil {
panic(err)
}
fmt.Printf("\n\n")
}
}
// readMultiLineInput reads input until an empty line is entered.
func readMultiLineInput(reader *bufio.Reader) string {
var lines []string
fmt.Print(">>> ")
for {
line, err := reader.ReadString('\n')
if err != nil {
if err == io.EOF {
os.Exit(0)
}
fmt.Printf("Reading the prompt failed: %s", err)
os.Exit(1)
}
if len(strings.TrimSpace(line)) == 0 {
break
}
lines = append(lines, line)
}
text := strings.Join(lines, "")
return text
}

View File

@ -1,20 +0,0 @@
module github.com/nomic-ai/gpt4all/gpt4all-bindings/golang
go 1.19
require (
github.com/onsi/ginkgo/v2 v2.9.4
github.com/onsi/gomega v1.27.6
)
require (
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@ -1,40 +0,0 @@
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,112 +0,0 @@
package gpt4all
// #cgo CFLAGS: -I${SRCDIR}/../../gpt4all-backend/ -I${SRCDIR}/../../gpt4all-backend/llama.cpp -I./
// #cgo CXXFLAGS: -std=c++17 -I${SRCDIR}/../../gpt4all-backend/ -I${SRCDIR}/../../gpt4all-backend/llama.cpp -I./
// #cgo darwin LDFLAGS: -framework Accelerate
// #cgo darwin CXXFLAGS: -std=c++17
// #cgo LDFLAGS: -lgpt4all -lm -lstdc++ -ldl
// void* load_model(const char *fname, int n_threads);
// void model_prompt( const char *prompt, const char *prompt_template, int special, const char *fake_reply, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
// float top_p, float min_p, float temp, int n_batch,float ctx_erase);
// void free_model(void *state_ptr);
// extern unsigned char getTokenCallback(void *, char *);
// void llmodel_set_implementation_search_path(const char *path);
import "C"
import (
"fmt"
"runtime"
"strings"
"sync"
"unsafe"
)
// The following code is https://github.com/go-skynet/go-llama.cpp with small adaptations
type Model struct {
state unsafe.Pointer
}
func New(model string, opts ...ModelOption) (*Model, error) {
ops := NewModelOptions(opts...)
if ops.LibrarySearchPath != "" {
C.llmodel_set_implementation_search_path(C.CString(ops.LibrarySearchPath))
}
state := C.load_model(C.CString(model), C.int(ops.Threads))
if state == nil {
return nil, fmt.Errorf("failed loading model")
}
gpt := &Model{state: state}
// set a finalizer to remove any callbacks when the struct is reclaimed by the garbage collector.
runtime.SetFinalizer(gpt, func(g *Model) {
setTokenCallback(g.state, nil)
})
return gpt, nil
}
func (l *Model) Predict(text, template, fakeReplyText string, opts ...PredictOption) (string, error) {
po := NewPredictOptions(opts...)
input := C.CString(text)
if po.Tokens == 0 {
po.Tokens = 99999999
}
templateInput := C.CString(template)
fakeReplyInput := C.CString(fakeReplyText)
out := make([]byte, po.Tokens)
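// model_prompt copies the full response into this buffer with strcpy, so it
// must be large enough to hold the generated text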
C.model_prompt(input, templateInput, C.int(po.Special), fakeReplyInput, l.state, (*C.char)(unsafe.Pointer(&out[0])),
C.int(po.RepeatLastN), C.float(po.RepeatPenalty), C.int(po.ContextSize), C.int(po.Tokens),
C.int(po.TopK), C.float(po.TopP), C.float(po.MinP), C.float(po.Temperature), C.int(po.Batch),
C.float(po.ContextErase))
res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
res = strings.TrimPrefix(res, " ")
res = strings.TrimPrefix(res, text)
res = strings.TrimPrefix(res, "\n")
res = strings.TrimSuffix(res, "<|endoftext|>")
return res, nil
}
func (l *Model) Free() {
C.free_model(l.state)
}
func (l *Model) SetTokenCallback(callback func(token string) bool) {
setTokenCallback(l.state, callback)
}
var (
m sync.Mutex
callbacks = map[uintptr]func(string) bool{}
)
//export getTokenCallback
func getTokenCallback(statePtr unsafe.Pointer, token *C.char) bool {
m.Lock()
defer m.Unlock()
if callback, ok := callbacks[uintptr(statePtr)]; ok {
return callback(C.GoString(token))
}
return true
}
// setTokenCallback registers a token callback for the model. Pass in a nil callback to
// remove the callback.
func setTokenCallback(statePtr unsafe.Pointer, callback func(string) bool) {
m.Lock()
defer m.Unlock()
if callback == nil {
delete(callbacks, uintptr(statePtr))
} else {
callbacks[uintptr(statePtr)] = callback
}
}

View File

@ -1,13 +0,0 @@
package gpt4all_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestGPT(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "go-gpt4all-j test suite")
}

View File

@ -1,17 +0,0 @@
package gpt4all_test
import (
. "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("LLama binding", func() {
Context("Declaration", func() {
It("fails with no model", func() {
model, err := New("not-existing")
Expect(err).To(HaveOccurred())
Expect(model).To(BeNil())
})
})
})

View File

@ -1,138 +0,0 @@
package gpt4all
type PredictOptions struct {
ContextSize, RepeatLastN, Tokens, TopK, Batch, Special int
TopP, MinP, Temperature, ContextErase, RepeatPenalty float64
}
type PredictOption func(p *PredictOptions)
var DefaultOptions PredictOptions = PredictOptions{
Tokens: 200,
TopK: 10,
TopP: 0.90,
MinP: 0.0,
Temperature: 0.96,
Batch: 1,
Special: 0,
ContextErase: 0.55,
ContextSize: 1024,
RepeatLastN: 10,
RepeatPenalty: 1.2,
}
var DefaultModelOptions ModelOptions = ModelOptions{
Threads: 4,
}
type ModelOptions struct {
Threads int
LibrarySearchPath string
}
type ModelOption func(p *ModelOptions)
// SetTokens sets the number of tokens to generate.
func SetTokens(tokens int) PredictOption {
return func(p *PredictOptions) {
p.Tokens = tokens
}
}
// SetTopK sets the value for top-K sampling.
func SetTopK(topk int) PredictOption {
return func(p *PredictOptions) {
p.TopK = topk
}
}
// SetTopP sets the value for nucleus sampling.
func SetTopP(topp float64) PredictOption {
return func(p *PredictOptions) {
p.TopP = topp
}
}
// SetMinP sets the value for min-p sampling.
func SetMinP(minp float64) PredictOption {
return func(p *PredictOptions) {
p.MinP = minp
}
}
// SetRepeatPenalty sets the repeat penalty.
func SetRepeatPenalty(penalty float64) PredictOption {
	return func(p *PredictOptions) {
		p.RepeatPenalty = penalty
	}
}
// SetRepeatLastN sets how many of the most recent tokens the repeat penalty applies to.
func SetRepeatLastN(n int) PredictOption {
	return func(p *PredictOptions) {
		p.RepeatLastN = n
	}
}
// SetContextErase sets the context erase %.
func SetContextErase(ce float64) PredictOption {
return func(p *PredictOptions) {
p.ContextErase = ce
}
}
// SetTemperature sets the temperature value for text generation.
func SetTemperature(temp float64) PredictOption {
return func(p *PredictOptions) {
p.Temperature = temp
}
}
// SetBatch sets the batch size.
func SetBatch(size int) PredictOption {
return func(p *PredictOptions) {
p.Batch = size
}
}
// SetSpecial is true if special tokens in the prompt should be processed, false otherwise.
func SetSpecial(special bool) PredictOption {
return func(p *PredictOptions) {
if special {
p.Special = 1
} else {
p.Special = 0
}
}
}
// Create a new PredictOptions object with the given options.
func NewPredictOptions(opts ...PredictOption) PredictOptions {
p := DefaultOptions
for _, opt := range opts {
opt(&p)
}
return p
}
// SetThreads sets the number of threads to use for text generation.
func SetThreads(c int) ModelOption {
return func(p *ModelOptions) {
p.Threads = c
}
}
// SetLibrarySearchPath sets the dynamic libraries used by gpt4all for the various ggml implementations.
func SetLibrarySearchPath(t string) ModelOption {
return func(p *ModelOptions) {
p.LibrarySearchPath = t
}
}
// Create a new ModelOptions object with the given options.
func NewModelOptions(opts ...ModelOption) ModelOptions {
p := DefaultModelOptions
for _, opt := range opts {
opt(&p)
}
return p
}

View File

@ -1,5 +0,0 @@
# Make sure native directory never gets commited to git for the project.
/src/main/resources/native
# IntelliJ project file
*.iml

View File

@ -1,80 +0,0 @@
# Java Bindings Developer documents.
This document is meant for anyone looking to build the Java bindings from source, test a build locally, and perform a release.
## Building locally
Maven is the build tool used by the project. Maven version 3.8 or higher is recommended. Make sure **mvn**
is available on the command path.
The project targets Java 11, so make sure a JDK of version 11 or newer is installed.
### Setting up location of native shared libraries
The property **native.libs.location** in pom.xml may need to be set:
```
<properties>
...
<native.libs.location>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_3_Jun22_2023</native.libs.location>
</properties>
```
All the native shared libraries bundled with the Java binding jar will be copied from this location.
The directory structure is **native/linux**, **native/macos**, **native/windows**. These directories are copied
into the **src/main/resources** folder during the build process.
For the purposes of local testing, none of these directories have to be present, or only one OS type may be present.
If none of the native libraries are present in **native.libs.location**, the shared libraries will be searched for
in the path set by the **LLModel.LIBRARY_SEARCH_PATH** static variable in the Java source code that uses the bindings.
Alternatively, you can copy the shared libraries into **src/main/resources/native/linux** before
you build, but note that **src/main/resources/native** is in the .gitignore, so it will not be committed to sources.
### Building
To package the bindings jar run:
```
mvn package
```
This builds two jars: one containing only the Java bindings, and a fat jar that also includes the required dependencies.
To package and install the Java bindings to your local maven repository run:
```
mvn install
```
### Using in a sample application
You can check out a sample project that uses the java bindings here:
https://github.com/felix-zaslavskiy/gpt4all-java-bindings-sample.git
1. First, update the Java bindings dependency to whatever version you have installed in your local repository, such as **1.1.4-SNAPSHOT**
2. Second, update **Main.java** and set **baseModelPath** to the correct location of model weight files.
3. To make a runnable jar run:
```
mvn package
```
A fat jar is also created, which is easy to run from the command line:
```
java -jar target/gpt4all-java-bindings-sample-1.0-SNAPSHOT-jar-with-dependencies.jar
```
### Publish a public release.
Publishing a new version to the Maven Central repository requires a password and signing keys, which F.Z. currently maintains, so
he is responsible for making public releases.
The procedure is as follows:
For a snapshot release
Run:
```
mvn deploy -P signing-profile
```
For a non-snapshot release
Run:
```
mvn clean deploy -P signing-profile,release
```

View File

@ -1,126 +0,0 @@
# Java bindings
Java bindings let you load a gpt4all library into your Java application and execute text
generation using an intuitive and easy-to-use API. No GPU is required because gpt4all executes on the CPU.
The gpt4all models are quantized to fit easily into system RAM, using about 4 to 7 GB of it.
## Getting Started
You can add Java bindings into your Java project by adding the following dependency to your project:
**Maven**
```
<dependency>
<groupId>com.hexadevlabs</groupId>
<artifactId>gpt4all-java-binding</artifactId>
<version>1.1.5</version>
</dependency>
```
**Gradle**
```
implementation 'com.hexadevlabs:gpt4all-java-binding:1.1.5'
```
To add the library dependency for another build system see [Maven Central Java bindings](https://central.sonatype.com/artifact/com.hexadevlabs/gpt4all-java-binding/).
To download a model weights file, use a URL such as [`https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf`](https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf).
For information about other models available see the [model file list](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-chat#manual-download-of-models).
### Sample code
```java
import com.hexadevlabs.gpt4all.LLModel;

import java.nio.file.Path;

public class Example {
public static void main(String[] args) {
String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:";
// Replace the hardcoded path with the actual path where your model file resides
String modelFilePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin";
try (LLModel model = new LLModel(Path.of(modelFilePath))) {
// May generate up to 4096 tokens but generally stops early
LLModel.GenerationConfig config = LLModel.config()
.withNPredict(4096).build();
// Will also stream to standard output
String fullGeneration = model.generate(prompt, config, true);
} catch (Exception e) {
// Exceptions generally may happen if the model file fails to load
// for a number of reasons such as a file not found.
// It is possible that Java may not be able to dynamically load the native shared library or
// the llmodel shared library may not be able to dynamically load the backend
// implementation for the model file you provided.
//
// Once the LLModel class is successfully loaded into memory the text generation calls
// generally should not throw exceptions.
e.printStackTrace(); // Printing here but in a production system you may want to take some action.
}
}
}
```
For a Maven-based sample project that uses this library see this [sample project](https://github.com/felix-zaslavskiy/gpt4all-java-bindings-sample)
### Additional considerations
#### Logger warnings
The Java bindings library may produce a warning if you don't have an SLF4J binding included in your project:
```
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
```
The Java bindings only use logging for informational
purposes, so a logger is not essential to correctly use the library. You can ignore this warning if you don't have SLF4J bindings
in your project.
To add a simple logger using a Maven dependency you may use:
```
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.36</version>
</dependency>
```
#### Loading your native libraries
1. The Java bindings package JAR comes bundled with native library files for Windows, macOS and Linux. These library files are
copied to a temporary directory and loaded at runtime. Advanced users who want to package the shared libraries into Docker containers,
or who want to use a custom build of the shared libraries and ignore the ones bundled with the Java package, have the option
to load libraries from a local directory by setting a static property to the location of the library files.
There are no guarantees of compatibility when the bindings are used this way, so be careful if you really want to do it.
For example:
```java
class Example {
public static void main(String[] args) {
// gpt4all native shared libraries location
LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
// ... use the library normally
}
}
```
2. Not every AVX-only shared library is bundled with the JAR right now to reduce size. Only libgptj-avx is included.
If you are running into issues please let us know using the [gpt4all project issue tracker](https://github.com/nomic-ai/gpt4all/issues).
3. For Windows, the native library included in the JAR depends on specific Microsoft C and C++ (MSVC) runtime libraries, which may not be installed on your system.
If this is the case, you can download and install the latest x64 Microsoft Visual C++ Redistributable package from https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170
4. When running Java in a Docker container, it is advised to use the eclipse-temurin:17-jre parent image. Alpine-based parent images don't work due to the native library dependencies.
## Version history
1. Version **1.1.2**:
- Java bindings are compatible with gpt4all version 2.4.6
- Initial stable release with the initial feature set
2. Version **1.1.3**:
- Java bindings are compatible with gpt4all version 2.4.8
- Add static GPT4ALL_VERSION to signify the gpt4all version of the bindings
- Add PromptIsTooLongException for prompts that are longer than the context size.
- Replit model support, including Metal support on Mac hardware.
3. Version **1.1.4**:
- Java bindings are compatible with gpt4all version 2.4.11
- Falcon model support included.
4. Version **1.1.5**:
- Add a check for model file readability before loading the model.

View File

@ -1,6 +0,0 @@
## Needed
1. Integrate with the CircleCI build pipeline, like the C# binding.
## These are just ideas
1. Better chat completion function.
2. Chat completion that returns results in an OpenAI-compatible format.

View File

@ -1,216 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.hexadevlabs</groupId>
<artifactId>gpt4all-java-binding</artifactId>
<version>1.1.5</version>
<packaging>jar</packaging>
<properties>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<native.libs.location>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_4_July8_2023</native.libs.location>
</properties>
<name>${project.groupId}:${project.artifactId}</name>
<description>Java bindings for GPT4ALL LLM</description>
<url>https://github.com/nomic-ai/gpt4all</url>
<licenses>
<license>
<name>The Apache License, Version 2.0</name>
<url>https://github.com/nomic-ai/gpt4all/blob/main/LICENSE.txt</url>
</license>
</licenses>
<developers>
<developer>
<name>Felix Zaslavskiy</name>
<email>felixz@hexadevlabs.com</email>
<organizationUrl>https://github.com/felix-zaslavskiy/</organizationUrl>
</developer>
</developers>
<scm>
<connection>scm:git:git://github.com/nomic-ai/gpt4all.git</connection>
<developerConnection>scm:git:ssh://github.com/nomic-ai/gpt4all.git</developerConnection>
<url>https://github.com/nomic-ai/gpt4all/tree/main</url>
</scm>
<dependencies>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.13</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.36</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.9.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-junit-jupiter</artifactId>
<version>5.4.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>5.4.0</version>
<scope>test</scope>
</dependency>
</dependencies>
<distributionManagement>
<snapshotRepository>
<id>ossrh</id>
<url>https://s01.oss.sonatype.org/content/repositories/snapshots</url>
</snapshotRepository>
<repository>
<id>ossrh</id>
<url>https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/</url>
</repository>
</distributionManagement>
<build>
<resources>
<resource>
<directory>src/main/resources</directory>
</resource>
<resource>
<directory>${project.build.directory}/generated-resources</directory>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<forkCount>0</forkCount>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>3.3.1</version>
<executions>
<execution>
<id>copy-resources</id>
<!-- Copy the native libraries into generated-resources during the validate phase -->
<phase>validate</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/generated-resources</outputDirectory>
<resources>
<resource>
<directory>${native.libs.location}</directory>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
<version>1.6.13</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>
<nexusUrl>https://s01.oss.sonatype.org/</nexusUrl>
<autoReleaseAfterClose>true</autoReleaseAfterClose>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>2.2.1</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>3.5.0</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.6.0</version>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>signing-profile</id>
<!-- activation conditions here, if any -->
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
<version>3.1.0</version>
<executions>
<execution>
<id>sign-artifacts</id>
<phase>verify</phase>
<goals>
<goal>sign</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@ -1,641 +0,0 @@
package com.hexadevlabs.gpt4all;
import jnr.ffi.Pointer;
import jnr.ffi.byref.PointerByReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.Collectors;
public class LLModel implements AutoCloseable {
/**
* Config used for how to decode LLM outputs.
* A high temperature (closer to 1) gives more creative outputs,
* while a low temperature (closer to 0) produces more precise outputs.
* <p>
* Use the builder to set the settings you want.
*/
public static class GenerationConfig extends LLModelLibrary.LLModelPromptContext {
private GenerationConfig() {
super(jnr.ffi.Runtime.getSystemRuntime());
logits_size.set(0);
tokens_size.set(0);
n_past.set(0);
n_ctx.set(1024);
n_predict.set(128);
top_k.set(40);
top_p.set(0.95);
min_p.set(0.0);
temp.set(0.28);
n_batch.set(8);
repeat_penalty.set(1.1);
repeat_last_n.set(10);
context_erase.set(0.55);
}
public static class Builder {
private final GenerationConfig configToBuild;
public Builder() {
configToBuild = new GenerationConfig();
}
public Builder withNPast(int n_past) {
configToBuild.n_past.set(n_past);
return this;
}
public Builder withNCtx(int n_ctx) {
configToBuild.n_ctx.set(n_ctx);
return this;
}
public Builder withNPredict(int n_predict) {
configToBuild.n_predict.set(n_predict);
return this;
}
public Builder withTopK(int top_k) {
configToBuild.top_k.set(top_k);
return this;
}
public Builder withTopP(float top_p) {
configToBuild.top_p.set(top_p);
return this;
}
public Builder withMinP(float min_p) {
configToBuild.min_p.set(min_p);
return this;
}
public Builder withTemp(float temp) {
configToBuild.temp.set(temp);
return this;
}
public Builder withNBatch(int n_batch) {
configToBuild.n_batch.set(n_batch);
return this;
}
public Builder withRepeatPenalty(float repeat_penalty) {
configToBuild.repeat_penalty.set(repeat_penalty);
return this;
}
public Builder withRepeatLastN(int repeat_last_n) {
configToBuild.repeat_last_n.set(repeat_last_n);
return this;
}
public Builder withContextErase(float context_erase) {
configToBuild.context_erase.set(context_erase);
return this;
}
/**
*
* @return GenerationConfig the built config instance
*/
public GenerationConfig build() {
return configToBuild;
}
}
}
/**
* Shortcut for making a GenerationConfig builder.
*
* @return GenerationConfig.Builder - builder that can be used to make a GenerationConfig
*/
public static GenerationConfig.Builder config(){
return new GenerationConfig.Builder();
}
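// Illustrative builder usage (parameter values are examples only):
//   GenerationConfig config = LLModel.config()
//           .withNPredict(64)
//           .withTemp(0.1f)
//           .build();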
/**
* This may be set before any Model instance classes are instantiated to
* set where the native shared libraries are to be found.
* <p>
* This may be needed if setting the library search path by standard means is not available,
* or if loading the libraries from the temp folder bundled with the binding JAR is not desirable.
*/
public static String LIBRARY_SEARCH_PATH;
/**
* Generally for debugging purposes only. Will print
* the numerical tokens as they are generated instead of the string representations.
* Will also print out the processed input tokens as numbers to standard out.
*/
public static boolean OUTPUT_DEBUG = false;
private static final Logger logger = LoggerFactory.getLogger(LLModel.class);
/**
* The version of GPT4ALL that this binding is built for.
* The binding is guaranteed to work with this version of
* the GPT4ALL native libraries. The binding may work with older
* versions, but that is not guaranteed.
*/
public static final String GPT4ALL_VERSION = "2.4.11";
protected static LLModelLibrary library;
protected Pointer model;
protected String modelName;
/**
* Package private default constructor, for testing purposes.
*/
LLModel(){
}
public LLModel(Path modelPath) {
logger.info("Java bindings for gpt4all version: " + GPT4ALL_VERSION);
if(library==null) {
if (LIBRARY_SEARCH_PATH != null){
library = Util.loadSharedLibrary(LIBRARY_SEARCH_PATH);
library.llmodel_set_implementation_search_path(LIBRARY_SEARCH_PATH);
} else {
// Copy system libraries to Temp folder
Path tempLibraryDirectory = Util.copySharedLibraries();
library = Util.loadSharedLibrary(tempLibraryDirectory.toString());
library.llmodel_set_implementation_search_path(tempLibraryDirectory.toString());
}
}
// modelType = type;
modelName = modelPath.getFileName().toString();
String modelPathAbs = modelPath.toAbsolutePath().toString();
PointerByReference error = new PointerByReference();
// Check if model file exists
if(!Files.exists(modelPath)){
throw new IllegalStateException("Model file does not exist: " + modelPathAbs);
}
// Check if file is Readable
if(!Files.isReadable(modelPath)){
throw new IllegalStateException("Model file cannot be read: " + modelPathAbs);
}
// Create the model struct. The correct backend is loaded dynamically based on the model type.
model = library.llmodel_model_create2(modelPathAbs, "auto", error);
if(model == null) {
throw new IllegalStateException("Could not load, gpt4all backend returned error: " + error.getValue().getString(0));
}
library.llmodel_loadModel(model, modelPathAbs, 2048, 100);
if(!library.llmodel_isModelLoaded(model)){
throw new IllegalStateException("The model " + modelName + " could not be loaded");
}
}
public void setThreadCount(int nThreads) {
library.llmodel_setThreadCount(this.model, nThreads);
}
public int threadCount() {
return library.llmodel_threadCount(this.model);
}
/**
* Generate text after the prompt
*
* @param prompt The text prompt to complete
* @param generationConfig What generation settings to use while generating text
* @return String The complete generated text
*/
public String generate(String prompt, GenerationConfig generationConfig) {
return generate(prompt, generationConfig, false);
}
/**
* Generate text after the prompt
*
* @param prompt The text prompt to complete
* @param generationConfig What generation settings to use while generating text
* @param streamToStdOut Should the generation be streamed to standard output. Useful for troubleshooting.
* @return String The complete generated text
*/
public String generate(String prompt, GenerationConfig generationConfig, boolean streamToStdOut) {
ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream();
ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream();
LLModelLibrary.ResponseCallback responseCallback = getResponseCallback(streamToStdOut, bufferingForStdOutStream, bufferingForWholeGeneration);
library.llmodel_prompt(this.model,
prompt,
(int tokenID) -> {
if(LLModel.OUTPUT_DEBUG)
System.out.println("token " + tokenID);
return true; // continue processing
},
responseCallback,
(boolean isRecalculating) -> {
if(LLModel.OUTPUT_DEBUG)
System.out.println("recalculating");
return isRecalculating; // continue generating
},
generationConfig);
return bufferingForWholeGeneration.toString(StandardCharsets.UTF_8);
}
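// Illustrative usage (the model path is hypothetical):
//   try (LLModel model = new LLModel(Path.of("/models/some-model.bin"))) {
//       String answer = model.generate("What is 1+1?", LLModel.config().build(), false);
//   }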
/**
* Callback invoked by the prompt method as text is generated.
*
* @param streamToStdOut Whether generated text should be sent to standard output.
* @param bufferingForStdOutStream Output stream used for buffering bytes for standard output.
* @param bufferingForWholeGeneration Output stream used for buffering a complete generation.
* @return LLModelLibrary.ResponseCallback the lambda invoked as the response callback.
*/
static LLModelLibrary.ResponseCallback getResponseCallback(boolean streamToStdOut, ByteArrayOutputStream bufferingForStdOutStream, ByteArrayOutputStream bufferingForWholeGeneration) {
return (int tokenID, Pointer response) -> {
if(LLModel.OUTPUT_DEBUG)
System.out.print("Response token " + tokenID + " " );
// For all models, if the input sequence in tokens is longer than the model context length,
// an error is generated.
if(tokenID==-1){
throw new PromptIsTooLongException(response.getString(0, 1000, StandardCharsets.UTF_8));
}
long len = 0;
byte nextByte;
do{
try {
nextByte = response.getByte(len);
} catch(IndexOutOfBoundsException e){
// Not sure if this can ever happen, but just in case
// the generation does not terminate with a null (0) byte.
throw new RuntimeException("Empty array or not null terminated");
}
len++;
if(nextByte!=0) {
bufferingForWholeGeneration.write(nextByte);
if(streamToStdOut){
bufferingForStdOutStream.write(nextByte);
// Test if Buffer is UTF8 valid string.
byte[] currentBytes = bufferingForStdOutStream.toByteArray();
String validString = Util.getValidUtf8(currentBytes);
if(validString!=null){ // is valid string
System.out.print(validString);
// reset the buffer for next utf8 sequence to buffer
bufferingForStdOutStream.reset();
}
}
}
} while(nextByte != 0);
return true; // continue generating
};
}
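// Note: a single UTF-8 code point may arrive split across multiple response
// callbacks (e.g. a 4-byte emoji delivered as two tokens), so bytes are
// buffered until they decode as valid UTF-8 before being printed to stdout.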
/**
* The list of messages for the conversation.
*/
public static class Messages {
private final List<PromptMessage> messages = new ArrayList<>();
public Messages(PromptMessage...messages) {
this.messages.addAll(Arrays.asList(messages));
}
public Messages(List<PromptMessage> messages) {
this.messages.addAll(messages);
}
public Messages addPromptMessage(PromptMessage promptMessage) {
this.messages.add(promptMessage);
return this;
}
List<PromptMessage> toList() {
return Collections.unmodifiableList(this.messages);
}
List<Map<String, String>> toListMap() {
return messages.stream()
.map(PromptMessage::toMap).collect(Collectors.toList());
}
}
/**
* A message in the conversation, identical to OpenAI's chat message.
*/
public static class PromptMessage {
private static final String ROLE = "role";
private static final String CONTENT = "content";
private final Map<String, String> message = new HashMap<>();
public PromptMessage() {
}
public PromptMessage(Role role, String content) {
addRole(role);
addContent(content);
}
public PromptMessage addRole(Role role) {
return this.addParameter(ROLE, role.type());
}
public PromptMessage addContent(String content) {
return this.addParameter(CONTENT, content);
}
public PromptMessage addParameter(String key, String value) {
this.message.put(key, value);
return this;
}
public String content() {
return this.parameter(CONTENT);
}
public Role role() {
String role = this.parameter(ROLE);
return Role.from(role);
}
public String parameter(String key) {
return this.message.get(key);
}
Map<String, String> toMap() {
return Collections.unmodifiableMap(this.message);
}
}
public enum Role {
SYSTEM("system"), ASSISTANT("assistant"), USER("user");
private final String type;
String type() {
return this.type;
}
static Role from(String type) {
if (type == null) {
return null;
}
switch (type) {
case "system": return SYSTEM;
case "assistant": return ASSISTANT;
case "user": return USER;
default: throw new IllegalArgumentException(
String.format("You passed %s type but only %s are supported",
type, Arrays.toString(Role.values())
)
);
}
}
Role(String type) {
this.type = type;
}
@Override
public String toString() {
return type();
}
}
/**
* The result of the completion, similar to OpenAI's format.
*/
public static class CompletionReturn {
private String model;
private Usage usage;
private Choices choices;
public CompletionReturn(String model, Usage usage, Choices choices) {
this.model = model;
this.usage = usage;
this.choices = choices;
}
public Choices choices() {
return choices;
}
public String model() {
return model;
}
public Usage usage() {
return usage;
}
}
/**
* The generated completions.
*/
public static class Choices {
private final List<CompletionChoice> choices = new ArrayList<>();
public Choices(List<CompletionChoice> choices) {
this.choices.addAll(choices);
}
public Choices(CompletionChoice...completionChoices){
this.choices.addAll(Arrays.asList(completionChoices));
}
public Choices addCompletionChoice(CompletionChoice completionChoice) {
this.choices.add(completionChoice);
return this;
}
public CompletionChoice first() {
return this.choices.get(0);
}
public int totalChoices() {
return this.choices.size();
}
public CompletionChoice get(int index) {
return this.choices.get(index);
}
public List<CompletionChoice> choices() {
return Collections.unmodifiableList(choices);
}
}
/**
* A completion choice, similar to OpenAI's format.
*/
public static class CompletionChoice extends PromptMessage {
public CompletionChoice(Role role, String content) {
super(role, content);
}
}
public static class ChatCompletionResponse {
public String model;
public Usage usage;
public List<Map<String, String>> choices;
// Getters and setters
}
public static class Usage {
public int promptTokens;
public int completionTokens;
public int totalTokens;
// Getters and setters
}
public CompletionReturn chatCompletionResponse(Messages messages,
GenerationConfig generationConfig) {
return chatCompletion(messages, generationConfig, false, false);
}
/**
* chatCompletion formats the existing chat conversation into a template that is
* easier for chat UIs to process. It is not strictly necessary, as the generate method
* may be used directly to make generations with gpt models.
*
* @param messages Messages object holding the messages to send to the GPT model
* @param generationConfig How to decode/process the generation.
* @param streamToStdOut Whether tokens should be sent to standard output as they are generated.
* @param outputFullPromptToStdOut Whether the full prompt built out of the messages should be sent to standard output.
* @return CompletionReturn contains stats and the generated text.
*/
public CompletionReturn chatCompletion(Messages messages,
GenerationConfig generationConfig, boolean streamToStdOut,
boolean outputFullPromptToStdOut) {
String fullPrompt = buildPrompt(messages.toListMap());
if(outputFullPromptToStdOut)
System.out.print(fullPrompt);
String generatedText = generate(fullPrompt, generationConfig, streamToStdOut);
final CompletionChoice promptMessage = new CompletionChoice(Role.ASSISTANT, generatedText);
final Choices choices = new Choices(promptMessage);
final Usage usage = getUsage(fullPrompt, generatedText);
return new CompletionReturn(this.modelName, usage, choices);
}
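// Illustrative usage (the model path is hypothetical):
//   try (LLModel model = new LLModel(Path.of("/models/some-model.bin"))) {
//       LLModel.Messages messages = new LLModel.Messages(
//               new LLModel.PromptMessage(LLModel.Role.USER, "Add 2+2"));
//       LLModel.CompletionReturn result =
//               model.chatCompletion(messages, LLModel.config().build(), false, false);
//   }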
public ChatCompletionResponse chatCompletion(List<Map<String, String>> messages,
GenerationConfig generationConfig) {
return chatCompletion(messages, generationConfig, false, false);
}
/**
* chatCompletion formats the existing chat conversation into a template that is
* easier for chat UIs to process. It is not strictly necessary, as the generate method
* may be used directly to make generations with gpt models.
*
* @param messages List of Maps with "role" and "content" entries, e.g. "role"-&gt;"user", "content"-&gt;"..."
* @param generationConfig How to decode/process the generation.
* @param streamToStdOut Whether tokens should be sent to standard output as they are generated.
* @param outputFullPromptToStdOut Whether the full prompt built out of the messages should be sent to standard output.
* @return ChatCompletionResponse contains stats and the generated text.
*/
public ChatCompletionResponse chatCompletion(List<Map<String, String>> messages,
GenerationConfig generationConfig, boolean streamToStdOut,
boolean outputFullPromptToStdOut) {
String fullPrompt = buildPrompt(messages);
if(outputFullPromptToStdOut)
System.out.print(fullPrompt);
String generatedText = generate(fullPrompt, generationConfig, streamToStdOut);
ChatCompletionResponse response = new ChatCompletionResponse();
response.model = this.modelName;
response.usage = getUsage(fullPrompt, generatedText);
Map<String, String> message = new HashMap<>();
message.put("role", "assistant");
message.put("content", generatedText);
response.choices = List.of(message);
return response;
}
private Usage getUsage(String fullPrompt, String generatedText) {
Usage usage = new Usage();
// Note: character counts are used here as a rough proxy for token counts.
usage.promptTokens = fullPrompt.length();
usage.completionTokens = generatedText.length();
usage.totalTokens = fullPrompt.length() + generatedText.length();
return usage;
}
protected static String buildPrompt(List<Map<String, String>> messages) {
StringBuilder fullPrompt = new StringBuilder();
for (Map<String, String> message : messages) {
if ("system".equals(message.get("role"))) {
String systemMessage = message.get("content") + "\n";
fullPrompt.append(systemMessage);
}
}
fullPrompt.append("### Instruction: \n" +
"The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.\n" +
"### Prompt: ");
for (Map<String, String> message : messages) {
if ("user".equals(message.get("role"))) {
String userMessage = "\n" + message.get("content");
fullPrompt.append(userMessage);
}
if ("assistant".equals(message.get("role"))) {
String assistantMessage = "\n### Response: " + message.get("content");
fullPrompt.append(assistantMessage);
}
}
fullPrompt.append("\n### Response:");
return fullPrompt.toString();
}
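// For example, for messages [system: "You are a helpful assistant", user: "Add 2+2"],
// the built prompt is:
//   You are a helpful assistant
//   ### Instruction:
//   The prompt below is a question to answer, a task to complete, or a
//   conversation to respond to; decide which and write an appropriate response.
//   ### Prompt:
//   Add 2+2
//   ### Response: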
@Override
public void close() throws Exception {
library.llmodel_model_destroy(model);
}
}

View File

@ -1,81 +0,0 @@
package com.hexadevlabs.gpt4all;
import jnr.ffi.Pointer;
import jnr.ffi.byref.PointerByReference;
import jnr.ffi.Struct;
import jnr.ffi.annotations.Delegate;
import jnr.ffi.annotations.Encoding;
import jnr.ffi.annotations.In;
import jnr.ffi.annotations.Out;
import jnr.ffi.types.u_int64_t;
/**
* The basic native library interface that provides all the LLM functions.
*/
public interface LLModelLibrary {
interface PromptCallback {
@Delegate
boolean invoke(int token_id);
}
interface ResponseCallback {
@Delegate
boolean invoke(int token_id, Pointer response);
}
interface RecalculateCallback {
@Delegate
boolean invoke(boolean is_recalculating);
}
class LLModelError extends Struct {
public final Struct.AsciiStringRef message = new Struct.AsciiStringRef();
public final int32_t status = new int32_t();
public LLModelError(jnr.ffi.Runtime runtime) {
super(runtime);
}
}
class LLModelPromptContext extends Struct {
public final Pointer logits = new Pointer();
public final ssize_t logits_size = new ssize_t();
public final Pointer tokens = new Pointer();
public final ssize_t tokens_size = new ssize_t();
public final int32_t n_past = new int32_t();
public final int32_t n_ctx = new int32_t();
public final int32_t n_predict = new int32_t();
public final int32_t top_k = new int32_t();
public final Float top_p = new Float();
public final Float min_p = new Float();
public final Float temp = new Float();
public final int32_t n_batch = new int32_t();
public final Float repeat_penalty = new Float();
public final int32_t repeat_last_n = new int32_t();
public final Float context_erase = new Float();
public LLModelPromptContext(jnr.ffi.Runtime runtime) {
super(runtime);
}
}
Pointer llmodel_model_create2(String model_path, String build_variant, PointerByReference error);
void llmodel_model_destroy(Pointer model);
boolean llmodel_loadModel(Pointer model, String model_path, int n_ctx, int ngl);
boolean llmodel_isModelLoaded(Pointer model);
@u_int64_t long llmodel_get_state_size(Pointer model);
@u_int64_t long llmodel_save_state_data(Pointer model, Pointer dest);
@u_int64_t long llmodel_restore_state_data(Pointer model, Pointer src);
void llmodel_set_implementation_search_path(String path);
// ctx was originally annotated @Out; without an annotation the call crashed, so @In is used.
void llmodel_prompt(Pointer model, @Encoding("UTF-8") String prompt,
PromptCallback prompt_callback,
ResponseCallback response_callback,
RecalculateCallback recalculate_callback,
@In LLModelPromptContext ctx);
void llmodel_setThreadCount(Pointer model, int n_threads);
int llmodel_threadCount(Pointer model);
}
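// Typical call sequence, as used by LLModel: llmodel_model_create2 ->
// llmodel_loadModel -> llmodel_isModelLoaded -> llmodel_prompt (repeatedly) ->
// llmodel_model_destroy.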

View File

@ -1,7 +0,0 @@
package com.hexadevlabs.gpt4all;
public class PromptIsTooLongException extends RuntimeException {
public PromptIsTooLongException(String message) {
super(message);
}
}

View File

@ -1,160 +0,0 @@
package com.hexadevlabs.gpt4all;
import jnr.ffi.LibraryLoader;
import jnr.ffi.LibraryOption;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class Util {
private static final Logger logger = LoggerFactory.getLogger(Util.class);
private static final CharsetDecoder cs = StandardCharsets.UTF_8.newDecoder();
public static LLModelLibrary loadSharedLibrary(String librarySearchPath){
String libraryName = "llmodel";
Map<LibraryOption, Object> libraryOptions = new HashMap<>();
libraryOptions.put(LibraryOption.LoadNow, true); // load immediately instead of lazily (i.e. on first use)
libraryOptions.put(LibraryOption.IgnoreError, false); // do not ignore native error status; errno is saved after each call
if(librarySearchPath!=null) {
Map<String, List<String>> searchPaths = new HashMap<>();
searchPaths.put(libraryName, List.of(librarySearchPath));
return LibraryLoader.loadLibrary(LLModelLibrary.class,
libraryOptions,
searchPaths,
libraryName
);
}else {
return LibraryLoader.loadLibrary(LLModelLibrary.class,
libraryOptions,
libraryName
);
}
}
/**
* Copy shared library files from the resource package to
* a target temp directory.
*
* @return Path path to the temp directory holding the shared libraries
*/
public static Path copySharedLibraries() {
try {
// Identify the OS and architecture
String osName = System.getProperty("os.name").toLowerCase();
boolean isWindows = osName.startsWith("windows");
boolean isMac = osName.startsWith("mac os x");
boolean isLinux = osName.startsWith("linux");
if(isWindows) osName = "windows";
if(isMac) osName = "macos";
if(isLinux) osName = "linux";
//String osArch = System.getProperty("os.arch");
// Create a temporary directory
Path tempDirectory = Files.createTempDirectory("nativeLibraries");
tempDirectory.toFile().deleteOnExit();
String[] libraryNames = {
"gptj-default",
"gptj-avxonly",
"llmodel",
"mpt-default",
"llamamodel-230511-default",
"llamamodel-230519-default",
"llamamodel-mainline-default",
"llamamodel-mainline-metal",
"replit-mainline-default",
"replit-mainline-metal",
"ggml-metal.metal",
"falcon-default"
};
for (String libraryName : libraryNames) {
if(!isMac && (
libraryName.equals("replit-mainline-metal")
|| libraryName.equals("llamamodel-mainline-metal")
|| libraryName.equals("ggml-metal.metal"))
) continue;
if(isWindows){
libraryName = libraryName + ".dll";
} else if(isMac){
if(!libraryName.equals("ggml-metal.metal"))
libraryName = "lib" + libraryName + ".dylib";
} else if(isLinux) {
libraryName = "lib"+ libraryName + ".so";
}
// Construct the resource path based on the OS and architecture
String nativeLibraryPath = "/native/" + osName + "/" + libraryName;
// Get the library resource as a stream
InputStream in = Util.class.getResourceAsStream(nativeLibraryPath);
if (in == null) {
throw new RuntimeException("Unable to find native library: " + nativeLibraryPath);
}
// Create a file in the temporary directory with the original library name
Path tempLibraryPath = tempDirectory.resolve(libraryName);
// Use Files.copy to copy the library to the temporary file
Files.copy(in, tempLibraryPath, StandardCopyOption.REPLACE_EXISTING);
// Close the input stream
in.close();
}
// Add shutdown hook to delete tempDir on JVM exit
// On Windows, deleting DLL files that are loaded into memory is not possible.
if(!isWindows) {
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
try {
Files.walk(tempDirectory)
.sorted(Comparator.reverseOrder())
.map(Path::toFile)
.forEach(file -> {
try {
Files.delete(file.toPath());
} catch (IOException e) {
logger.error("Deleting temp library file", e);
}
});
} catch (IOException e) {
logger.error("Deleting temp directory for libraries", e);
}
}));
}
return tempDirectory;
} catch (IOException e) {
throw new RuntimeException("Failed to load native libraries", e);
}
}
public static String getValidUtf8(byte[] bytes) {
try {
return cs.decode(ByteBuffer.wrap(bytes)).toString();
} catch (CharacterCodingException e) {
return null;
}
}
}

View File

@ -1,182 +0,0 @@
package com.hexadevlabs.gpt4all;
import jnr.ffi.Memory;
import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.*;
/**
* These tests only exercise the Java implementation, as the underlying backend can't be mocked.
* They do serve the purpose of validating the Java bits that do
* not directly depend on the functioning of the underlying gpt4all library.
*/
@ExtendWith(MockitoExtension.class)
public class BasicTests {
@Test
public void simplePromptWithObject(){
LLModel model = Mockito.spy(new LLModel());
LLModel.GenerationConfig config =
LLModel.config()
.withNPredict(20)
.build();
// The generate method will return "4"
doReturn("4").when( model ).generate(anyString(), eq(config), eq(true));
LLModel.PromptMessage promptMessage1 = new LLModel.PromptMessage(LLModel.Role.SYSTEM, "You are a helpful assistant");
LLModel.PromptMessage promptMessage2 = new LLModel.PromptMessage(LLModel.Role.USER, "Add 2+2");
LLModel.Messages messages = new LLModel.Messages(promptMessage1, promptMessage2);
LLModel.CompletionReturn response = model.chatCompletion(
messages, config, true, true);
assertTrue( response.choices().first().content().contains("4") );
// Verifies the prompt and response have the expected combined length.
assertEquals( 224 , response.usage().totalTokens );
}
@Test
public void simplePrompt(){
LLModel model = Mockito.spy(new LLModel());
LLModel.GenerationConfig config =
LLModel.config()
.withNPredict(20)
.build();
// The generate method will return "4"
doReturn("4").when( model ).generate(anyString(), eq(config), eq(true));
LLModel.ChatCompletionResponse response= model.chatCompletion(
List.of(Map.of("role", "system", "content", "You are a helpful assistant"),
Map.of("role", "user", "content", "Add 2+2")), config, true, true);
assertTrue( response.choices.get(0).get("content").contains("4") );
// Verifies the prompt and response have the expected combined length.
assertEquals( 224 , response.usage.totalTokens );
}
@Test
public void testResponseCallback(){
ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream();
ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream();
LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration);
// Get the runtime instance
Runtime runtime = Runtime.getSystemRuntime();
// Allocate memory for the byte array. Has to be null terminated
// UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9
byte[] utf8ByteArray = {(byte) 0xF0, (byte) 0x9F, (byte) 0x92, (byte) 0xA9, 0x00}; // Adding null termination
// Optional: Converting the byte array back to a String to print the character
String decodedString = new String(utf8ByteArray, 0, utf8ByteArray.length - 1, java.nio.charset.StandardCharsets.UTF_8);
Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length);
// Copy the byte array to the allocated memory
pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length);
responseCallback.invoke(1, pointer);
String result = bufferingForWholeGeneration.toString(StandardCharsets.UTF_8);
assertEquals(decodedString, result);
}
@Test
public void testResponseCallbackTwoTokens(){
ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream();
ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream();
LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration);
// Get the runtime instance
Runtime runtime = Runtime.getSystemRuntime();
// Allocate memory for the byte array. Has to be null terminated
// UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9
byte[] utf8ByteArray = { (byte) 0xF0, (byte) 0x9F, 0x00}; // Adding null termination
byte[] utf8ByteArray2 = { (byte) 0x92, (byte) 0xA9, 0x00}; // Adding null termination
// Optional: Converting the byte array back to a String to print the character
Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length);
// Copy the byte array to the allocated memory
pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length);
responseCallback.invoke(1, pointer);
// Copy the byte array to the allocated memory
pointer.put(0, utf8ByteArray2, 0, utf8ByteArray2.length);
responseCallback.invoke(2, pointer);
String result = bufferingForWholeGeneration.toString(StandardCharsets.UTF_8);
assertEquals("\uD83D\uDCA9", result);
}
@Test
public void testResponseCallbackExpectError(){
ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream();
ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream();
LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration);
// Get the runtime instance
Runtime runtime = Runtime.getSystemRuntime();
// UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9
byte[] utf8ByteArray = {(byte) 0xF0, (byte) 0x9F, (byte) 0x92, (byte) 0xA9}; // No null termination
Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length);
// Copy the byte array to the allocated memory
pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length);
Exception exception = assertThrows(RuntimeException.class, () -> responseCallback.invoke(1, pointer));
assertEquals("Empty array or not null terminated", exception.getMessage());
// With empty array
utf8ByteArray = new byte[0];
pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length);
Exception exceptionN = assertThrows(RuntimeException.class, () -> responseCallback.invoke(1, pointer));
assertEquals("Empty array or not null terminated", exceptionN.getMessage());
}
}

View File

@ -1,30 +0,0 @@
package com.hexadevlabs.gpt4all;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
/**
* GPTJ chat completion, multiple messages
*/
public class Example1 {
public static void main(String[] args) {
// Optional: override the shared library location if necessary
//LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
try ( LLModel gptjModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin")) ){
LLModel.GenerationConfig config = LLModel.config()
.withNPredict(4096).build();
gptjModel.chatCompletion(
List.of(Map.of("role", "user", "content", "Add 2+2"),
Map.of("role", "assistant", "content", "4"),
Map.of("role", "user", "content", "Multiply 4 * 5")), config, true, true);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

View File

@ -1,31 +0,0 @@
package com.hexadevlabs.gpt4all;
import java.nio.file.Path;
/**
* Generation with MPT model
*/
public class Example2 {
public static void main(String[] args) {
String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:";
// Optional: override the shared library location if necessary
//LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
try (LLModel mptModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-mpt-7b-instruct.bin"))) {
LLModel.GenerationConfig config =
LLModel.config()
.withNPredict(4096)
.withRepeatLastN(64)
.build();
mptModel.generate(prompt, config, true);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

View File

@ -1,33 +0,0 @@
package com.hexadevlabs.gpt4all;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
/**
* GPTJ chat completion with system message
*/
public class Example3 {
public static void main(String[] args) {
// Optional: override the shared library location if necessary
//LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
try ( LLModel gptjModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin")) ){
LLModel.GenerationConfig config = LLModel.config()
.withNPredict(4096).build();
// String result = gptjModel.generate(prompt, config, true);
gptjModel.chatCompletion(
List.of(Map.of("role", "system", "content", "You are a helpful assistant"),
Map.of("role", "user", "content", "Add 2+2")), config, true, true);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

View File

@ -1,43 +0,0 @@
package com.hexadevlabs.gpt4all;
import java.nio.file.Path;
public class Example4 {
public static void main(String[] args) {
String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:";
// The emoji is the poop emoji. The Unicode character is encoded as a surrogate pair in the Java string.
// The LLM should correctly identify it as the poop emoji in its description.
//String prompt = "### Human:\nDescribe the meaning of this emoji \uD83D\uDCA9\n### Assistant:";
//String prompt = "### Human:\nOutput the unicode character of smiley face emoji\n### Assistant:";
// Optional: override the shared library location if necessary
//LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
String model = "ggml-vicuna-7b-1.1-q4_2.bin";
//String model = "ggml-gpt4all-j-v1.3-groovy.bin";
//String model = "ggml-mpt-7b-instruct.bin";
String basePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\";
//String basePath = "/Users/fzaslavs/Library/Application Support/nomic.ai/GPT4All/";
try (LLModel mptModel = new LLModel(Path.of(basePath + model))) {
LLModel.GenerationConfig config =
LLModel.config()
.withNPredict(4096)
.withRepeatLastN(64)
.build();
String result = mptModel.generate(prompt, config, true);
System.out.println("Code points:");
result.codePoints().forEach(System.out::println);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

View File

@ -1,47 +0,0 @@
package com.hexadevlabs.gpt4all;
import java.nio.file.Path;
public class Example5 {
public static void main(String[] args) {
// String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:";
// The emoji is the poop emoji. The Unicode character is encoded as a surrogate pair in the Java string.
// The LLM should correctly identify it as the poop emoji in its description.
//String prompt = "### Human:\nDescribe the meaning of this emoji \uD83D\uDCA9\n### Assistant:";
//String prompt = "### Human:\nOutput the unicode character of smiley face emoji\n### Assistant:";
// Optional: override the shared library location if necessary
//LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\";
StringBuffer b = new StringBuffer();
b.append("The ".repeat(2060));
String prompt = b.toString();
String model = "ggml-vicuna-7b-1.1-q4_2.bin";
//String model = "ggml-gpt4all-j-v1.3-groovy.bin";
//String model = "ggml-mpt-7b-instruct.bin";
String basePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\";
//String basePath = "/Users/fzaslavs/Library/Application Support/nomic.ai/GPT4All/";
try (LLModel mptModel = new LLModel(Path.of(basePath + model))) {
LLModel.GenerationConfig config =
LLModel.config()
.withNPredict(4096)
.withRepeatLastN(64)
.build();
String result = mptModel.generate(prompt, config, true);
System.out.println("Code points:");
result.codePoints().forEach(System.out::println);
} catch (Exception e) {
System.out.println(e.getMessage());
throw new RuntimeException(e);
}
}
}