diff --git a/.circleci/config.yml b/.circleci/config.yml index 2afc1d66..26bcbd6e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,6 +15,5 @@ workflows: gpt4all-backend/.* run-all-workflows true gpt4all-bindings/python/.* run-python-workflow true gpt4all-bindings/typescript/.* run-ts-workflow true - gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-chat/.* run-chat-workflow true .* run-default-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 9e8ce63e..c174ccfd 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -20,9 +20,6 @@ parameters: run-ts-workflow: type: boolean default: false - run-csharp-workflow: - type: boolean - default: false jobs: default-job: @@ -620,57 +617,6 @@ jobs: - runtimes/osx-x64/*.metal build-bindings-backend-windows: - executor: - name: win/default - size: large - shell: powershell.exe -ExecutionPolicy Bypass - steps: - - checkout - - run: - name: Update Submodules - command: | - git submodule sync - git submodule update --init --recursive - - run: - name: Install MinGW64 - command: choco install -y mingw --force --no-progress - - run: - name: Install VulkanSDK - command: | - Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe - .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install - - run: - name: Install CUDA Toolkit - command: | - Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe - .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - - run: - name: Install dependencies - command: | - choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' - - run: - name: Build Libraries - command: | - $MinGWBin = "C:\ProgramData\mingw64\mingw64\bin" - $Env:Path += ";$MinGwBin" - $Env:Path += ";C:\Program Files\CMake\bin" - $Env:Path += ";C:\VulkanSDK\1.3.261.1\bin" - $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1" - cd gpt4all-backend - mkdir runtimes/win-x64 - cd runtimes/win-x64 - cmake -G "MinGW Makefiles" -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON ../.. - cmake --build . --parallel --config Release - cp "$MinGWBin\libgcc*.dll" . - cp "$MinGWBin\libstdc++*.dll" . - cp "$MinGWBin\libwinpthread*.dll" . - cp bin/*.dll . 
- - persist_to_workspace: - root: gpt4all-backend - paths: - - runtimes/win-x64/*.dll - - build-bindings-backend-windows-msvc: machine: image: 'windows-server-2022-gui:2023.03.1' resource_class: windows.large @@ -713,182 +659,6 @@ jobs: paths: - runtimes/win-x64_msvc/*.dll - build-csharp-linux: - docker: - - image: mcr.microsoft.com/dotnet/sdk:8.0 - steps: - - checkout - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/linux-x64/native - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - ls -R runtimes - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - build-csharp-windows: - executor: - name: win/default - size: large - shell: powershell.exe -ExecutionPolicy Bypass - steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-win - - attach_workspace: - at: C:\Users\circleci\workspace - - run: - name: "Install .NET" - command: | - choco install -y dotnet-8.0-sdk - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes\win-x64\native - cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet.exe restore Gpt4All - - save_cache: - paths: - - C:\Users\circleci\.nuget\packages - key: gpt4all-csharp-nuget-packages-win - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet.exe build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - build-csharp-macos: - macos: - xcode: "14.0.0" - steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: Install dependencies - command: | - brew tap isen-ng/dotnet-sdk-versions - brew install --cask dotnet-sdk8-0-100 - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/osx/native - cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ - cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - 
key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - store-and-upload-nupkgs: - docker: - - image: mcr.microsoft.com/dotnet/sdk:8.0 - steps: - - attach_workspace: - at: /tmp/workspace - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: NuGet Pack - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/linux-x64/native - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - mkdir -p runtimes/win-x64/native - cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ - #mkdir -p runtimes/osx/native - #cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ - #cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ - dotnet pack ./Gpt4All/Gpt4All.csproj -p:IncludeSymbols=true -p:SymbolPackageFormat=snupkg -c Release - dotnet nuget push ./Gpt4All/bin/Release/Gpt4All.*.nupkg -s $NUGET_URL -k $NUGET_TOKEN --skip-duplicate - - store_artifacts: - path: gpt4all-bindings/csharp/Gpt4All/bin/Release - build-nodejs-linux: docker: - image: cimg/base:stable @@ -1153,13 +923,10 @@ workflows: or: - << pipeline.parameters.run-all-workflows >> - << pipeline.parameters.run-python-workflow >> - - << pipeline.parameters.run-csharp-workflow >> - << pipeline.parameters.run-ts-workflow >> jobs: - hold: type: approval - - csharp-hold: - type: approval - nuget-hold: type: approval - nodejs-hold: @@ -1184,12 +951,6 @@ workflows: only: requires: - hold - - build-bindings-backend-windows-msvc: - filters: - branches: - only: - requires: - - hold # NodeJs Jobs - prepare-npm-pkg: @@ -1214,7 +975,7 @@ workflows: only: requires: - nodejs-hold - - build-bindings-backend-windows-msvc + - build-bindings-backend-windows - build-nodejs-macos: filters: branches: @@ -1222,36 +983,3 @@ workflows: requires: - nodejs-hold - build-bindings-backend-macos - - - # CSharp Jobs - - build-csharp-linux: - filters: - branches: - only: - requires: - - csharp-hold - - build-bindings-backend-linux - - build-csharp-windows: - filters: - branches: - only: - requires: - - csharp-hold - - build-bindings-backend-windows - - build-csharp-macos: - filters: - branches: - only: - requires: - - csharp-hold - - build-bindings-backend-macos - - store-and-upload-nupkgs: - filters: - branches: - only: - requires: - - nuget-hold - - build-csharp-windows - - build-csharp-linux - #- build-csharp-macos diff --git a/gpt4all-bindings/README.md b/gpt4all-bindings/README.md index 1969027e..722159fd 100644 --- a/gpt4all-bindings/README.md +++ b/gpt4all-bindings/README.md @@ -1,3 +1,21 @@ -# GPT4All Bindings -This directory will contain language specific bindings on top of the C/C++ model backends. -We will have one directory per language binding (e.g. Python, Typescript, Golang, etc.). \ No newline at end of file +# GPT4All Language Bindings +These are the language bindings for the GPT4All backend. 
They provide functionality to load GPT4All models (and other llama.cpp models), generate text, and (in the case of the Python bindings) embed text as a vector representation.
+
+See their respective folders for language-specific documentation.
+
+### Languages
+- [Python](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python) (Nomic official, maintained by [@cebtenzzre](https://github.com/cebtenzzre))
+- [Node.js/TypeScript](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/typescript) (community, maintained by [@jacoobes](https://github.com/jacoobes) and [@iimez](https://github.com/iimez))
+
+### Archived Bindings
+
+The following bindings have been removed from this repository due to lack of maintenance. If adopted, they can be brought back; feel free to message a developer on Discord if you are interested in maintaining one of them. Below are links to their last available version (not necessarily the last working version).
+- C#: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/csharp)
+- Java: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/java)
+- Go: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/golang)
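+
+If you are considering adopting the C# binding, here is a minimal sketch of how it was used, mirroring the `Gpt4All.Samples/Program.cs` file removed later in this diff. It targets the archived API, which is no longer built or released from this repository, so treat it as a historical reference rather than current usage:
+
+```csharp
+using Gpt4All;
+
+// Load a local model file and stream a prediction token by token
+// (assumes the model path and prompt are passed as CLI arguments).
+var modelFactory = new Gpt4AllModelFactory();
+
+var modelPath = args[0];
+var prompt = args[1];
+
+using var model = modelFactory.LoadModel(modelPath);
+
+var result = await model.GetStreamingPredictionAsync(
+    prompt,
+    PredictRequestOptions.Defaults);
+
+await foreach (var token in result.GetPredictionStreamingAsync())
+{
+    Console.Write(token);
+}
+```
+
+The `Gpt4AllModelFactory`, `PredictRequestOptions`, and streaming-result types used above are all defined in the files deleted below.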
diff --git a/gpt4all-bindings/csharp/.editorconfig b/gpt4all-bindings/csharp/.editorconfig deleted file mode 100644 index 919f066e..00000000 --- a/gpt4all-bindings/csharp/.editorconfig +++ /dev/null @@ -1,348 +0,0 @@ -# EditorConfig is awesome: https://EditorConfig.org - -# top-most EditorConfig file -root = true - -# Don't use tabs for indentation. -[*] -indent_style = space -# (Please don't specify an indent_size here; that has too many unintended consequences.) - -# Code files -[*.{cs,csx,vb,vbx}] -indent_size = 4 -insert_final_newline = true -charset = utf-8-bom - -# XML project files -[*.{csproj,vbproj,vcxproj,vcxproj.filters,proj,projitems,shproj}] -indent_size = 4 - -# XML config files -[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}] -indent_size = 2 - -# JSON files -[*.json] -indent_size = 2 - -# Powershell files -[*.ps1] -indent_size = 2 - -# Shell script files -[*.sh] -end_of_line = lf -indent_size = 2 -insert_final_newline = true - -# Dotnet code style settings: -[*.{cs,vb}] - -# IDE0055: Fix formatting -dotnet_diagnostic.IDE0055.severity = error -dotnet_diagnostic.CS1573.severity = suggestion -dotnet_diagnostic.CS1591.severity = suggestion - -# Sort using and Import directives with System.* appearing first -dotnet_sort_system_directives_first = true -dotnet_separate_import_directive_groups = false - -# Avoid "this." and "Me." if not necessary -dotnet_style_qualification_for_field = false:suggestion -dotnet_style_qualification_for_property = false:suggestion -dotnet_style_qualification_for_method = false:suggestion -dotnet_style_qualification_for_event = false:suggestion - -# Use language keywords instead of framework type names for type references -dotnet_style_predefined_type_for_locals_parameters_members = true:warning -dotnet_style_predefined_type_for_member_access = true:warning - -# Suggest more modern language features when available -dotnet_style_object_initializer = true:suggestion -dotnet_style_collection_initializer = true:suggestion -dotnet_style_coalesce_expression = true:suggestion -dotnet_style_null_propagation = true:suggestion -dotnet_style_explicit_tuple_names = true:suggestion - -# Whitespace options -dotnet_style_allow_multiple_blank_lines_experimental = false - -# Private fields are camelCase with '_' prefix -dotnet_naming_rule.private_members_with_underscore.symbols = private_fields -dotnet_naming_rule.private_members_with_underscore.style = prefix_underscore -dotnet_naming_rule.private_members_with_underscore.severity = error -dotnet_naming_symbols.private_fields.applicable_kinds = field -dotnet_naming_symbols.private_fields.applicable_accessibilities = private -dotnet_naming_style.prefix_underscore.capitalization = camel_case -dotnet_naming_style.prefix_underscore.required_prefix = _ - -# Non-private static fields are PascalCase -dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.severity = suggestion -dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.symbols = non_private_static_fields -dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.style = non_private_static_field_style - -dotnet_naming_symbols.non_private_static_fields.applicable_kinds = field -dotnet_naming_symbols.non_private_static_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected -dotnet_naming_symbols.non_private_static_fields.required_modifiers = static - -dotnet_naming_style.non_private_static_field_style.capitalization = pascal_case - -# Non-private readonly fields are 
PascalCase -dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.severity = suggestion -dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.symbols = non_private_readonly_fields -dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.style = non_private_static_field_style - -dotnet_naming_symbols.non_private_readonly_fields.applicable_kinds = field -dotnet_naming_symbols.non_private_readonly_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected -dotnet_naming_symbols.non_private_readonly_fields.required_modifiers = readonly - -dotnet_naming_style.non_private_readonly_field_style.capitalization = pascal_case - -# Constants are PascalCase -dotnet_naming_rule.constants_should_be_pascal_case.severity = suggestion -dotnet_naming_rule.constants_should_be_pascal_case.symbols = constants -dotnet_naming_rule.constants_should_be_pascal_case.style = non_private_static_field_style - -dotnet_naming_symbols.constants.applicable_kinds = field, local -dotnet_naming_symbols.constants.required_modifiers = const - -dotnet_naming_style.constant_style.capitalization = pascal_case - -# Static fields are camelCase and start with s_ -dotnet_naming_rule.static_fields_should_be_camel_case.severity = none -dotnet_naming_rule.static_fields_should_be_camel_case.symbols = static_fields -dotnet_naming_rule.static_fields_should_be_camel_case.style = static_field_style - -dotnet_naming_symbols.static_fields.applicable_kinds = field -dotnet_naming_symbols.static_fields.required_modifiers = static - -dotnet_naming_style.static_field_style.capitalization = camel_case -dotnet_naming_style.static_field_style.required_prefix = s_ - -# Instance fields are camelCase and start with _ -dotnet_naming_rule.instance_fields_should_be_camel_case.severity = none -dotnet_naming_rule.instance_fields_should_be_camel_case.symbols = instance_fields -dotnet_naming_rule.instance_fields_should_be_camel_case.style = instance_field_style - -dotnet_naming_symbols.instance_fields.applicable_kinds = field - -dotnet_naming_style.instance_field_style.capitalization = camel_case -dotnet_naming_style.instance_field_style.required_prefix = _ - -# Locals and parameters are camelCase -dotnet_naming_rule.locals_should_be_camel_case.severity = suggestion -dotnet_naming_rule.locals_should_be_camel_case.symbols = locals_and_parameters -dotnet_naming_rule.locals_should_be_camel_case.style = camel_case_style - -dotnet_naming_symbols.locals_and_parameters.applicable_kinds = parameter, local - -dotnet_naming_style.camel_case_style.capitalization = camel_case - -# Local functions are PascalCase -dotnet_naming_rule.local_functions_should_be_pascal_case.severity = suggestion -dotnet_naming_rule.local_functions_should_be_pascal_case.symbols = local_functions -dotnet_naming_rule.local_functions_should_be_pascal_case.style = non_private_static_field_style - -dotnet_naming_symbols.local_functions.applicable_kinds = local_function - -dotnet_naming_style.local_function_style.capitalization = pascal_case - -# By default, name items with PascalCase -dotnet_naming_rule.members_should_be_pascal_case.severity = suggestion -dotnet_naming_rule.members_should_be_pascal_case.symbols = all_members -dotnet_naming_rule.members_should_be_pascal_case.style = non_private_static_field_style - -dotnet_naming_symbols.all_members.applicable_kinds = * - -dotnet_naming_style.pascal_case_style.capitalization = pascal_case - -# error RS2008: Enable analyzer release tracking for the analyzer project 
containing rule '{0}' -dotnet_diagnostic.RS2008.severity = none - -# IDE0073: File header -dotnet_diagnostic.IDE0073.severity = none -#file_header_template = Licensed to the .NET Foundation under one or more agreements.\nThe .NET Foundation licenses this file to you under the MIT license.\nSee the LICENSE file in the project root for more information. - -# IDE0035: Remove unreachable code -dotnet_diagnostic.IDE0035.severity = warning - -# IDE0036: Order modifiers -dotnet_diagnostic.IDE0036.severity = warning - -# IDE0043: Format string contains invalid placeholder -dotnet_diagnostic.IDE0043.severity = warning - -# IDE0044: Make field readonly -dotnet_diagnostic.IDE0044.severity = warning - -# IDE1006: Naming rule violation -#dotnet_diagnostic.IDE1006.severity = none - -# RS0016: Only enable if API files are present -dotnet_public_api_analyzer.require_api_files = true -dotnet_style_operator_placement_when_wrapping = beginning_of_line -tab_width = 4 -end_of_line = crlf -dotnet_style_prefer_is_null_check_over_reference_equality_method = true:suggestion -dotnet_style_prefer_auto_properties = true:silent -dotnet_style_prefer_simplified_boolean_expressions = true:suggestion -dotnet_style_prefer_conditional_expression_over_assignment = true:silent -dotnet_style_prefer_conditional_expression_over_return = true:silent -dotnet_style_prefer_inferred_tuple_names = true:suggestion -dotnet_style_prefer_inferred_anonymous_type_member_names = true:suggestion -dotnet_style_prefer_compound_assignment = true:suggestion -dotnet_style_prefer_simplified_interpolation = true:suggestion -dotnet_style_namespace_match_folder = true:suggestion - -# CSharp code style settings: -[*.cs] -# Newline settings -csharp_new_line_before_open_brace = all -csharp_new_line_before_else = true -csharp_new_line_before_catch = true -csharp_new_line_before_finally = true -csharp_new_line_before_members_in_object_initializers = true -csharp_new_line_before_members_in_anonymous_types = true -csharp_new_line_between_query_expression_clauses = true - -# Indentation preferences -csharp_indent_block_contents = true -csharp_indent_braces = false -csharp_indent_case_contents = true -csharp_indent_case_contents_when_block = true -csharp_indent_switch_labels = true -csharp_indent_labels = flush_left - -# Whitespace options -csharp_style_allow_embedded_statements_on_same_line_experimental = false -csharp_style_allow_blank_lines_between_consecutive_braces_experimental = false -csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental = false - -# Prefer "var" everywhere -csharp_style_var_for_built_in_types = true:suggestion -csharp_style_var_when_type_is_apparent = true:suggestion -csharp_style_var_elsewhere = true:suggestion - -# Prefer method-like constructs to have a block body -csharp_style_expression_bodied_methods = false:none -csharp_style_expression_bodied_constructors = false:none -csharp_style_expression_bodied_operators = false:none - -# Prefer property-like constructs to have an expression-body -csharp_style_expression_bodied_properties = true:none -csharp_style_expression_bodied_indexers = true:none -csharp_style_expression_bodied_accessors = true:none - -# Suggest more modern language features when available -csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion -csharp_style_pattern_matching_over_as_with_null_check = true:suggestion -csharp_style_inlined_variable_declaration = true:suggestion -csharp_style_throw_expression = true:suggestion -csharp_style_conditional_delegate_call = 
true:suggestion - -# Space preferences -csharp_space_after_cast = false -csharp_space_after_colon_in_inheritance_clause = true -csharp_space_after_comma = true -csharp_space_after_dot = false -csharp_space_after_keywords_in_control_flow_statements = true -csharp_space_after_semicolon_in_for_statement = true -csharp_space_around_binary_operators = before_and_after -csharp_space_around_declaration_statements = do_not_ignore -csharp_space_before_colon_in_inheritance_clause = true -csharp_space_before_comma = false -csharp_space_before_dot = false -csharp_space_before_open_square_brackets = false -csharp_space_before_semicolon_in_for_statement = false -csharp_space_between_empty_square_brackets = false -csharp_space_between_method_call_empty_parameter_list_parentheses = false -csharp_space_between_method_call_name_and_opening_parenthesis = false -csharp_space_between_method_call_parameter_list_parentheses = false -csharp_space_between_method_declaration_empty_parameter_list_parentheses = false -csharp_space_between_method_declaration_name_and_open_parenthesis = false -csharp_space_between_method_declaration_parameter_list_parentheses = false -csharp_space_between_parentheses = false -csharp_space_between_square_brackets = false - -# Blocks are allowed -csharp_prefer_braces = true:silent -csharp_preserve_single_line_blocks = true -csharp_preserve_single_line_statements = true - -# Target-type new expressio -csharp_style_implicit_object_creation_when_type_is_apparent = true:suggestion - -# Currently only enabled for C# due to crash in VB analyzer. VB can be enabled once -# https://github.com/dotnet/roslyn/pull/54259 has been published. -dotnet_style_allow_statement_immediately_after_block_experimental = false -dotnet_diagnostic.RCS0003.severity=warning -dotnet_diagnostic.RCS1036.severity=error -dotnet_diagnostic.IDE0005.severity=warning -dotnet_diagnostic.IDE0007.severity=error -csharp_using_directive_placement = outside_namespace:silent -csharp_prefer_simple_using_statement = true:suggestion -csharp_style_namespace_declarations = block_scoped:silent -csharp_style_expression_bodied_lambdas = true:silent -csharp_style_expression_bodied_local_functions = false:silent -csharp_style_prefer_null_check_over_type_check = true:suggestion -dotnet_diagnostic.RCS1075.severity = suggestion - -[src/CodeStyle/**.{cs,vb}] -# warning RS0005: Do not use generic CodeAction.Create to create CodeAction -dotnet_diagnostic.RS0005.severity = none - -[src/{Analyzers,CodeStyle,Features,Workspaces,EditorFeatures,VisualStudio}/**/*.{cs,vb}] - -# IDE0011: Add braces -csharp_prefer_braces = when_multiline:warning -# NOTE: We need the below severity entry for Add Braces due to https://github.com/dotnet/roslyn/issues/44201 -dotnet_diagnostic.IDE0011.severity = warning - -# IDE0040: Add accessibility modifiers -dotnet_diagnostic.IDE0040.severity = warning - -# CONSIDER: Are IDE0051 and IDE0052 too noisy to be warnings for IDE editing scenarios? Should they be made build-only warnings? 
-# IDE0051: Remove unused private member -dotnet_diagnostic.IDE0051.severity = warning - -# IDE0052: Remove unread private member -dotnet_diagnostic.IDE0052.severity = warning - -# IDE0059: Unnecessary assignment to a value -dotnet_diagnostic.IDE0059.severity = warning - -# IDE0060: Remove unused parameter -dotnet_diagnostic.IDE0060.severity = warning - -# CA1012: Abstract types should not have public constructors -dotnet_diagnostic.CA1012.severity = warning - -# CA1822: Make member static -dotnet_diagnostic.CA1822.severity = warning - -# Prefer "var" everywhere -dotnet_diagnostic.IDE0007.severity = warning -csharp_style_var_for_built_in_types = true:warning -csharp_style_var_when_type_is_apparent = true:warning -csharp_style_var_elsewhere = true:warning - -# dotnet_style_allow_multiple_blank_lines_experimental -dotnet_diagnostic.IDE2000.severity = warning - -# csharp_style_allow_embedded_statements_on_same_line_experimental -dotnet_diagnostic.IDE2001.severity = warning - -# csharp_style_allow_blank_lines_between_consecutive_braces_experimental -dotnet_diagnostic.IDE2002.severity = warning - -# dotnet_style_allow_statement_immediately_after_block_experimental -dotnet_diagnostic.IDE2003.severity = warning - -# csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental -dotnet_diagnostic.IDE2004.severity = warning - -[src/{VisualStudio}/**/*.{cs,vb}] -# CA1822: Make member static -# There is a risk of accidentally breaking an internal API that partners rely on though IVT. -dotnet_code_quality.CA1822.api_surface = private diff --git a/gpt4all-bindings/csharp/.gitignore b/gpt4all-bindings/csharp/.gitignore deleted file mode 100644 index 04306510..00000000 --- a/gpt4all-bindings/csharp/.gitignore +++ /dev/null @@ -1,379 +0,0 @@ -## Ignore Visual Studio temporary files, build results, and -## files generated by popular Visual Studio add-ons. 
-## -## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore - -runtimes -**/*nuget - -*.zip -include/ -*.exp -*.lib -*.dll - -# User-specific files -*.rsuser -*.suo -*.user -*.userosscache -*.sln.docstates - -# User-specific files (MonoDevelop/Xamarin Studio) -*.userprefs - -# Mono auto generated files -mono_crash.* -Tests/**/launchSettings.json - -# Build results -[Dd]ebug/ -[Dd]ebugPublic/ -[Rr]elease/ -[Rr]eleases/ -x64/ -x86/ -[Ww][Ii][Nn]32/ -[Aa][Rr][Mm]/ -[Aa][Rr][Mm]64/ -bld/ -[Bb]in/ -[Oo]bj/ -[Oo]ut/ -[Ll]og/ -[Ll]ogs/ - -# Visual Studio 2015/2017 cache/options directory -.vs/ -# Uncomment if you have tasks that create the project's static files in wwwroot -#wwwroot/ - -# Visual Studio 2017 auto generated files -Generated\ Files/ - -# MSTest test Results -[Tt]est[Rr]esult*/ -[Bb]uild[Ll]og.* - -# NUnit -*.VisualState.xml -TestResult.xml -nunit-*.xml - -# Build Results of an ATL Project -[Dd]ebugPS/ -[Rr]eleasePS/ -dlldata.c - -# Benchmark Results -BenchmarkDotNet.Artifacts/ - -# .NET Core -project.lock.json -project.fragment.lock.json -artifacts/ - -# ASP.NET Scaffolding -ScaffoldingReadMe.txt - -# StyleCop -StyleCopReport.xml - -# Files built by Visual Studio -*_i.c -*_p.c -*_h.h -*.ilk -*.meta -*.obj -*.iobj -*.pch -*.pdb -*.ipdb -*.pgc -*.pgd -*.rsp -*.sbr -*.tlb -*.tli -*.tlh -*.tmp -*.tmp_proj -*_wpftmp.csproj -*.log -*.vspscc -*.vssscc -.builds -*.pidb -*.svclog -*.scc - -# Chutzpah Test files -_Chutzpah* - -# Visual C++ cache files -ipch/ -*.aps -*.ncb -*.opendb -*.opensdf -*.sdf -*.cachefile -*.VC.db -*.VC.VC.opendb - -# Visual Studio profiler -*.psess -*.vsp -*.vspx -*.sap - -# Visual Studio Trace Files -*.e2e - -# TFS 2012 Local Workspace -$tf/ - -# Guidance Automation Toolkit -*.gpState - -# ReSharper is a .NET coding add-in -_ReSharper*/ -*.[Rr]e[Ss]harper -*.DotSettings.user - -# TeamCity is a build add-in -_TeamCity* - -# DotCover is a Code Coverage Tool -*.dotCover - -# AxoCover is a Code Coverage Tool -.axoCover/* -!.axoCover/settings.json - -# Coverlet is a free, cross platform Code Coverage Tool -coverage*.json -coverage*.xml -coverage*.info - -# Visual Studio code coverage results -*.coverage -*.coveragexml - -# NCrunch -_NCrunch_* -.*crunch*.local.xml -nCrunchTemp_* - -# MightyMoose -*.mm.* -AutoTest.Net/ - -# Web workbench (sass) -.sass-cache/ - -# Installshield output folder -[Ee]xpress/ - -# DocProject is a documentation generator add-in -DocProject/buildhelp/ -DocProject/Help/*.HxT -DocProject/Help/*.HxC -DocProject/Help/*.hhc -DocProject/Help/*.hhk -DocProject/Help/*.hhp -DocProject/Help/Html2 -DocProject/Help/html - -# Click-Once directory -publish/ - -# Publish Web Output -*.[Pp]ublish.xml -*.azurePubxml -# Note: Comment the next line if you want to checkin your web deploy settings, -# but database connection strings (with potential passwords) will be unencrypted -*.pubxml -*.publishproj - -# Microsoft Azure Web App publish settings. Comment the next line if you want to -# checkin your Azure Web App publish settings, but sensitive information contained -# in these scripts will be unencrypted -PublishScripts/ - -# NuGet Packages -*.nupkg -# NuGet Symbol Packages -*.snupkg -# The packages folder can be ignored because of Package Restore -**/[Pp]ackages/* -# except build/, which is used as an MSBuild target. 
-!**/[Pp]ackages/build/ -# Uncomment if necessary however generally it will be regenerated when needed -#!**/[Pp]ackages/repositories.config -# NuGet v3's project.json files produces more ignorable files -*.nuget.props -*.nuget.targets - -# Microsoft Azure Build Output -csx/ -*.build.csdef - -# Microsoft Azure Emulator -ecf/ -rcf/ - -# Windows Store app package directories and files -AppPackages/ -BundleArtifacts/ -Package.StoreAssociation.xml -_pkginfo.txt -*.appx -*.appxbundle -*.appxupload - -# Visual Studio cache files -# files ending in .cache can be ignored -*.[Cc]ache -# but keep track of directories ending in .cache -!?*.[Cc]ache/ - -# Others -ClientBin/ -~$* -*~ -*.dbmdl -*.dbproj.schemaview -*.jfm -*.pfx -*.publishsettings -orleans.codegen.cs - -# Including strong name files can present a security risk -# (https://github.com/github/gitignore/pull/2483#issue-259490424) -#*.snk - -# Since there are multiple workflows, uncomment next line to ignore bower_components -# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) -#bower_components/ - -# RIA/Silverlight projects -Generated_Code/ - -# Backup & report files from converting an old project file -# to a newer Visual Studio version. Backup files are not needed, -# because we have git ;-) -_UpgradeReport_Files/ -Backup*/ -UpgradeLog*.XML -UpgradeLog*.htm -ServiceFabricBackup/ -*.rptproj.bak - -# SQL Server files -*.mdf -*.ldf -*.ndf - -# Business Intelligence projects -*.rdl.data -*.bim.layout -*.bim_*.settings -*.rptproj.rsuser -*- [Bb]ackup.rdl -*- [Bb]ackup ([0-9]).rdl -*- [Bb]ackup ([0-9][0-9]).rdl - -# Microsoft Fakes -FakesAssemblies/ - -# GhostDoc plugin setting file -*.GhostDoc.xml - -# Node.js Tools for Visual Studio -.ntvs_analysis.dat -node_modules/ - -# Visual Studio 6 build log -*.plg - -# Visual Studio 6 workspace options file -*.opt - -# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
-*.vbw - -# Visual Studio LightSwitch build output -**/*.HTMLClient/GeneratedArtifacts -**/*.DesktopClient/GeneratedArtifacts -**/*.DesktopClient/ModelManifest.xml -**/*.Server/GeneratedArtifacts -**/*.Server/ModelManifest.xml -_Pvt_Extensions - -# Paket dependency manager -.paket/paket.exe -paket-files/ - -# FAKE - F# Make -.fake/ - -# CodeRush personal settings -.cr/personal - -# Python Tools for Visual Studio (PTVS) -__pycache__/ -*.pyc - -# Cake - Uncomment if you are using it -# tools/** -# !tools/packages.config - -# Tabs Studio -*.tss - -# Telerik's JustMock configuration file -*.jmconfig - -# BizTalk build output -*.btp.cs -*.btm.cs -*.odx.cs -*.xsd.cs - -# OpenCover UI analysis results -OpenCover/ - -# Azure Stream Analytics local run output -ASALocalRun/ - -# MSBuild Binary and Structured Log -*.binlog - -# NVidia Nsight GPU debugger configuration file -*.nvuser - -# MFractors (Xamarin productivity tool) working folder -.mfractor/ - -# Local History for Visual Studio -.localhistory/ - -# BeatPulse healthcheck temp database -healthchecksdb - -# Backup folder for Package Reference Convert tool in Visual Studio 2017 -MigrationBackup/ - -# Ionide (cross platform F# VS Code tools) working folder -.ionide/ - -# Fody - auto-generated XML schema -FodyWeavers.xsd - -# JetBrains Rider -.idea - -# Visual Studio Code -.vscode \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Directory.Build.props b/gpt4all-bindings/csharp/Directory.Build.props deleted file mode 100644 index 8b307516..00000000 --- a/gpt4all-bindings/csharp/Directory.Build.props +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - en-US - 0.6.4-alpha - $(VersionSuffix) - $(Version)$(VersionSuffix) - true - - git - true - true - latest-minimum - true - - - - - - - - preview - strict - - - - - all - runtime; build; native; contentfiles; analyzers - - - all - runtime; build; native; contentfiles; analyzers - - - all - runtime; build; native; contentfiles; analyzers - - - - diff --git a/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj b/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj deleted file mode 100644 index 8e6d325a..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj +++ /dev/null @@ -1,33 +0,0 @@ - - - - Exe - net8.0 - enable - enable - true - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs b/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs deleted file mode 100644 index ac4ae80e..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs +++ /dev/null @@ -1,22 +0,0 @@ -using Gpt4All; - -var modelFactory = new Gpt4AllModelFactory(); -if (args.Length < 2) -{ - Console.WriteLine($"Usage: Gpt4All.Samples "); - return; -} - -var modelPath = args[0]; -var prompt = args[1]; - -using var model = modelFactory.LoadModel(modelPath); - -var result = await model.GetStreamingPredictionAsync( - prompt, - PredictRequestOptions.Defaults); - -await foreach (var token in result.GetPredictionStreamingAsync()) -{ - Console.Write(token); -} diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs deleted file mode 100644 index a326f43c..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs +++ /dev/null @@ -1,9 +0,0 @@ -namespace Gpt4All.Tests; - -public static class Constants -{ - public const string MODELS_BASE_DIR = "../../../models"; - public const string LLAMA_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-l13b-snoozy.bin"; - public 
const string GPTJ_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-j-v1.3-groovy.bin"; - public const string MPT_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-mpt-7b-chat.bin"; -} diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj b/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj deleted file mode 100644 index 76f61f92..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj +++ /dev/null @@ -1,60 +0,0 @@ - - - - net8.0 - enable - - false - true - - - - - - - runtime; build; native; contentfiles; analyzers; buildtransitive - all - - - runtime; build; native; contentfiles; analyzers; buildtransitive - all - - - - - - - - - - - - - - - - - - - - - - - - - - - - all - runtime; build; native; contentfiles; analyzers - - - all - runtime; build; native; contentfiles; analyzers - - - all - runtime; build; native; contentfiles; analyzers - - - diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs deleted file mode 100644 index d7b0569e..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs +++ /dev/null @@ -1,34 +0,0 @@ -using Xunit; - -namespace Gpt4All.Tests; - -public class ModelFactoryTests -{ - private readonly Gpt4AllModelFactory _modelFactory; - - public ModelFactoryTests() - { - _modelFactory = new Gpt4AllModelFactory(); - } - - [Fact] - [Trait(Traits.SkipOnCI, "True")] - public void CanLoadLlamaModel() - { - using var model = _modelFactory.LoadModel(Constants.LLAMA_MODEL_PATH); - } - - [Fact] - [Trait(Traits.SkipOnCI, "True")] - public void CanLoadGptjModel() - { - using var model = _modelFactory.LoadModel(Constants.GPTJ_MODEL_PATH); - } - - [Fact] - [Trait(Traits.SkipOnCI, "True")] - public void CanLoadMptModel() - { - using var model = _modelFactory.LoadModel(Constants.MPT_MODEL_PATH); - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs deleted file mode 100644 index 7d564593..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs +++ /dev/null @@ -1,56 +0,0 @@ -using System.IO; -using Gpt4All.LibraryLoader; -using Xunit; - -namespace Gpt4All.Tests; - -public class NativeLibraryLoaderTests -{ - [Fact] - public void NativeLibraryShouldLoad() - { - var result = NativeLibraryLoader.LoadNativeLibrary(bypassLoading: false); - Assert.True(result.IsSuccess); - } - - private const string LLModelLib = "libllmodel.{0}"; - - [PlatformSpecificFact(Platforms.Windows)] - public void NativeLibraryShouldLoad_Windows() - { - var libraryLoader = new WindowsLibraryLoader(); - - var libraryPath = Path.Combine( - Environment.CurrentDirectory, - string.Format(LLModelLib, "dll")); - - var result = libraryLoader.OpenLibrary(libraryPath); - Assert.True(result.IsSuccess); - } - - [PlatformSpecificFact(Platforms.Linux)] - public void NativeLibraryShouldLoad_Linux() - { - var libraryLoader = new LinuxLibraryLoader(); - - var libraryPath = Path.Combine( - Environment.CurrentDirectory, - string.Format(LLModelLib, "so")); - - var result = libraryLoader.OpenLibrary(libraryPath); - Assert.True(result.IsSuccess); - } - - [PlatformSpecificFact(Platforms.MacOS)] - public void NativeLibraryShouldLoad_MacOS() - { - var libraryLoader = new MacOsLibraryLoader(); - - var libraryPath = Path.Combine( - Environment.CurrentDirectory, - string.Format(LLModelLib, "dylib")); - - var result = libraryLoader.OpenLibrary(libraryPath); - Assert.True(result.IsSuccess); - } -} 
diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs deleted file mode 100644 index 9f322f6a..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs +++ /dev/null @@ -1,27 +0,0 @@ -using Xunit; - -namespace Gpt4All.Tests; - -public static class Platforms -{ - public const string Windows = "windows"; - public const string Linux = "linux"; - public const string MacOS = "macOS"; -} - -/// -/// This attribute ensures the Fact is only run on the specified platform. -/// -/// -/// for info about the platform string. -/// -public class PlatformSpecificFactAttribute : FactAttribute -{ - public PlatformSpecificFactAttribute(string platform) - { - if (!OperatingSystem.IsOSPlatform(platform)) - { - Skip = $"Test only runs on {platform}."; - } - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs deleted file mode 100644 index 572fb1c1..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs +++ /dev/null @@ -1,6 +0,0 @@ -namespace Gpt4All.Tests; - -public static class Traits -{ - public const string SkipOnCI = "SKIP_ON_CI"; -} diff --git a/gpt4all-bindings/csharp/Gpt4All.sln b/gpt4all-bindings/csharp/Gpt4All.sln deleted file mode 100644 index 65bcc7ce..00000000 --- a/gpt4all-bindings/csharp/Gpt4All.sln +++ /dev/null @@ -1,47 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.5.33516.290 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All.Samples", "Gpt4All.Samples\Gpt4All.Samples.csproj", "{59864AE8-E45D-42F7-A7C0-1308EF185F39}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{DA396C11-CEAD-4368-8234-FB12255A30D2}" - ProjectSection(SolutionItems) = preProject - .gitignore = .gitignore - build_linux.sh = build_linux.sh - build_win-mingw.ps1 = build_win-mingw.ps1 - build_win-msvc.ps1 = build_win-msvc.ps1 - docs\gpt4all_csharp.md = docs\gpt4all_csharp.md - README.md = README.md - EndProjectSection -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All", "Gpt4All\Gpt4All.csproj", "{6015C62B-2008-426B-A334-740D6F1FE38B}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Gpt4All.Tests", "Gpt4All.Tests\Gpt4All.Tests.csproj", "{33A72341-52C1-4EAE-878B-A98BC77F686A}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.Build.0 = Debug|Any CPU - {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.ActiveCfg = Release|Any CPU - {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.Build.0 = Release|Any CPU - {6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.Build.0 = Release|Any CPU - {33A72341-52C1-4EAE-878B-A98BC77F686A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {33A72341-52C1-4EAE-878B-A98BC77F686A}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{33A72341-52C1-4EAE-878B-A98BC77F686A}.Release|Any CPU.ActiveCfg = Release|Any CPU - {33A72341-52C1-4EAE-878B-A98BC77F686A}.Release|Any CPU.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {17632027-F4C2-4903-B88F-310CE3DE386B} - EndGlobalSection -EndGlobal diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs deleted file mode 100644 index dc293cae..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs +++ /dev/null @@ -1,29 +0,0 @@ -namespace Gpt4All.Bindings; - -/// -/// Represents the interface exposed by the universal wrapper for GPT4All language models built around llmodel C-API. -/// -public interface ILLModel : IDisposable -{ - ulong GetStateSizeBytes(); - - int GetThreadCount(); - - void SetThreadCount(int threadCount); - - bool IsLoaded(); - - bool Load(string modelPath); - - void Prompt( - string text, - LLModelPromptContext context, - Func? promptCallback = null, - Func? responseCallback = null, - Func? recalculateCallback = null, - CancellationToken cancellationToken = default); - - unsafe ulong RestoreStateData(byte* destination); - - unsafe ulong SaveStateData(byte* source); -} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs deleted file mode 100644 index a56b38a5..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs +++ /dev/null @@ -1,212 +0,0 @@ -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; - -namespace Gpt4All.Bindings; - -/// -/// Arguments for the response processing callback -/// -/// The token id of the response -/// The response string. NOTE: a token_id of -1 indicates the string is an error string -/// -/// A bool indicating whether the model should keep generating -/// -public record ModelResponseEventArgs(int TokenId, string Response) -{ - public bool IsError => TokenId == -1; -} - -/// -/// Arguments for the prompt processing callback -/// -/// The token id of the prompt -/// -/// A bool indicating whether the model should keep processing -/// -public record ModelPromptEventArgs(int TokenId) -{ -} - -/// -/// Arguments for the recalculating callback -/// -/// whether the model is recalculating the context. -/// -/// A bool indicating whether the model should keep generating -/// -public record ModelRecalculatingEventArgs(bool IsRecalculating); - -/// -/// Base class and universal wrapper for GPT4All language models built around llmodel C-API. -/// -public class LLModel : ILLModel -{ - protected readonly IntPtr _handle; - private readonly ILogger _logger; - private bool _disposed; - - internal LLModel(IntPtr handle, ILogger? logger = null) - { - _handle = handle; - _logger = logger ?? NullLogger.Instance; - } - - /// - /// Create a new model from a pointer - /// - /// Pointer to underlying model - public static LLModel Create(IntPtr handle, ILogger? logger = null) - { - return new LLModel(handle, logger: logger); - } - - /// - /// Generate a response using the model - /// - /// The input promp - /// The context - /// A callback function for handling the processing of prompt - /// A callback function for handling the generated response - /// A callback function for handling recalculation requests - /// - public void Prompt( - string text, - LLModelPromptContext context, - Func? 
promptCallback = null, - Func? responseCallback = null, - Func? recalculateCallback = null, - CancellationToken cancellationToken = default) - { - GC.KeepAlive(promptCallback); - GC.KeepAlive(responseCallback); - GC.KeepAlive(recalculateCallback); - GC.KeepAlive(cancellationToken); - - _logger.LogInformation("Prompt input='{Prompt}' ctx={Context}", text, context.Dump()); - - NativeMethods.llmodel_prompt( - _handle, - text, - (tokenId) => - { - if (cancellationToken.IsCancellationRequested) return false; - if (promptCallback == null) return true; - var args = new ModelPromptEventArgs(tokenId); - return promptCallback(args); - }, - (tokenId, response) => - { - if (cancellationToken.IsCancellationRequested) - { - _logger.LogDebug("ResponseCallback evt=CancellationRequested"); - return false; - } - - if (responseCallback == null) return true; - var args = new ModelResponseEventArgs(tokenId, response); - return responseCallback(args); - }, - (isRecalculating) => - { - if (cancellationToken.IsCancellationRequested) return false; - if (recalculateCallback == null) return true; - var args = new ModelRecalculatingEventArgs(isRecalculating); - return recalculateCallback(args); - }, - ref context.UnderlyingContext - ); - } - - /// - /// Set the number of threads to be used by the model. - /// - /// The new thread count - public void SetThreadCount(int threadCount) - { - NativeMethods.llmodel_setThreadCount(_handle, threadCount); - } - - /// - /// Get the number of threads used by the model. - /// - /// the number of threads used by the model - public int GetThreadCount() - { - return NativeMethods.llmodel_threadCount(_handle); - } - - /// - /// Get the size of the internal state of the model. - /// - /// - /// This state data is specific to the type of model you have created. - /// - /// the size in bytes of the internal state of the model - public ulong GetStateSizeBytes() - { - return NativeMethods.llmodel_get_state_size(_handle); - } - - /// - /// Saves the internal state of the model to the specified destination address. - /// - /// A pointer to the src - /// The number of bytes copied - public unsafe ulong SaveStateData(byte* source) - { - return NativeMethods.llmodel_save_state_data(_handle, source); - } - - /// - /// Restores the internal state of the model using data from the specified address. - /// - /// A pointer to destination - /// the number of bytes read - public unsafe ulong RestoreStateData(byte* destination) - { - return NativeMethods.llmodel_restore_state_data(_handle, destination); - } - - /// - /// Check if the model is loaded. - /// - /// true if the model was loaded successfully, false otherwise. - public bool IsLoaded() - { - return NativeMethods.llmodel_isModelLoaded(_handle); - } - - /// - /// Load the model from a file. - /// - /// The path to the model file. - /// true if the model was loaded successfully, false otherwise. 
- public bool Load(string modelPath) - { - return NativeMethods.llmodel_loadModel(_handle, modelPath, 2048, 100); - } - - protected void Destroy() - { - NativeMethods.llmodel_model_destroy(_handle); - } - protected virtual void Dispose(bool disposing) - { - if (_disposed) return; - - if (disposing) - { - // dispose managed state - } - - Destroy(); - - _disposed = true; - } - - public void Dispose() - { - Dispose(disposing: true); - GC.SuppressFinalize(this); - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs deleted file mode 100644 index 002972b2..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs +++ /dev/null @@ -1,147 +0,0 @@ -namespace Gpt4All.Bindings; - -/// -/// Wrapper around the llmodel_prompt_context structure for holding the prompt context. -/// -/// -/// The implementation takes care of all the memory handling of the raw logits pointer and the -/// raw tokens pointer.Attempting to resize them or modify them in any way can lead to undefined behavior -/// -public unsafe class LLModelPromptContext -{ - private llmodel_prompt_context _ctx; - - internal ref llmodel_prompt_context UnderlyingContext => ref _ctx; - - public LLModelPromptContext() - { - _ctx = new(); - } - - /// - /// logits of current context - /// - public Span Logits => new(_ctx.logits, (int)_ctx.logits_size); - - /// - /// the size of the raw logits vector - /// - public nuint LogitsSize - { - get => _ctx.logits_size; - set => _ctx.logits_size = value; - } - - /// - /// current tokens in the context window - /// - public Span Tokens => new(_ctx.tokens, (int)_ctx.tokens_size); - - /// - /// the size of the raw tokens vector - /// - public nuint TokensSize - { - get => _ctx.tokens_size; - set => _ctx.tokens_size = value; - } - - /// - /// top k logits to sample from - /// - public int TopK - { - get => _ctx.top_k; - set => _ctx.top_k = value; - } - - /// - /// nucleus sampling probability threshold - /// - public float TopP - { - get => _ctx.top_p; - set => _ctx.top_p = value; - } - - /// - /// min p sampling probability threshold - /// - public float MinP - { - get => _ctx.min_p; - set => _ctx.min_p = value; - } - - /// - /// temperature to adjust model's output distribution - /// - public float Temperature - { - get => _ctx.temp; - set => _ctx.temp = value; - } - - /// - /// number of tokens in past conversation - /// - public int PastNum - { - get => _ctx.n_past; - set => _ctx.n_past = value; - } - - /// - /// number of predictions to generate in parallel - /// - public int Batches - { - get => _ctx.n_batch; - set => _ctx.n_batch = value; - } - - /// - /// number of tokens to predict - /// - public int TokensToPredict - { - get => _ctx.n_predict; - set => _ctx.n_predict = value; - } - - /// - /// penalty factor for repeated tokens - /// - public float RepeatPenalty - { - get => _ctx.repeat_penalty; - set => _ctx.repeat_penalty = value; - } - - /// - /// last n tokens to penalize - /// - public int RepeatLastN - { - get => _ctx.repeat_last_n; - set => _ctx.repeat_last_n = value; - } - - /// - /// number of tokens possible in context window - /// - public int ContextSize - { - get => _ctx.n_ctx; - set => _ctx.n_ctx = value; - } - - /// - /// percent of context to erase if we exceed the context window - /// - public float ContextErase - { - get => _ctx.context_erase; - set => _ctx.context_erase = value; - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs 
b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs deleted file mode 100644 index 2e61d933..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs +++ /dev/null @@ -1,112 +0,0 @@ -using System.Runtime.InteropServices; - -namespace Gpt4All.Bindings; - -public unsafe partial struct llmodel_prompt_context -{ - public float* logits; - - [NativeTypeName("size_t")] - public nuint logits_size; - - [NativeTypeName("int32_t *")] - public int* tokens; - - [NativeTypeName("size_t")] - public nuint tokens_size; - - [NativeTypeName("int32_t")] - public int n_past; - - [NativeTypeName("int32_t")] - public int n_ctx; - - [NativeTypeName("int32_t")] - public int n_predict; - - [NativeTypeName("int32_t")] - public int top_k; - - public float top_p; - - public float min_p; - - public float temp; - - [NativeTypeName("int32_t")] - public int n_batch; - - public float repeat_penalty; - - [NativeTypeName("int32_t")] - public int repeat_last_n; - - public float context_erase; -} -#pragma warning disable CA2101 -internal static unsafe partial class NativeMethods -{ - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - [return: MarshalAs(UnmanagedType.I1)] - public delegate bool LlmodelResponseCallback(int token_id, [MarshalAs(UnmanagedType.LPUTF8Str)] string response); - - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - [return: MarshalAs(UnmanagedType.I1)] - public delegate bool LlmodelPromptCallback(int token_id); - - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - [return: MarshalAs(UnmanagedType.I1)] - public delegate bool LlmodelRecalculateCallback(bool isRecalculating); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] - [return: NativeTypeName("llmodel_model")] - public static extern IntPtr llmodel_model_create2( - [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path, - [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string build_variant, - out IntPtr error); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - public static extern void llmodel_model_destroy([NativeTypeName("llmodel_model")] IntPtr model); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] - [return: MarshalAs(UnmanagedType.I1)] - public static extern bool llmodel_loadModel( - [NativeTypeName("llmodel_model")] IntPtr model, - [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path, - [NativeTypeName("int32_t")] int n_ctx, - [NativeTypeName("int32_t")] int ngl); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - - [return: MarshalAs(UnmanagedType.I1)] - public static extern bool llmodel_isModelLoaded([NativeTypeName("llmodel_model")] IntPtr model); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - [return: NativeTypeName("uint64_t")] - public static extern ulong llmodel_get_state_size([NativeTypeName("llmodel_model")] IntPtr model); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - [return: NativeTypeName("uint64_t")] - public static extern ulong llmodel_save_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("uint8_t *")] byte* dest); - - [DllImport("libllmodel", CallingConvention = 
CallingConvention.Cdecl, ExactSpelling = true)] - [return: NativeTypeName("uint64_t")] - public static extern ulong llmodel_restore_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("const uint8_t *")] byte* src); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] - public static extern void llmodel_prompt( - [NativeTypeName("llmodel_model")] IntPtr model, - [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string prompt, - LlmodelPromptCallback prompt_callback, - LlmodelResponseCallback response_callback, - LlmodelRecalculateCallback recalculate_callback, - ref llmodel_prompt_context ctx); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - public static extern void llmodel_setThreadCount([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("int32_t")] int n_threads); - - [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] - [return: NativeTypeName("int32_t")] - public static extern int llmodel_threadCount([NativeTypeName("llmodel_model")] IntPtr model); -} -#pragma warning restore CA2101 diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs deleted file mode 100644 index f9917bc0..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System.Diagnostics; - -namespace Gpt4All.Bindings; - -/// Defines the type of a member as it was used in the native signature. -[AttributeUsage(AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.ReturnValue, AllowMultiple = false, Inherited = true)] -[Conditional("DEBUG")] -internal sealed partial class NativeTypeNameAttribute : Attribute -{ - private readonly string _name; - - /// Initializes a new instance of the class. - /// The name of the type that was used in the native signature. - public NativeTypeNameAttribute(string name) - { - _name = name; - } - - /// Gets the name of the type that was used in the native signature. 
diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs
deleted file mode 100644
index f9917bc0..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs
+++ /dev/null
@@ -1,21 +0,0 @@
-using System.Diagnostics;
-
-namespace Gpt4All.Bindings;
-
-/// <summary>Defines the type of a member as it was used in the native signature.</summary>
-[AttributeUsage(AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.ReturnValue, AllowMultiple = false, Inherited = true)]
-[Conditional("DEBUG")]
-internal sealed partial class NativeTypeNameAttribute : Attribute
-{
-    private readonly string _name;
-
-    /// <summary>Initializes a new instance of the <see cref="NativeTypeNameAttribute" /> class.</summary>
-    /// <param name="name">The name of the type that was used in the native signature.</param>
-    public NativeTypeNameAttribute(string name)
-    {
-        _name = name;
-    }
-
-    /// <summary>Gets the name of the type that was used in the native signature.</summary>
-    public string Name => _name;
-}
\ No newline at end of file
diff --git a/gpt4all-bindings/csharp/Gpt4All/Extensions/LLPromptContextExtensions.cs b/gpt4all-bindings/csharp/Gpt4All/Extensions/LLPromptContextExtensions.cs
deleted file mode 100644
index 5581e458..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Extensions/LLPromptContextExtensions.cs
+++ /dev/null
@@ -1,27 +0,0 @@
-using Gpt4All.Bindings;
-
-namespace Gpt4All;
-
-internal static class LLPromptContextExtensions
-{
-    public static string Dump(this LLModelPromptContext context)
-    {
-        var ctx = context.UnderlyingContext;
-        return @$"
-        {{
-            logits_size = {ctx.logits_size}
-            tokens_size = {ctx.tokens_size}
-            n_past = {ctx.n_past}
-            n_ctx = {ctx.n_ctx}
-            n_predict = {ctx.n_predict}
-            top_k = {ctx.top_k}
-            top_p = {ctx.top_p}
-            min_p = {ctx.min_p}
-            temp = {ctx.temp}
-            n_batch = {ctx.n_batch}
-            repeat_penalty = {ctx.repeat_penalty}
-            repeat_last_n = {ctx.repeat_last_n}
-            context_erase = {ctx.context_erase}
-        }}";
-    }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs b/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs
deleted file mode 100644
index 07d1e104..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs
+++ /dev/null
@@ -1,26 +0,0 @@
-using Gpt4All.Bindings;
-
-namespace Gpt4All;
-
-public static class PredictRequestOptionsExtensions
-{
-    public static LLModelPromptContext ToPromptContext(this PredictRequestOptions opts)
-    {
-        return new LLModelPromptContext
-        {
-            LogitsSize = opts.LogitsSize,
-            TokensSize = opts.TokensSize,
-            TopK = opts.TopK,
-            TopP = opts.TopP,
-            MinP = opts.MinP,
-            PastNum = opts.PastConversationTokensNum,
-            RepeatPenalty = opts.RepeatPenalty,
-            Temperature = opts.Temperature,
-            RepeatLastN = opts.RepeatLastN,
-            Batches = opts.Batches,
-            ContextErase = opts.ContextErase,
-            ContextSize = opts.ContextSize,
-            TokensToPredict = opts.TokensToPredict
-        };
-    }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp b/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp
deleted file mode 100644
index 4364a1f2..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp
+++ /dev/null
@@ -1,21 +0,0 @@
---config
-exclude-funcs-with-body
---with-access-specifier
-*=Public
---include-directory
-..\..\..\gpt4all-backend\
---file
-..\..\..\gpt4all-backend\llmodel_c.h
---libraryPath
-libllmodel
---remap
-sbyte*=IntPtr
-void*=IntPtr
---namespace
-Gpt4All.Bindings
---methodClassName
-NativeMethods
---output
-.\Bindings\NativeMethods.cs
---output-mode
-CSharp
\ No newline at end of file
diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs
deleted file mode 100644
index f24f5ba1..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs
+++ /dev/null
@@ -1,135 +0,0 @@
-using System.Diagnostics;
-using System.Runtime.CompilerServices;
-using Gpt4All.Bindings;
-using Microsoft.Extensions.Logging;
-using Microsoft.Extensions.Logging.Abstractions;
-
-[assembly: InternalsVisibleTo("Gpt4All.Tests")]
-
-namespace Gpt4All;
-
-public class Gpt4All : IGpt4AllModel
-{
-    private readonly ILLModel _model;
-    private readonly ILogger _logger;
-
-    private const string ResponseErrorMessage =
-        "The model reported an error during token generation error={ResponseError}";
-
-    /// <inheritdoc/>
-    public IPromptFormatter? PromptFormatter { get; set; }
-
-    internal Gpt4All(ILLModel model, ILogger? logger = null)
-    {
-        _model = model;
-        _logger = logger ??
NullLogger.Instance;
-        PromptFormatter = new DefaultPromptFormatter();
-    }
-
-    private string FormatPrompt(string prompt)
-    {
-        if (PromptFormatter == null) return prompt;
-
-        return PromptFormatter.FormatPrompt(prompt);
-    }
-
-    public Task<ITextPredictionResult> GetPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default)
-    {
-        ArgumentNullException.ThrowIfNull(text);
-
-        return Task.Run(() =>
-        {
-            _logger.LogInformation("Start prediction task");
-
-            var sw = Stopwatch.StartNew();
-            var result = new TextPredictionResult();
-            var context = opts.ToPromptContext();
-            var prompt = FormatPrompt(text);
-
-            try
-            {
-                _model.Prompt(prompt, context, responseCallback: e =>
-                {
-                    if (e.IsError)
-                    {
-                        _logger.LogWarning(ResponseErrorMessage, e.Response);
-                        result.Success = false;
-                        result.ErrorMessage = e.Response;
-                        return false;
-                    }
-                    result.Append(e.Response);
-                    return true;
-                }, cancellationToken: cancellationToken);
-            }
-            catch (Exception e)
-            {
-                _logger.LogError(e, "Prompt error");
-                result.Success = false;
-            }
-
-            sw.Stop();
-            _logger.LogInformation("Prediction task completed elapsed={Elapsed}s", sw.Elapsed.TotalSeconds);
-
-            return (ITextPredictionResult)result;
-        }, CancellationToken.None);
-    }
-
-    public Task<ITextPredictionStreamingResult> GetStreamingPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default)
-    {
-        ArgumentNullException.ThrowIfNull(text);
-
-        var result = new TextPredictionStreamingResult();
-
-        _ = Task.Run(() =>
-        {
-            _logger.LogInformation("Start streaming prediction task");
-            var sw = Stopwatch.StartNew();
-
-            try
-            {
-                var context = opts.ToPromptContext();
-                var prompt = FormatPrompt(text);
-
-                _model.Prompt(prompt, context, responseCallback: e =>
-                {
-                    if (e.IsError)
-                    {
-                        _logger.LogWarning(ResponseErrorMessage, e.Response);
-                        result.Success = false;
-                        result.ErrorMessage = e.Response;
-                        return false;
-                    }
-                    result.Append(e.Response);
-                    return true;
-                }, cancellationToken: cancellationToken);
-            }
-            catch (Exception e)
-            {
-                _logger.LogError(e, "Prompt error");
-                result.Success = false;
-            }
-            finally
-            {
-                result.Complete();
-                sw.Stop();
-                _logger.LogInformation("Prediction task completed elapsed={Elapsed}s", sw.Elapsed.TotalSeconds);
-            }
-        }, CancellationToken.None);
-
-        return Task.FromResult((ITextPredictionStreamingResult)result);
-    }
-
-    protected virtual void Dispose(bool disposing)
-    {
-        if (disposing)
-        {
-            _model.Dispose();
-        }
-    }
-
-    public void Dispose()
-    {
-        Dispose(true);
-        GC.SuppressFinalize(this);
-    }
-}
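Note that `GetPredictionAsync` reports generation failures through the result object rather than by throwing, so callers are expected to check `Success` before reading the text. A minimal sketch of that pattern (the model path is a placeholder):

```csharp
using Gpt4All;

var factory = new Gpt4AllModelFactory();
using var model = factory.LoadModel("./path/to/model.gguf"); // placeholder path

var result = await model.GetPredictionAsync("Name 3 colors", PredictRequestOptions.Defaults);

if (result.Success)
    Console.WriteLine(await result.GetPredictionAsync());
else
    Console.Error.WriteLine($"Generation failed: {result.ErrorMessage}");
```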
diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj
deleted file mode 100644
index af338f82..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
- enable
- enable
- true
- true
- net8.0
-
-
-
-
-
-
-
-
-
- true
-
-
-
-
-
diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/ILibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/ILibraryLoader.cs
deleted file mode 100644
index c4e462f8..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/ILibraryLoader.cs
+++ /dev/null
@@ -1,6 +0,0 @@
-namespace Gpt4All.LibraryLoader;
-
-public interface ILibraryLoader
-{
-    LoadResult OpenLibrary(string? fileName);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LinuxLibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LinuxLibraryLoader.cs
deleted file mode 100644
index d7f6834a..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LinuxLibraryLoader.cs
+++ /dev/null
@@ -1,53 +0,0 @@
-using System.Runtime.InteropServices;
-
-namespace Gpt4All.LibraryLoader;
-
-internal class LinuxLibraryLoader : ILibraryLoader
-{
-#pragma warning disable CA2101
-    [DllImport("libdl.so", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
-#pragma warning restore CA2101
-    public static extern IntPtr NativeOpenLibraryLibdl(string? filename, int flags);
-
-#pragma warning disable CA2101
-    [DllImport("libdl.so.2", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
-#pragma warning restore CA2101
-    public static extern IntPtr NativeOpenLibraryLibdl2(string? filename, int flags);
-
-    [DllImport("libdl.so", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
-    public static extern IntPtr GetLoadError();
-
-    [DllImport("libdl.so.2", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
-    public static extern IntPtr GetLoadError2();
-
-    public LoadResult OpenLibrary(string? fileName)
-    {
-        IntPtr loadedLib;
-        try
-        {
-            // open with the RTLD_LAZY flag (0x00001)
-            loadedLib = NativeOpenLibraryLibdl2(fileName, 0x00001);
-        }
-        catch (DllNotFoundException)
-        {
-            loadedLib = NativeOpenLibraryLibdl(fileName, 0x00001);
-        }
-
-        if (loadedLib == IntPtr.Zero)
-        {
-            string errorMessage;
-            try
-            {
-                errorMessage = Marshal.PtrToStringAnsi(GetLoadError2()) ?? "Unknown error";
-            }
-            catch (DllNotFoundException)
-            {
-                errorMessage = Marshal.PtrToStringAnsi(GetLoadError()) ?? "Unknown error";
-            }
-
-            return LoadResult.Failure(errorMessage);
-        }
-
-        return LoadResult.Success;
-    }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LoadResult.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LoadResult.cs
deleted file mode 100644
index 3dccf358..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/LoadResult.cs
+++ /dev/null
@@ -1,20 +0,0 @@
-namespace Gpt4All.LibraryLoader;
-
-public class LoadResult
-{
-    private LoadResult(bool isSuccess, string? errorMessage)
-    {
-        IsSuccess = isSuccess;
-        ErrorMessage = errorMessage;
-    }
-
-    public static LoadResult Success { get; } = new(true, null);
-
-    public static LoadResult Failure(string errorMessage)
-    {
-        return new(false, errorMessage);
-    }
-
-    public bool IsSuccess { get; }
-    public string? ErrorMessage { get; }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/MacOsLibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/MacOsLibraryLoader.cs
deleted file mode 100644
index 6577d979..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/MacOsLibraryLoader.cs
+++ /dev/null
@@ -1,28 +0,0 @@
-using System.Runtime.InteropServices;
-
-namespace Gpt4All.LibraryLoader;
-
-internal class MacOsLibraryLoader : ILibraryLoader
-{
-#pragma warning disable CA2101
-    [DllImport("libdl.dylib", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlopen")]
-#pragma warning restore CA2101
-    public static extern IntPtr NativeOpenLibraryLibdl(string? filename, int flags);
-
-    [DllImport("libdl.dylib", ExactSpelling = true, CharSet = CharSet.Auto, EntryPoint = "dlerror")]
-    public static extern IntPtr GetLoadError();
-
-    public LoadResult OpenLibrary(string?
fileName) - { - var loadedLib = NativeOpenLibraryLibdl(fileName, 0x00001); - - if (loadedLib == IntPtr.Zero) - { - var errorMessage = Marshal.PtrToStringAnsi(GetLoadError()) ?? "Unknown error"; - - return LoadResult.Failure(errorMessage); - } - - return LoadResult.Success; - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/NativeLibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/NativeLibraryLoader.cs deleted file mode 100644 index 85353738..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/NativeLibraryLoader.cs +++ /dev/null @@ -1,81 +0,0 @@ -#if !IOS && !MACCATALYST && !TVOS && !ANDROID -using System.Runtime.InteropServices; -#endif - -namespace Gpt4All.LibraryLoader; - -public static class NativeLibraryLoader -{ - private static ILibraryLoader? defaultLibraryLoader; - - /// - /// Sets the library loader used to load the native libraries. Overwrite this only if you want some custom loading. - /// - /// The library loader to be used. - public static void SetLibraryLoader(ILibraryLoader libraryLoader) - { - defaultLibraryLoader = libraryLoader; - } - - internal static LoadResult LoadNativeLibrary(string? path = default, bool bypassLoading = true) - { - // If the user has handled loading the library themselves, we don't need to do anything. - if (bypassLoading) - { - return LoadResult.Success; - } - - var architecture = RuntimeInformation.OSArchitecture switch - { - Architecture.X64 => "x64", - Architecture.X86 => "x86", - Architecture.Arm => "arm", - Architecture.Arm64 => "arm64", - _ => throw new PlatformNotSupportedException( - $"Unsupported OS platform, architecture: {RuntimeInformation.OSArchitecture}") - }; - - var (platform, extension) = Environment.OSVersion.Platform switch - { - _ when RuntimeInformation.IsOSPlatform(OSPlatform.Windows) => ("win", "dll"), - _ when RuntimeInformation.IsOSPlatform(OSPlatform.Linux) => ("linux", "so"), - _ when RuntimeInformation.IsOSPlatform(OSPlatform.OSX) => ("osx", "dylib"), - _ => throw new PlatformNotSupportedException( - $"Unsupported OS platform, architecture: {RuntimeInformation.OSArchitecture}") - }; - - // If the user hasn't set the path, we'll try to find it ourselves. - if (string.IsNullOrEmpty(path)) - { - var libraryName = "libllmodel"; - var assemblySearchPath = new[] - { - AppDomain.CurrentDomain.RelativeSearchPath, - Path.GetDirectoryName(typeof(NativeLibraryLoader).Assembly.Location), - Path.GetDirectoryName(Environment.GetCommandLineArgs()[0]) - }.FirstOrDefault(it => !string.IsNullOrEmpty(it)); - // Search for the library dll within the assembly search path. If it doesn't exist, for whatever reason, use the default path. - path = Directory.EnumerateFiles(assemblySearchPath ?? string.Empty, $"{libraryName}.{extension}", SearchOption.AllDirectories).FirstOrDefault() ?? Path.Combine("runtimes", $"{platform}-{architecture}", $"{libraryName}.{extension}"); - } - - if (defaultLibraryLoader != null) - { - return defaultLibraryLoader.OpenLibrary(path); - } - - if (!File.Exists(path)) - { - throw new FileNotFoundException($"Native Library not found in path {path}. 
" + - $"Verify you have have included the native Gpt4All library in your application."); - } - - ILibraryLoader libraryLoader = platform switch - { - "win" => new WindowsLibraryLoader(), - "osx" => new MacOsLibraryLoader(), - "linux" => new LinuxLibraryLoader(), - _ => throw new PlatformNotSupportedException($"Currently {platform} platform is not supported") - }; - return libraryLoader.OpenLibrary(path); - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs deleted file mode 100644 index d2479aa4..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System.ComponentModel; -using System.Runtime.InteropServices; - -namespace Gpt4All.LibraryLoader; - -internal class WindowsLibraryLoader : ILibraryLoader -{ - public LoadResult OpenLibrary(string? fileName) - { - var loadedLib = LoadLibrary(fileName); - - if (loadedLib == IntPtr.Zero) - { - var errorCode = Marshal.GetLastWin32Error(); - var errorMessage = new Win32Exception(errorCode).Message; - return LoadResult.Failure(errorMessage); - } - - return LoadResult.Success; - } - - [DllImport("kernel32", SetLastError = true, CharSet = CharSet.Auto)] - private static extern IntPtr LoadLibrary([MarshalAs(UnmanagedType.LPWStr)] string? lpFileName); -} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs b/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs deleted file mode 100644 index 5be9cd2a..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs +++ /dev/null @@ -1,16 +0,0 @@ -namespace Gpt4All; - -public class DefaultPromptFormatter : IPromptFormatter -{ - public string FormatPrompt(string prompt) - { - return $""" - ### Instruction: - The prompt below is a question to answer, a task to complete, or a conversation - to respond to; decide which and write an appropriate response. - ### Prompt: - {prompt} - ### Response: - """; - } -} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs b/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs deleted file mode 100644 index 938f44d8..00000000 --- a/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs +++ /dev/null @@ -1,62 +0,0 @@ -using System.Diagnostics; -using Microsoft.Extensions.Logging.Abstractions; -using Microsoft.Extensions.Logging; -using Gpt4All.Bindings; -using Gpt4All.LibraryLoader; -using System.Runtime.InteropServices; - -namespace Gpt4All; - -public class Gpt4AllModelFactory : IGpt4AllModelFactory -{ - private readonly ILoggerFactory _loggerFactory; - private readonly ILogger _logger; - private static bool bypassLoading; - private static string? libraryPath; - - private static readonly Lazy libraryLoaded = new(() => - { - return NativeLibraryLoader.LoadNativeLibrary(Gpt4AllModelFactory.libraryPath, Gpt4AllModelFactory.bypassLoading); - }, true); - - public Gpt4AllModelFactory(string? libraryPath = default, bool bypassLoading = true, ILoggerFactory? loggerFactory = null) - { - _loggerFactory = loggerFactory ?? NullLoggerFactory.Instance; - _logger = _loggerFactory.CreateLogger(); - Gpt4AllModelFactory.libraryPath = libraryPath; - Gpt4AllModelFactory.bypassLoading = bypassLoading; - - if (!libraryLoaded.Value.IsSuccess) - { - throw new Exception($"Failed to load native gpt4all library. 
diff --git a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs b/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs
deleted file mode 100644
index d2479aa4..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/LibraryLoader/WindowsLibraryLoader.cs
+++ /dev/null
@@ -1,24 +0,0 @@
-using System.ComponentModel;
-using System.Runtime.InteropServices;
-
-namespace Gpt4All.LibraryLoader;
-
-internal class WindowsLibraryLoader : ILibraryLoader
-{
-    public LoadResult OpenLibrary(string? fileName)
-    {
-        var loadedLib = LoadLibrary(fileName);
-
-        if (loadedLib == IntPtr.Zero)
-        {
-            var errorCode = Marshal.GetLastWin32Error();
-            var errorMessage = new Win32Exception(errorCode).Message;
-            return LoadResult.Failure(errorMessage);
-        }
-
-        return LoadResult.Success;
-    }
-
-    [DllImport("kernel32", SetLastError = true, CharSet = CharSet.Auto)]
-    private static extern IntPtr LoadLibrary([MarshalAs(UnmanagedType.LPWStr)] string? lpFileName);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs b/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs
deleted file mode 100644
index 5be9cd2a..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/DefaultPromptFormatter.cs
+++ /dev/null
@@ -1,16 +0,0 @@
-namespace Gpt4All;
-
-public class DefaultPromptFormatter : IPromptFormatter
-{
-    public string FormatPrompt(string prompt)
-    {
-        return $"""
-            ### Instruction:
-            The prompt below is a question to answer, a task to complete, or a conversation
-            to respond to; decide which and write an appropriate response.
-            ### Prompt:
-            {prompt}
-            ### Response:
-            """;
-    }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs b/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs
deleted file mode 100644
index 938f44d8..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-using System.Diagnostics;
-using Microsoft.Extensions.Logging.Abstractions;
-using Microsoft.Extensions.Logging;
-using Gpt4All.Bindings;
-using Gpt4All.LibraryLoader;
-using System.Runtime.InteropServices;
-
-namespace Gpt4All;
-
-public class Gpt4AllModelFactory : IGpt4AllModelFactory
-{
-    private readonly ILoggerFactory _loggerFactory;
-    private readonly ILogger _logger;
-    private static bool bypassLoading;
-    private static string? libraryPath;
-
-    private static readonly Lazy<LoadResult> libraryLoaded = new(() =>
-    {
-        return NativeLibraryLoader.LoadNativeLibrary(Gpt4AllModelFactory.libraryPath, Gpt4AllModelFactory.bypassLoading);
-    }, true);
-
-    public Gpt4AllModelFactory(string? libraryPath = default, bool bypassLoading = true, ILoggerFactory? loggerFactory = null)
-    {
-        _loggerFactory = loggerFactory ?? NullLoggerFactory.Instance;
-        _logger = _loggerFactory.CreateLogger<Gpt4AllModelFactory>();
-        Gpt4AllModelFactory.libraryPath = libraryPath;
-        Gpt4AllModelFactory.bypassLoading = bypassLoading;
-
-        if (!libraryLoaded.Value.IsSuccess)
-        {
-            throw new Exception($"Failed to load native gpt4all library. Error: {libraryLoaded.Value.ErrorMessage}");
-        }
-    }
-
-    private Gpt4All CreateModel(string modelPath)
-    {
-        _logger.LogInformation("Creating model path={ModelPath}", modelPath);
-        IntPtr error;
-        var handle = NativeMethods.llmodel_model_create2(modelPath, "auto", out error);
-        if (error != IntPtr.Zero)
-        {
-            throw new Exception(Marshal.PtrToStringAnsi(error));
-        }
-        _logger.LogDebug("Model created handle=0x{ModelHandle:X8}", handle);
-        _logger.LogInformation("Model loading started");
-        var loadedSuccessfully = NativeMethods.llmodel_loadModel(handle, modelPath, 2048, 100);
-        _logger.LogInformation("Model loading completed success={ModelLoadSuccess}", loadedSuccessfully);
-        if (!loadedSuccessfully)
-        {
-            throw new Exception($"Failed to load model: '{modelPath}'");
-        }
-
-        var logger = _loggerFactory.CreateLogger<LLModel>();
-        var underlyingModel = LLModel.Create(handle, logger: logger);
-
-        Debug.Assert(underlyingModel.IsLoaded());
-
-        return new Gpt4All(underlyingModel, logger: logger);
-    }
-
-    public IGpt4AllModel LoadModel(string modelPath) => CreateModel(modelPath);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs
deleted file mode 100644
index 11bdd1b2..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs
+++ /dev/null
@@ -1,10 +0,0 @@
-namespace Gpt4All;
-
-public interface IGpt4AllModel : ITextPrediction, IDisposable
-{
-    /// <summary>
-    /// The prompt formatter used to format the prompt before
-    /// feeding it to the model, if null no transformation is applied
-    /// </summary>
-    IPromptFormatter? PromptFormatter { get; set; }
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs
deleted file mode 100644
index 90c54d5e..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs
+++ /dev/null
@@ -1,6 +0,0 @@
-namespace Gpt4All;
-
-public interface IGpt4AllModelFactory
-{
-    IGpt4AllModel LoadModel(string modelPath);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/IPromptFormatter.cs b/gpt4all-bindings/csharp/Gpt4All/Model/IPromptFormatter.cs
deleted file mode 100644
index f6bc19aa..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/IPromptFormatter.cs
+++ /dev/null
@@ -1,14 +0,0 @@
-namespace Gpt4All;
-
-/// <summary>
-/// Formats a prompt
-/// </summary>
-public interface IPromptFormatter
-{
-    /// <summary>
-    /// Format the provided prompt
-    /// </summary>
-    /// <param name="prompt">the input prompt</param>
-    /// <returns>The formatted prompt</returns>
-    string FormatPrompt(string prompt);
-}
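Because `PromptFormatter` is settable on `IGpt4AllModel`, the instruction-style default above can be swapped for any other template, or effectively disabled, without touching the model itself. A minimal sketch:

```csharp
using Gpt4All;

public class PassthroughPromptFormatter : IPromptFormatter
{
    // Send the prompt to the model verbatim; assigning null to
    // model.PromptFormatter has the same effect.
    public string FormatPrompt(string prompt) => prompt;
}

// usage:
//   model.PromptFormatter = new PassthroughPromptFormatter();
```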
diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs b/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs
deleted file mode 100644
index 1b7e50b4..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs
+++ /dev/null
@@ -1,6 +0,0 @@
-namespace Gpt4All;
-
-public record ModelOptions
-{
-    public int Threads { get; init; } = 4;
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs
deleted file mode 100644
index 47ed3847..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs
+++ /dev/null
@@ -1,31 +0,0 @@
-namespace Gpt4All;
-
-/// <summary>
-/// Interface for text prediction services
-/// </summary>
-public interface ITextPrediction
-{
-    /// <summary>
-    /// Get prediction results for the prompt and provided options.
-    /// </summary>
-    /// <param name="text">The text to complete</param>
-    /// <param name="opts">The prediction settings</param>
-    /// <param name="cancellation">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
-    /// <returns>The prediction result generated by the model</returns>
-    Task<ITextPredictionResult> GetPredictionAsync(
-        string text,
-        PredictRequestOptions opts,
-        CancellationToken cancellation = default);
-
-    /// <summary>
-    /// Get streaming prediction results for the prompt and provided options.
-    /// </summary>
-    /// <param name="text">The text to complete</param>
-    /// <param name="opts">The prediction settings</param>
-    /// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
-    /// <returns>The prediction result generated by the model</returns>
-    Task<ITextPredictionStreamingResult> GetStreamingPredictionAsync(
-        string text,
-        PredictRequestOptions opts,
-        CancellationToken cancellationToken = default);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs
deleted file mode 100644
index 96cd58dd..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs
+++ /dev/null
@@ -1,10 +0,0 @@
-namespace Gpt4All;
-
-public interface ITextPredictionResult
-{
-    bool Success { get; }
-
-    string? ErrorMessage { get; }
-
-    Task<string> GetPredictionAsync(CancellationToken cancellationToken = default);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs
deleted file mode 100644
index 6ce49d0d..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs
+++ /dev/null
@@ -1,6 +0,0 @@
-namespace Gpt4All;
-
-public interface ITextPredictionStreamingResult : ITextPredictionResult
-{
-    IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default);
-}
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs
deleted file mode 100644
index c151a5b6..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs
+++ /dev/null
@@ -1,32 +0,0 @@
-namespace Gpt4All;
-
-public record PredictRequestOptions
-{
-    public nuint LogitsSize { get; init; } = 0;
-
-    public nuint TokensSize { get; init; } = 0;
-
-    public int PastConversationTokensNum { get; init; } = 0;
-
-    public int ContextSize { get; init; } = 1024;
-
-    public int TokensToPredict { get; init; } = 128;
-
-    public int TopK { get; init; } = 40;
-
-    public float TopP { get; init; } = 0.9f;
-
-    public float MinP { get; init; } = 0.0f;
-
-    public float Temperature { get; init; } = 0.1f;
-
-    public int Batches { get; init; } = 8;
-
-    public float RepeatPenalty { get; init; } = 1.2f;
-
-    public int RepeatLastN { get; init; } = 10;
-
-    public float ContextErase { get; init; } = 0.5f;
-
-    public static readonly PredictRequestOptions Defaults = new();
-}
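Since `PredictRequestOptions` is a record with init-only properties, per-request overrides can be derived from the shared `Defaults` instance with a non-destructive `with` expression:

```csharp
using Gpt4All;

// Start from the defaults and override only the sampling knobs we care about.
var opts = PredictRequestOptions.Defaults with
{
    Temperature = 0.7f,
    TopK = 80,
    TokensToPredict = 256
};
```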
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs
deleted file mode 100644
index 707bdcd9..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs
+++ /dev/null
@@ -1,27 +0,0 @@
-using System.Text;
-
-namespace Gpt4All;
-
-public record TextPredictionResult : ITextPredictionResult
-{
-    private readonly StringBuilder _result;
-
-    public bool Success { get; internal set; } = true;
-
-    public string? ErrorMessage { get; internal set; }
-
-    internal TextPredictionResult()
-    {
-        _result = new StringBuilder();
-    }
-
-    internal void Append(string token)
-    {
-        _result.Append(token);
-    }
-
-    public Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
-    {
-        return Task.FromResult(_result.ToString());
-    }
-}
\ No newline at end of file
diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs
deleted file mode 100644
index 03723d57..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs
+++ /dev/null
@@ -1,49 +0,0 @@
-using System.Text;
-using System.Threading.Channels;
-
-namespace Gpt4All;
-
-public record TextPredictionStreamingResult : ITextPredictionStreamingResult
-{
-    private readonly Channel<string> _channel;
-
-    public bool Success { get; internal set; } = true;
-
-    public string? ErrorMessage { get; internal set; }
-
-    public Task Completion => _channel.Reader.Completion;
-
-    internal TextPredictionStreamingResult()
-    {
-        _channel = Channel.CreateUnbounded<string>();
-    }
-
-    internal bool Append(string token)
-    {
-        return _channel.Writer.TryWrite(token);
-    }
-
-    internal void Complete()
-    {
-        _channel.Writer.Complete();
-    }
-
-    public async Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
-    {
-        var sb = new StringBuilder();
-
-        var tokens = GetPredictionStreamingAsync(cancellationToken).ConfigureAwait(false);
-
-        await foreach (var token in tokens)
-        {
-            sb.Append(token);
-        }
-
-        return sb.ToString();
-    }
-
-    public IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default)
-    {
-        return _channel.Reader.ReadAllAsync(cancellationToken);
-    }
-}
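The unbounded channel above decouples the native producer from the consumer, and cancellation is cooperative: the token handed to `GetPredictionStreamingAsync` stops the `await foreach` on the reader side. A sketch that caps a stream at 30 seconds (the `IGpt4AllModel` instance is assumed to already exist):

```csharp
using Gpt4All;

static async Task StreamWithTimeoutAsync(IGpt4AllModel model)
{
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30));

    var result = await model.GetStreamingPredictionAsync(
        "Write a haiku about spring", PredictRequestOptions.Defaults, cts.Token);

    try
    {
        await foreach (var token in result.GetPredictionStreamingAsync(cts.Token))
        {
            Console.Write(token);
        }
    }
    catch (OperationCanceledException)
    {
        Console.WriteLine("\n[stream cancelled after timeout]");
    }
}
```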
diff --git a/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1 b/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1
deleted file mode 100644
index ec29f531..00000000
--- a/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1
+++ /dev/null
@@ -1 +0,0 @@
-ClangSharpPInvokeGenerator @(Get-Content .\GenLLModelBindings.rsp)
\ No newline at end of file
diff --git a/gpt4all-bindings/csharp/README.md b/gpt4all-bindings/csharp/README.md
deleted file mode 100644
index af8d4e9a..00000000
--- a/gpt4all-bindings/csharp/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# C# GPT4All
-
-This package contains a set of C# bindings around the `llmodel` C-API.
-
-## Documentation
-TBD
-
-## Installation
-
-Windows and Linux builds are available on NuGet: https://www.nuget.org/packages/Gpt4All
-
-macOS is WIP due to code signing issues; contributions are welcome.
-
-## Project Structure
-```
-gpt4all-bindings/
-└── csharp
-    ├── Gpt4All            // .NET bindings
-    ├── Gpt4All.Samples    // Sample project
-    ├── build_win-msvc.ps1 // Native build scripts
-    ├── build_win-mingw.ps1
-    ├── build_linux.sh
-    └── runtimes           // [POST-BUILD] Platform-specific native libraries
-        ├── win-x64
-        ├── ...
-        └── linux-x64
-```
-
-## Prerequisites
-
-On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home
-
-macOS users do not need Vulkan, as GPT4All will use Metal instead.
-
-## Local Build Instructions
-> **Note**
-> Tested On:
-> - Windows 11 22H + VS2022 (CE) x64
-> - Linux Ubuntu x64
-> - Linux Ubuntu (WSL2) x64
-
-1. Set up the repository
-2. Build the native libraries for the platform of choice (see below)
-3. Build the C# bindings (a .NET 8 SDK is required)
-```
-git clone --recurse-submodules https://github.com/nomic-ai/gpt4all
-cd gpt4all/gpt4all-bindings/csharp
-```
-### Linux
-1. Set up the build environment and install the .NET 8 SDK with the appropriate procedure for your distribution
-```
-sudo apt-get update
-sudo apt-get install -y cmake build-essential
-chmod +x ./build_linux.sh
-```
-2. `./build_linux.sh`
-3. The native libraries should be present at `runtimes/linux-x64/native`
-
-### Windows - MinGW64
-#### Additional requirements
-  - [MinGW64](https://www.mingw-w64.org/)
-  - CMAKE
-1. Setup
-```
-choco install mingw
-$env:Path += ";C:\ProgramData\mingw64\mingw64\bin"
-choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System'
-```
-2. Run the `./build_win-mingw.ps1` build script
-3. The native libraries should be present at `.\runtimes\win-x64\native`
-
-### Windows - MSVC
-#### Additional requirements
-  - Visual Studio 2022
-1. Open a terminal using the `x64 Native Tools Command Prompt for VS 2022` (`vcvars64.bat`)
-2. Run the `./build_win-msvc.ps1` build script
-3. `libllmodel.dll` and `libllama.dll` should be present at `.\runtimes\win-x64`
-
-> **Warning**
-> If the build fails with: '**error C7555: use of designated initializers requires at least '/std:c++20'**'
->
-> Modify `gpt4all/gpt4all-backend/CMakeLists.txt`, adding `CXX_STANDARD 20` to the `llmodel` properties.
-> ```cmake
-> set_target_properties(llmodel PROPERTIES
->                               VERSION ${PROJECT_VERSION}
->                               CXX_STANDARD 20 # <---- ADD THIS -----------------------
->                               SOVERSION ${PROJECT_VERSION_MAJOR})
-> ```
-## C# Bindings Build Instructions
-Build the `Gpt4All` (or `Gpt4All.Samples`) projects from within Visual Studio.
-### Try the bindings
-```csharp
-using Gpt4All;
-
-// load the model
-var modelFactory = new Gpt4AllModelFactory();
-
-using var model = modelFactory.LoadModel("./path/to/ggml-gpt4all-j-v1.3-groovy.bin");
-
-var input = "Name 3 Colors";
-
-// request a prediction
-var result = await model.GetStreamingPredictionAsync(
-    input,
-    PredictRequestOptions.Defaults);
-
-// asynchronously print the tokens as soon as they are produced by the model
-await foreach(var token in result.GetPredictionStreamingAsync())
-{
-    Console.Write(token);
-}
-```
-Output:
-```
-gptj_model_load: loading model from 'ggml-gpt4all-j-v1.3-groovy.bin' - please wait ...
-gptj_model_load: n_vocab = 50400
-[...TRUNCATED...]
-gptj_model_load: ggml ctx size = 5401.45 MB
-gptj_model_load: kv self size = 896.00 MB
-gptj_model_load: ...................................
done -gptj_model_load: model size = 3609.38 MB / num tensors = 285 - -Black, Blue and White -``` diff --git a/gpt4all-bindings/csharp/build_linux.sh b/gpt4all-bindings/csharp/build_linux.sh deleted file mode 100755 index f7ee05bb..00000000 --- a/gpt4all-bindings/csharp/build_linux.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -mkdir -p runtimes -rm -rf runtimes/linux-x64 -mkdir -p runtimes/linux-x64/native -mkdir runtimes/linux-x64/build -cmake -S ../../gpt4all-backend -B runtimes/linux-x64/build -cmake --build runtimes/linux-x64/build --parallel --config Release -cp runtimes/linux-x64/build/libllmodel.so runtimes/linux-x64/native/libllmodel.so -cp runtimes/linux-x64/build/libgptj*.so runtimes/linux-x64/native/ -cp runtimes/linux-x64/build/libllama*.so runtimes/linux-x64/native/ diff --git a/gpt4all-bindings/csharp/build_win-mingw.ps1 b/gpt4all-bindings/csharp/build_win-mingw.ps1 deleted file mode 100644 index 0e9e5d95..00000000 --- a/gpt4all-bindings/csharp/build_win-mingw.ps1 +++ /dev/null @@ -1,16 +0,0 @@ -$ROOT_DIR = '.\runtimes\win-x64' -$BUILD_DIR = '.\runtimes\win-x64\build\mingw' -$LIBS_DIR = '.\runtimes\win-x64\native' - -# cleanup env -Remove-Item -Force -Recurse $ROOT_DIR -ErrorAction SilentlyContinue | Out-Null -mkdir $BUILD_DIR | Out-Null -mkdir $LIBS_DIR | Out-Null - -# build -cmake -G "MinGW Makefiles" -S ..\..\gpt4all-backend -B $BUILD_DIR -cmake --build $BUILD_DIR --parallel --config Release - -# copy native dlls -cp "C:\ProgramData\mingw64\mingw64\bin\*dll" $LIBS_DIR -cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR diff --git a/gpt4all-bindings/csharp/build_win-msvc.ps1 b/gpt4all-bindings/csharp/build_win-msvc.ps1 deleted file mode 100644 index 8d44f3a7..00000000 --- a/gpt4all-bindings/csharp/build_win-msvc.ps1 +++ /dev/null @@ -1,6 +0,0 @@ -Remove-Item -Force -Recurse .\runtimes\win-x64\msvc -ErrorAction SilentlyContinue -mkdir .\runtimes\win-x64\msvc\build | Out-Null -cmake -G "Visual Studio 17 2022" -A X64 -S ..\..\gpt4all-backend -B .\runtimes\win-x64\msvc\build -cmake --build .\runtimes\win-x64\msvc\build --parallel --config Release -cp .\runtimes\win-x64\msvc\build\bin\Release\*.dll .\runtimes\win-x64 -mv .\runtimes\win-x64\llmodel.dll .\runtimes\win-x64\libllmodel.dll \ No newline at end of file diff --git a/gpt4all-bindings/csharp/docs/gpt4all_csharp.md b/gpt4all-bindings/csharp/docs/gpt4all_csharp.md deleted file mode 100644 index 0a69ed5c..00000000 --- a/gpt4all-bindings/csharp/docs/gpt4all_csharp.md +++ /dev/null @@ -1 +0,0 @@ -# GPT4All C# API \ No newline at end of file diff --git a/gpt4all-bindings/golang/Makefile b/gpt4all-bindings/golang/Makefile deleted file mode 100644 index b101fb2d..00000000 --- a/gpt4all-bindings/golang/Makefile +++ /dev/null @@ -1,163 +0,0 @@ -INCLUDE_PATH := $(abspath ./) -LIBRARY_PATH := $(abspath ./) -CMAKEFLAGS= - -ifndef UNAME_S -UNAME_S := $(shell uname -s) -endif - -ifndef UNAME_P -UNAME_P := $(shell uname -p) -endif - -ifndef UNAME_M -UNAME_M := $(shell uname -m) -endif - -CCV := $(shell $(CC) --version | head -n 1) -CXXV := $(shell $(CXX) --version | head -n 1) - -# Mac OS + Arm can report x86_64 -# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789 -ifeq ($(UNAME_S),Darwin) - ifneq ($(UNAME_P),arm) - SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null) - ifeq ($(SYSCTL_M),1) - # UNAME_P := arm - # UNAME_M := arm64 - warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. 
For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
-        endif
-    endif
-endif
-
-#
-# Compile flags
-#
-
-# keep standard at C11 and C++17
-CFLAGS   = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c11 -fPIC
-CXXFLAGS = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c++17 -fPIC
-LDFLAGS  =
-
-# warnings
-CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
-CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
-
-# OS specific
-# TODO: support Windows
-ifeq ($(UNAME_S),Linux)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),Darwin)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),FreeBSD)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),NetBSD)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),OpenBSD)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-ifeq ($(UNAME_S),Haiku)
-    CFLAGS   += -pthread
-    CXXFLAGS += -pthread
-endif
-
-# Architecture specific
-# TODO: probably these flags need to be tweaked on some architectures
-#       feel free to update the Makefile for your architecture and send a pull request or issue
-ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
-    # Use all CPU extensions that are available:
-    CFLAGS   += -march=native -mtune=native
-    CXXFLAGS += -march=native -mtune=native
-endif
-ifneq ($(filter ppc64%,$(UNAME_M)),)
-    POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
-    ifneq (,$(findstring POWER9,$(POWER9_M)))
-        CFLAGS   += -mcpu=power9
-        CXXFLAGS += -mcpu=power9
-    endif
-    # Require c++23's std::byteswap for big-endian support.
-    ifeq ($(UNAME_M),ppc64)
-        CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
-    endif
-endif
-ifndef LLAMA_NO_ACCELERATE
-    # Mac M1 - include Accelerate framework.
-    # `-framework Accelerate` works on Mac Intel as well, with negligible performance boost (as of the predict time).
- ifeq ($(UNAME_S),Darwin) - CFLAGS += -DGGML_USE_ACCELERATE - LDFLAGS += -framework Accelerate - endif -endif -ifdef LLAMA_OPENBLAS - CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas - LDFLAGS += -lopenblas -endif -ifdef LLAMA_GPROF - CFLAGS += -pg - CXXFLAGS += -pg -endif -ifneq ($(filter aarch64%,$(UNAME_M)),) - CFLAGS += -mcpu=native - CXXFLAGS += -mcpu=native -endif -ifneq ($(filter armv6%,$(UNAME_M)),) - # Raspberry Pi 1, 2, 3 - CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -endif -ifneq ($(filter armv7%,$(UNAME_M)),) - # Raspberry Pi 4 - CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations -endif -ifneq ($(filter armv8%,$(UNAME_M)),) - # Raspberry Pi 4 - CFLAGS += -mfp16-format=ieee -mno-unaligned-access -endif - -# -# Print build information -# - -$(info I go-gpt4all build info: ) -$(info I UNAME_S: $(UNAME_S)) -$(info I UNAME_P: $(UNAME_P)) -$(info I UNAME_M: $(UNAME_M)) -$(info I CFLAGS: $(CFLAGS)) -$(info I CXXFLAGS: $(CXXFLAGS)) -$(info I LDFLAGS: $(LDFLAGS)) -$(info I CMAKEFLAGS: $(CMAKEFLAGS)) -$(info I CC: $(CCV)) -$(info I CXX: $(CXXV)) -$(info ) - -llmodel.o: - [ -e buildllm ] || mkdir buildllm - cd buildllm && cmake ../../../gpt4all-backend/ $(CMAKEFLAGS) && make - cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel_c.cpp.o ../llmodel_c.o - cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel.cpp.o ../llmodel.o - -clean: - rm -f *.o - rm -f *.a - rm -rf buildllm - rm -rf example/main - -binding.o: binding.cpp binding.h - $(CXX) $(CXXFLAGS) binding.cpp -o binding.o -c $(LDFLAGS) - -libgpt4all.a: binding.o llmodel.o - ar src libgpt4all.a llmodel.o binding.o - -test: libgpt4all.a - @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go test -v ./... - -example/main: libgpt4all.a - C_INCLUDE_PATH=$(INCLUDE_PATH) LIBRARY_PATH=$(INCLUDE_PATH) go build -o example/main ./example/ diff --git a/gpt4all-bindings/golang/README.md b/gpt4all-bindings/golang/README.md deleted file mode 100644 index 38a41867..00000000 --- a/gpt4all-bindings/golang/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# GPT4All Golang bindings - -The golang bindings have been tested on: -- MacOS -- Linux - -### Usage - -``` -import ( - "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang" -) - -func main() { - // Load the model - model, err := gpt4all.New("model.bin", gpt4all.SetModelType(gpt4all.GPTJType)) - if err != nil { - panic(err) - } - defer model.Free() - - model.SetTokenCallback(func(s string) bool { - fmt.Print(s) - return true - }) - - _, err = model.Predict("Here are 4 steps to create a website:", "", "", gpt4all.SetTemperature(0.1)) - if err != nil { - panic(err) - } -} -``` - -## Building - -In order to use the bindings you will need to build `libgpt4all.a`: - -``` -git clone --recurse-submodules https://github.com/nomic-ai/gpt4all -cd gpt4all/gpt4all-bindings/golang -make libgpt4all.a -``` - -To use the bindings in your own software: - -- Import `github.com/nomic-ai/gpt4all/gpt4all-bindings/golang`; -- Compile `libgpt4all.a` (you can use `make libgpt4all.a` in the bindings/go directory); -- Link your go binary by setting the environment variables `C_INCLUDE_PATH` and `LIBRARY_PATH` to point to the `binding.h` file directory and `libgpt4all.a` file directory respectively. 
-- Note: you need the *.so/*.dylib/*.dll files of the implementation next to the binary produced with the bindings for this to work
-
-## Testing
-
-To run tests, run `make test`:
-
-```
-git clone https://github.com/nomic-ai/gpt4all
-cd gpt4all/gpt4all-bindings/golang
-make test
-```
diff --git a/gpt4all-bindings/golang/binding.cpp b/gpt4all-bindings/golang/binding.cpp
deleted file mode 100644
index e3f47b56..00000000
--- a/gpt4all-bindings/golang/binding.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-#include "../../gpt4all-backend/llmodel_c.h"
-#include "../../gpt4all-backend/llmodel.h"
-#include "../../gpt4all-backend/llmodel_c.cpp"
-
-#include "binding.h"
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-void* load_model(const char *fname, int n_threads) {
-    // load the model
-    const char *new_error;
-    auto model = llmodel_model_create2(fname, "auto", &new_error);
-    if (model == nullptr) {
-        fprintf(stderr, "%s: error '%s'\n", __func__, new_error);
-        return nullptr;
-    }
-    if (!llmodel_loadModel(model, fname, 2048, 100)) {
-        llmodel_model_destroy(model);
-        return nullptr;
-    }
-
-    llmodel_setThreadCount(model, n_threads);
-    return model;
-}
-
-std::string res = "";
-void * mm;
-
-void model_prompt(const char *prompt, const char *prompt_template, int special, const char *fake_reply,
-                  void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens,
-                  int top_k, float top_p, float min_p, float temp, int n_batch, float ctx_erase)
-{
-    llmodel_model* model = (llmodel_model*) m;
-
-    // std::string res = "";
-
-    auto lambda_prompt = [](int token_id) {
-        return true;
-    };
-
-    mm=model;
-    res="";
-
-    auto lambda_response = [](int token_id, const char *responsechars) {
-        res.append((char*)responsechars);
-        return !!getTokenCallback(mm, (char*)responsechars);
-    };
-
-    auto lambda_recalculate = [](bool is_recalculating) {
-        // You can handle recalculation requests here if needed
-        return is_recalculating;
-    };
-
-    llmodel_prompt_context* prompt_context = new llmodel_prompt_context{
-        .logits = NULL,
-        .logits_size = 0,
-        .tokens = NULL,
-        .tokens_size = 0,
-        .n_past = 0,
-        .n_ctx = 1024,
-        .n_predict = 50,
-        .top_k = 10,
-        .top_p = 0.9,
-        .min_p = 0.0,
-        .temp = 1.0,
-        .n_batch = 1,
-        .repeat_penalty = 1.2,
-        .repeat_last_n = 10,
-        .context_erase = 0.5
-    };
-
-    prompt_context->n_predict = tokens;
-    prompt_context->repeat_last_n = repeat_last_n;
-    prompt_context->repeat_penalty = repeat_penalty;
-    prompt_context->n_ctx = n_ctx;
-    prompt_context->top_k = top_k;
-    prompt_context->context_erase = ctx_erase;
-    prompt_context->top_p = top_p;
-    prompt_context->min_p = min_p;
-    prompt_context->temp = temp;
-    prompt_context->n_batch = n_batch;
-
-    llmodel_prompt(model, prompt, prompt_template,
-                   lambda_prompt,
-                   lambda_response,
-                   lambda_recalculate,
-                   prompt_context, special, fake_reply);
-
-    strcpy(result, res.c_str());
-
-    delete prompt_context; // allocated with new, so it must be released with delete, not free
-}
-
-void free_model(void *state_ptr) {
-    llmodel_model* ctx = (llmodel_model*) state_ptr;
-    llmodel_model_destroy(*ctx);
-}
-
diff --git a/gpt4all-bindings/golang/binding.h b/gpt4all-bindings/golang/binding.h
deleted file mode 100644
index 990f10e8..00000000
--- a/gpt4all-bindings/golang/binding.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-void* load_model(const char *fname, int n_threads);
-
-void model_prompt(const char *prompt, const char *prompt_template, int special, const char *fake_reply,
-                  void *m, char* result, int
repeat_last_n, float repeat_penalty, int n_ctx, int tokens, - int top_k, float top_p, float min_p, float temp, int n_batch,float ctx_erase); - -void free_model(void *state_ptr); - -extern unsigned char getTokenCallback(void *, char *); - -#ifdef __cplusplus -} -#endif diff --git a/gpt4all-bindings/golang/example/main.go b/gpt4all-bindings/golang/example/main.go deleted file mode 100644 index 7351e855..00000000 --- a/gpt4all-bindings/golang/example/main.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "os" - "runtime" - "strings" - - gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang" -) - -var ( - threads = 4 - tokens = 128 -) - -func main() { - var model string - - flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError) - flags.StringVar(&model, "m", "./models/7B/ggml-model-q4_0.bin", "path to q4_0.bin model file to load") - flags.IntVar(&threads, "t", runtime.NumCPU(), "number of threads to use during computation") - flags.IntVar(&tokens, "n", 512, "number of tokens to predict") - - err := flags.Parse(os.Args[1:]) - if err != nil { - fmt.Printf("Parsing program arguments failed: %s", err) - os.Exit(1) - } - l, err := gpt4all.New(model, gpt4all.SetThreads(threads)) - if err != nil { - fmt.Println("Loading the model failed:", err.Error()) - os.Exit(1) - } - fmt.Printf("Model loaded successfully.\n") - - l.SetTokenCallback(func(token string) bool { - fmt.Print(token) - return true - }) - - reader := bufio.NewReader(os.Stdin) - - for { - text := readMultiLineInput(reader) - - _, err := l.Predict(text, "", "", gpt4all.SetTokens(tokens), gpt4all.SetTopK(90), gpt4all.SetTopP(0.86)) - if err != nil { - panic(err) - } - fmt.Printf("\n\n") - } -} - -// readMultiLineInput reads input until an empty line is entered. 
-func readMultiLineInput(reader *bufio.Reader) string { - var lines []string - fmt.Print(">>> ") - - for { - line, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF { - os.Exit(0) - } - fmt.Printf("Reading the prompt failed: %s", err) - os.Exit(1) - } - - if len(strings.TrimSpace(line)) == 0 { - break - } - - lines = append(lines, line) - } - - text := strings.Join(lines, "") - return text -} diff --git a/gpt4all-bindings/golang/go.mod b/gpt4all-bindings/golang/go.mod deleted file mode 100644 index e45c3dad..00000000 --- a/gpt4all-bindings/golang/go.mod +++ /dev/null @@ -1,20 +0,0 @@ -module github.com/nomic-ai/gpt4all/gpt4all-bindings/golang - -go 1.19 - -require ( - github.com/onsi/ginkgo/v2 v2.9.4 - github.com/onsi/gomega v1.27.6 -) - -require ( - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/tools v0.8.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/gpt4all-bindings/golang/go.sum b/gpt4all-bindings/golang/go.sum deleted file mode 100644 index fa0bcd86..00000000 --- a/gpt4all-bindings/golang/go.sum +++ /dev/null @@ -1,40 +0,0 @@ -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/gpt4all-bindings/golang/gpt4all.go b/gpt4all-bindings/golang/gpt4all.go deleted file mode 100644 index 57604cf4..00000000 --- a/gpt4all-bindings/golang/gpt4all.go +++ /dev/null @@ -1,112 +0,0 @@ -package gpt4all - -// #cgo CFLAGS: -I${SRCDIR}../../gpt4all-backend/ -I${SRCDIR}../../gpt4all-backend/llama.cpp -I./ -// #cgo CXXFLAGS: -std=c++17 -I${SRCDIR}../../gpt4all-backend/ -I${SRCDIR}../../gpt4all-backend/llama.cpp -I./ -// #cgo darwin LDFLAGS: -framework Accelerate -// #cgo darwin CXXFLAGS: -std=c++17 -// #cgo LDFLAGS: -lgpt4all -lm -lstdc++ -ldl -// void* load_model(const char *fname, int n_threads); -// void model_prompt( const char *prompt, const char *prompt_template, int special, const char *fake_reply, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k, -// float top_p, float min_p, float temp, int n_batch,float ctx_erase); -// void free_model(void *state_ptr); -// extern unsigned char getTokenCallback(void *, char *); -// void llmodel_set_implementation_search_path(const char *path); -import "C" -import ( - "fmt" - "runtime" - "strings" - "sync" - "unsafe" -) - -// The following code is https://github.com/go-skynet/go-llama.cpp with small adaptations -type Model struct { - state unsafe.Pointer -} - -func New(model string, opts ...ModelOption) (*Model, error) { - ops := NewModelOptions(opts...) - - if ops.LibrarySearchPath != "" { - C.llmodel_set_implementation_search_path(C.CString(ops.LibrarySearchPath)) - } - - state := C.load_model(C.CString(model), C.int(ops.Threads)) - - if state == nil { - return nil, fmt.Errorf("failed loading model") - } - - gpt := &Model{state: state} - // set a finalizer to remove any callbacks when the struct is reclaimed by the garbage collector. - runtime.SetFinalizer(gpt, func(g *Model) { - setTokenCallback(g.state, nil) - }) - - return gpt, nil -} - -func (l *Model) Predict(text, template, fakeReplyText string, opts ...PredictOption) (string, error) { - - po := NewPredictOptions(opts...) 
- - input := C.CString(text) - if po.Tokens == 0 { - po.Tokens = 99999999 - } - templateInput := C.CString(template) - fakeReplyInput := C.CString(fakeReplyText) - out := make([]byte, po.Tokens) - - C.model_prompt(input, templateInput, C.int(po.Special), fakeReplyInput, l.state, (*C.char)(unsafe.Pointer(&out[0])), - C.int(po.RepeatLastN), C.float(po.RepeatPenalty), C.int(po.ContextSize), C.int(po.Tokens), - C.int(po.TopK), C.float(po.TopP), C.float(po.MinP), C.float(po.Temperature), C.int(po.Batch), - C.float(po.ContextErase)) - - res := C.GoString((*C.char)(unsafe.Pointer(&out[0]))) - res = strings.TrimPrefix(res, " ") - res = strings.TrimPrefix(res, text) - res = strings.TrimPrefix(res, "\n") - res = strings.TrimSuffix(res, "<|endoftext|>") - - return res, nil -} - -func (l *Model) Free() { - C.free_model(l.state) -} - -func (l *Model) SetTokenCallback(callback func(token string) bool) { - setTokenCallback(l.state, callback) -} - -var ( - m sync.Mutex - callbacks = map[uintptr]func(string) bool{} -) - -//export getTokenCallback -func getTokenCallback(statePtr unsafe.Pointer, token *C.char) bool { - m.Lock() - defer m.Unlock() - - if callback, ok := callbacks[uintptr(statePtr)]; ok { - return callback(C.GoString(token)) - } - - return true -} - -// setCallback can be used to register a token callback for LLama. Pass in a nil callback to -// remove the callback. -func setTokenCallback(statePtr unsafe.Pointer, callback func(string) bool) { - m.Lock() - defer m.Unlock() - - if callback == nil { - delete(callbacks, uintptr(statePtr)) - } else { - callbacks[uintptr(statePtr)] = callback - } -} diff --git a/gpt4all-bindings/golang/gpt4all_suite_test.go b/gpt4all-bindings/golang/gpt4all_suite_test.go deleted file mode 100644 index 3f379b1e..00000000 --- a/gpt4all-bindings/golang/gpt4all_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package gpt4all_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestGPT(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "go-gpt4all-j test suite") -} diff --git a/gpt4all-bindings/golang/gpt4all_test.go b/gpt4all-bindings/golang/gpt4all_test.go deleted file mode 100644 index fd96584c..00000000 --- a/gpt4all-bindings/golang/gpt4all_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package gpt4all_test - -import ( - . "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("LLama binding", func() { - Context("Declaration", func() { - It("fails with no model", func() { - model, err := New("not-existing") - Expect(err).To(HaveOccurred()) - Expect(model).To(BeNil()) - }) - }) -}) diff --git a/gpt4all-bindings/golang/options.go b/gpt4all-bindings/golang/options.go deleted file mode 100644 index 56b0efc8..00000000 --- a/gpt4all-bindings/golang/options.go +++ /dev/null @@ -1,138 +0,0 @@ -package gpt4all - -type PredictOptions struct { - ContextSize, RepeatLastN, Tokens, TopK, Batch, Special int - TopP, MinP, Temperature, ContextErase, RepeatPenalty float64 -} - -type PredictOption func(p *PredictOptions) - -var DefaultOptions PredictOptions = PredictOptions{ - Tokens: 200, - TopK: 10, - TopP: 0.90, - MinP: 0.0, - Temperature: 0.96, - Batch: 1, - Special: 0, - ContextErase: 0.55, - ContextSize: 1024, - RepeatLastN: 10, - RepeatPenalty: 1.2, -} - -var DefaultModelOptions ModelOptions = ModelOptions{ - Threads: 4, -} - -type ModelOptions struct { - Threads int - LibrarySearchPath string -} -type ModelOption func(p *ModelOptions) - -// SetTokens sets the number of tokens to generate. -func SetTokens(tokens int) PredictOption { - return func(p *PredictOptions) { - p.Tokens = tokens - } -} - -// SetTopK sets the value for top-K sampling. -func SetTopK(topk int) PredictOption { - return func(p *PredictOptions) { - p.TopK = topk - } -} - -// SetTopP sets the value for nucleus sampling. -func SetTopP(topp float64) PredictOption { - return func(p *PredictOptions) { - p.TopP = topp - } -} - -// SetMinP sets the value for min p sampling -func SetMinP(minp float64) PredictOption { - return func(p *PredictOptions) { - p.MinP = minp - } -} - -// SetRepeatPenalty sets the repeat penalty. -func SetRepeatPenalty(ce float64) PredictOption { - return func(p *PredictOptions) { - p.RepeatPenalty = ce - } -} - -// SetRepeatLastN sets the RepeatLastN. -func SetRepeatLastN(ce int) PredictOption { - return func(p *PredictOptions) { - p.RepeatLastN = ce - } -} - -// SetContextErase sets the context erase %. -func SetContextErase(ce float64) PredictOption { - return func(p *PredictOptions) { - p.ContextErase = ce - } -} - -// SetTemperature sets the temperature value for text generation. -func SetTemperature(temp float64) PredictOption { - return func(p *PredictOptions) { - p.Temperature = temp - } -} - -// SetBatch sets the batch size. -func SetBatch(size int) PredictOption { - return func(p *PredictOptions) { - p.Batch = size - } -} - -// SetSpecial is true if special tokens in the prompt should be processed, false otherwise. -func SetSpecial(special bool) PredictOption { - return func(p *PredictOptions) { - if special { - p.Special = 1 - } else { - p.Special = 0 - } - } -} - -// Create a new PredictOptions object with the given options. -func NewPredictOptions(opts ...PredictOption) PredictOptions { - p := DefaultOptions - for _, opt := range opts { - opt(&p) - } - return p -} - -// SetThreads sets the number of threads to use for text generation. -func SetThreads(c int) ModelOption { - return func(p *ModelOptions) { - p.Threads = c - } -} - -// SetLibrarySearchPath sets the dynamic libraries used by gpt4all for the various ggml implementations. -func SetLibrarySearchPath(t string) ModelOption { - return func(p *ModelOptions) { - p.LibrarySearchPath = t - } -} - -// Create a new PredictOptions object with the given options. 
-func NewModelOptions(opts ...ModelOption) ModelOptions {
-	p := DefaultModelOptions
-	for _, opt := range opts {
-		opt(&p)
-	}
-	return p
-}
diff --git a/gpt4all-bindings/java/.gitignore b/gpt4all-bindings/java/.gitignore
deleted file mode 100644
index 081e799c..00000000
--- a/gpt4all-bindings/java/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-# Make sure the native directory never gets committed to git for the project.
-/src/main/resources/native
-
-# IntelliJ project file
-*.iml
\ No newline at end of file
diff --git a/gpt4all-bindings/java/Developer_docs.md b/gpt4all-bindings/java/Developer_docs.md
deleted file mode 100644
index a90dc68c..00000000
--- a/gpt4all-bindings/java/Developer_docs.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Java Bindings Developer Documentation
-
-This document is meant for anyone looking to build the Java bindings from source, test a build locally, and perform a release.
-
-## Building locally
-
-Maven is the build tool used by the project, and Maven version 3.8 or higher is recommended. Make sure **mvn**
-is available on the command path.
-
-The project targets Java 11, so make sure that a JDK of version 11 or newer is installed.
-
-### Setting up the location of the native shared libraries
-The property **native.libs.location** in pom.xml may need to be set:
-```
-<properties>
-    ...
-    <native.libs.location>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_3_Jun22_2023</native.libs.location>
-</properties>
-```
-All the native shared libraries bundled with the Java binding jar will be copied from this location.
-The directory structure is **native/linux**, **native/macos**, **native/windows**. These directories are copied
-into the **src/main/resources** folder during the build process.
-
-For the purposes of local testing, none of these directories have to be present, or just one OS type may be present.
-
-If none of the native libraries are present in **native.libs.location**, the shared libraries will be searched for
-in the location path set by the **LLModel.LIBRARY_SEARCH_PATH** static variable in the Java source code that is using the bindings.
-
-Alternatively, you can copy the shared libraries into **src/main/resources/native/linux** before
-you build, but note that **src/main/resources/native** is on the .gitignore, so it will not be committed to sources.
-
-### Building
-
-To package the bindings jar run:
-```
-mvn package
-```
-This will build two jars. One has only the Java bindings and the other is a fat jar that will have the required dependencies included as well.
-
-To package and install the Java bindings to your local maven repository run:
-```
-mvn install
-```
-
-### Using in a sample application
-
-You can check out a sample project that uses the java bindings here:
-https://github.com/felix-zaslavskiy/gpt4all-java-bindings-sample.git
-
-1. First, update the Java bindings dependency to whatever version you have installed in the local repository, such as **1.1.4-SNAPSHOT**
-2. Second, update **Main.java** and set **baseModelPath** to the correct location of the model weight files.
-
-3. To make a runnable jar run:
-```
-mvn package
-```
-
-A fat jar is also created, which is easy to run from the command line:
-```
-java -jar target/gpt4all-java-bindings-sample-1.0-SNAPSHOT-jar-with-dependencies.jar
-```
-
-### Publishing a public release
-
-Publishing a new version to the Maven Central repository requires a password and signing keys, which F.Z. currently maintains, so
-he is responsible for making a public release.
### Publishing a public release - -Publishing a new version to the Maven Central repository requires a password and signing keys, which F.Z. currently maintains, so -he is responsible for making public releases. - -The procedure is as follows: - -For a snapshot release, run: ``` -mvn deploy -P signing-profile -``` - -For a non-snapshot release, run: ``` -mvn clean deploy -P signing-profile,release -``` \ No newline at end of file diff --git a/gpt4all-bindings/java/README.md b/gpt4all-bindings/java/README.md deleted file mode 100644 index af996054..00000000 --- a/gpt4all-bindings/java/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# Java bindings - -The Java bindings let you load a gpt4all library into your Java application and execute text -generation using an intuitive and easy-to-use API. No GPU is required because gpt4all executes on the CPU. -The gpt4all models are quantized to fit easily into system RAM and use about 4 to 7 GB of it. - -## Getting Started -You can add the Java bindings to your project with the following dependency: - -**Maven** ``` -<dependency> - <groupId>com.hexadevlabs</groupId> - <artifactId>gpt4all-java-binding</artifactId> - <version>1.1.5</version> -</dependency> -``` -**Gradle** ``` -implementation 'com.hexadevlabs:gpt4all-java-binding:1.1.5' -``` - -To add the library dependency for another build system, see [Maven Central Java bindings](https://central.sonatype.com/artifact/com.hexadevlabs/gpt4all-java-binding/). - -To download a model's binary weights file, use a URL such as [`https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf`](https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf). - -For information about the other models available, see the [model file list](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-chat#manual-download-of-models). - -### Sample code ```java -import com.hexadevlabs.gpt4all.LLModel; - -import java.nio.file.Path; - -public class Example { - public static void main(String[] args) { - - String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:"; - - // Replace the hardcoded path with the actual path where your model file resides - String modelFilePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin"; - - try (LLModel model = new LLModel(Path.of(modelFilePath))) { - - // May generate up to 4096 tokens but generally stops early - LLModel.GenerationConfig config = LLModel.config() - .withNPredict(4096).build(); - - // Will also stream to standard output - String fullGeneration = model.generate(prompt, config, true); - - } catch (Exception e) { - // Exceptions may generally happen if the model file fails to load - // for a number of reasons, such as the file not being found. - // It is possible that Java may not be able to dynamically load the native shared library, or - // the llmodel shared library may not be able to dynamically load the backend - // implementation for the model file you provided. - // - // Once the LLModel class is successfully loaded into memory, the text generation calls - // generally should not throw exceptions. - e.printStackTrace(); // Printing here, but in a production system you may want to take some action. - } - } - -} -``` - -For a Maven-based sample project that uses this library, see this [sample project](https://github.com/felix-zaslavskiy/gpt4all-java-bindings-sample).
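The binding also exposes a chat-style API on top of `generate`. A minimal sketch using the `Messages` and `PromptMessage` classes from this binding; the model path is hypothetical:

```java
import com.hexadevlabs.gpt4all.LLModel;

import java.nio.file.Path;

public class ChatExample {
    public static void main(String[] args) throws Exception {
        try (LLModel model = new LLModel(Path.of("C:\\models\\ggml-gpt4all-j-v1.3-groovy.bin"))) {
            LLModel.GenerationConfig config = LLModel.config().withNPredict(256).build();

            // Build a conversation from typed prompt messages.
            LLModel.Messages messages = new LLModel.Messages(
                    new LLModel.PromptMessage(LLModel.Role.SYSTEM, "You are a helpful assistant"),
                    new LLModel.PromptMessage(LLModel.Role.USER, "Add 2+2"));

            // chatCompletion folds the messages into a single prompt template,
            // runs generation, and wraps the result with usage statistics.
            LLModel.CompletionReturn result = model.chatCompletion(messages, config, false, false);
            System.out.println(result.choices().first().content());
        }
    }
}
```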
- -### Additional considerations -#### Logger warnings - -The Java bindings library may produce a warning if you don't have an SLF4J binding included in your project: ``` -SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder". -SLF4J: Defaulting to no-operation (NOP) logger implementation -SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. -``` -The Java bindings only use logging for informational -purposes, so a logger is not essential to correctly use the library. You can ignore this warning if you don't have SLF4J bindings -in your project. - -To add a simple logger via a Maven dependency, you may use: ``` -<dependency> - <groupId>org.slf4j</groupId> - <artifactId>slf4j-simple</artifactId> - <version>1.7.36</version> -</dependency> -``` - -#### Loading your native libraries -1. The Java bindings JAR comes bundled with native library files for Windows, macOS, and Linux. These library files are -copied to a temporary directory and loaded at runtime. Advanced users who want to package the shared libraries into Docker containers, -or who want to use a custom build of the shared libraries and ignore the ones bundled with the Java package, have the option -to load libraries from a local directory by setting a static property to the location of the library files. -There are no guarantees of compatibility if the bindings are used this way, so be careful if you really want to do it. - -For example: ```java -class Example { - public static void main(String[] args) { - // gpt4all native shared libraries location - LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - // ... use the library normally - } -} ``` -2. To reduce size, not every AVX-only shared library is bundled with the JAR right now; only libgptj-avx is included. -If you are running into issues, please let us know using the [gpt4all project issue tracker](https://github.com/nomic-ai/gpt4all/issues). - -3. On Windows, the native library included in the JAR depends on specific Microsoft C and C++ (MSVC) runtime libraries, which may not be installed on your system. -If this is the case, you can download and install the latest x64 Microsoft Visual C++ Redistributable package from https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170 - -4. When running Java in a Docker container, it is advised to use the eclipse-temurin:17-jre parent image. Alpine-based parent images don't work due to the native library dependencies. - -## Version history -1. Version **1.1.2**: - - Java bindings are compatible with gpt4all version 2.4.6 - - Initial stable release with the initial feature set -2. Version **1.1.3**: - - Java bindings are compatible with gpt4all version 2.4.8 - - Added a static GPT4ALL_VERSION to signify the gpt4all version of the bindings - - Added PromptIsTooLongException for prompts that are longer than the context size. - - Added Replit model support, including Metal support on Mac hardware. -3. Version **1.1.4**: - - Java bindings are compatible with gpt4all version 2.4.11 - - Falcon model support included. -4. Version **1.1.5**: - - Added a check for model file readability before loading the model. - diff --git a/gpt4all-bindings/java/TODO.md b/gpt4all-bindings/java/TODO.md deleted file mode 100644 index 48342f78..00000000 --- a/gpt4all-bindings/java/TODO.md +++ /dev/null @@ -1,6 +0,0 @@ -## Needed -1. Integrate with the CircleCI build pipeline like the C# binding. - -## These are just ideas -1. A better chat completions function. -2. Chat completion that returns results in an OpenAI-compatible format (see the sketch after this list).
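A rough sketch of what the second idea might look like. The field names follow OpenAI's documented chat-completion response shape; the `toOpenAi` helper and its parameters are hypothetical, standing in for the data carried by the binding's `ChatCompletionResponse`:

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class OpenAiFormat {
    // Map a binding-level response onto OpenAI's chat.completion layout.
    // The inputs stand in for ChatCompletionResponse.model, .choices, and
    // .usage from LLModel; a real implementation would live in the bindings.
    public static Map<String, Object> toOpenAi(String model,
                                               List<Map<String, String>> choices,
                                               int promptTokens, int completionTokens) {
        Map<String, Object> out = new LinkedHashMap<>();
        out.put("object", "chat.completion");
        out.put("model", model);
        out.put("choices", choices);
        out.put("usage", Map.of(
                "prompt_tokens", promptTokens,
                "completion_tokens", completionTokens,
                "total_tokens", promptTokens + completionTokens));
        return out;
    }
}
```

A real implementation would presumably serialize this map with a JSON library such as Jackson.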
diff --git a/gpt4all-bindings/java/pom.xml b/gpt4all-bindings/java/pom.xml deleted file mode 100644 index 4687aa1a..00000000 --- a/gpt4all-bindings/java/pom.xml +++ /dev/null @@ -1,216 +0,0 @@ - - - 4.0.0 - - com.hexadevlabs - gpt4all-java-binding - 1.1.5 - jar - - - 11 - 11 - UTF-8 - C:\Users\felix\dev\gpt4all_java_bins\release_1_1_4_July8_2023 - - - ${project.groupId}:${project.artifactId} - Java bindings for GPT4ALL LLM - https://github.com/nomic-ai/gpt4all - - - The Apache License, Version 2.0 - https://github.com/nomic-ai/gpt4all/blob/main/LICENSE.txt - - - - - Felix Zaslavskiy - felixz@hexadevlabs.com - https://github.com/felix-zaslavskiy/ - - - - scm:git:git://github.com/nomic-ai/gpt4all.git - scm:git:ssh://github.com/nomic-ai/gpt4all.git - https://github.com/nomic-ai/gpt4all/tree/main - - - - - com.github.jnr - jnr-ffi - 2.2.13 - - - - org.slf4j - slf4j-api - 1.7.36 - - - - org.junit.jupiter - junit-jupiter-api - 5.9.2 - test - - - - org.mockito - mockito-junit-jupiter - 5.4.0 - test - - - - org.mockito - mockito-core - 5.4.0 - test - - - - - - ossrh - https://s01.oss.sonatype.org/content/repositories/snapshots - - - ossrh - https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ - - - - - - - src/main/resources - - - ${project.build.directory}/generated-resources - - - - - org.apache.maven.plugins - maven-surefire-plugin - 3.0.0 - - 0 - - - - org.apache.maven.plugins - maven-resources-plugin - 3.3.1 - - - copy-resources - - validate - - copy-resources - - - ${project.build.directory}/generated-resources - - - ${native.libs.location} - - - - - - - - - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.13 - true - - ossrh - https://s01.oss.sonatype.org/ - true - - - - org.apache.maven.plugins - maven-source-plugin - 2.2.1 - - - attach-sources - - jar-no-fork - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 3.5.0 - - - attach-javadocs - - jar - - - - - - - org.apache.maven.plugins - maven-assembly-plugin - 3.6.0 - - - jar-with-dependencies - - - - - make-assembly - package - - single - - - - - - - - - - - signing-profile - - - - - org.apache.maven.plugins - maven-gpg-plugin - 3.1.0 - - - sign-artifacts - verify - - sign - - - - - - - - - - \ No newline at end of file diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java deleted file mode 100644 index 6114cfad..00000000 --- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java +++ /dev/null @@ -1,641 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import jnr.ffi.Pointer; -import jnr.ffi.byref.PointerByReference; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.*; -import java.util.stream.Collectors; - -public class LLModel implements AutoCloseable { - - /** - * Config used for how to decode LLM outputs. - * High temperature closer to 1 gives more creative outputs - * while low temperature closer to 0 produce more precise outputs. - *

- * Use the builder to set the settings you want. - */ - public static class GenerationConfig extends LLModelLibrary.LLModelPromptContext { - - private GenerationConfig() { - super(jnr.ffi.Runtime.getSystemRuntime()); - logits_size.set(0); - tokens_size.set(0); - n_past.set(0); - n_ctx.set(1024); - n_predict.set(128); - top_k.set(40); - top_p.set(0.95); - min_p.set(0.0); - temp.set(0.28); - n_batch.set(8); - repeat_penalty.set(1.1); - repeat_last_n.set(10); - context_erase.set(0.55); - } - - public static class Builder { - private final GenerationConfig configToBuild; - - public Builder() { - configToBuild = new GenerationConfig(); - } - - public Builder withNPast(int n_past) { - configToBuild.n_past.set(n_past); - return this; - } - - public Builder withNCtx(int n_ctx) { - configToBuild.n_ctx.set(n_ctx); - return this; - } - - public Builder withNPredict(int n_predict) { - configToBuild.n_predict.set(n_predict); - return this; - } - - public Builder withTopK(int top_k) { - configToBuild.top_k.set(top_k); - return this; - } - - public Builder withTopP(float top_p) { - configToBuild.top_p.set(top_p); - return this; - } - - public Builder withMinP(float min_p) { - configToBuild.min_p.set(min_p); - return this; - } - - public Builder withTemp(float temp) { - configToBuild.temp.set(temp); - return this; - } - - public Builder withNBatch(int n_batch) { - configToBuild.n_batch.set(n_batch); - return this; - } - - public Builder withRepeatPenalty(float repeat_penalty) { - configToBuild.repeat_penalty.set(repeat_penalty); - return this; - } - - public Builder withRepeatLastN(int repeat_last_n) { - configToBuild.repeat_last_n.set(repeat_last_n); - return this; - } - - public Builder withContextErase(float context_erase) { - configToBuild.context_erase.set(context_erase); - return this; - } - - /** - * - * @return GenerationConfig the built instance of the config - */ - public GenerationConfig build() { - return configToBuild; - } - } - } - - /** - * Shortcut for making a GenerationConfig builder. - * - * @return GenerationConfig.Builder - builder that can be used to make a GenerationConfig - */ - public static GenerationConfig.Builder config() { - return new GenerationConfig.Builder(); - } - - /** - * This may be set before any Model instance classes are instantiated to - * set where the native shared libraries are to be found. - *

- * This may be needed if setting library search path by standard means is not available - * or the libraries loaded from the temp folder bundled with the binding jar is not desirable. - */ - public static String LIBRARY_SEARCH_PATH; - - - /** - * Generally for debugging purposes only. Will print - * the numerical tokens as they are generated instead of the string representations. - * Will also print out the processed input tokens as numbers to standard out. - */ - public static boolean OUTPUT_DEBUG = false; - - private static final Logger logger = LoggerFactory.getLogger(LLModel.class); - - /** - * Which version of GPT4ALL that this binding is built for. - * The binding is guaranteed to work with this version of - * GPT4ALL native libraries. The binding may work for older - * versions but that is not guaranteed. - */ - public static final String GPT4ALL_VERSION = "2.4.11"; - - protected static LLModelLibrary library; - - protected Pointer model; - - protected String modelName; - - /** - * Package private default constructor, for testing purposes. - */ - LLModel(){ - } - - public LLModel(Path modelPath) { - - logger.info("Java bindings for gpt4all version: " + GPT4ALL_VERSION); - - if(library==null) { - - if (LIBRARY_SEARCH_PATH != null){ - library = Util.loadSharedLibrary(LIBRARY_SEARCH_PATH); - library.llmodel_set_implementation_search_path(LIBRARY_SEARCH_PATH); - } else { - // Copy system libraries to Temp folder - Path tempLibraryDirectory = Util.copySharedLibraries(); - library = Util.loadSharedLibrary(tempLibraryDirectory.toString()); - - library.llmodel_set_implementation_search_path(tempLibraryDirectory.toString() ); - } - - } - - // modelType = type; - modelName = modelPath.getFileName().toString(); - String modelPathAbs = modelPath.toAbsolutePath().toString(); - - PointerByReference error = new PointerByReference(); - - // Check if model file exists - if(!Files.exists(modelPath)){ - throw new IllegalStateException("Model file does not exist: " + modelPathAbs); - } - - // Check if file is Readable - if(!Files.isReadable(modelPath)){ - throw new IllegalStateException("Model file cannot be read: " + modelPathAbs); - } - - // Create Model Struct. Will load dynamically the correct backend based on model type - model = library.llmodel_model_create2(modelPathAbs, "auto", error); - - if(model == null) { - throw new IllegalStateException("Could not load, gpt4all backend returned error: " + error.getValue().getString(0)); - } - library.llmodel_loadModel(model, modelPathAbs, 2048, 100); - - if(!library.llmodel_isModelLoaded(model)){ - throw new IllegalStateException("The model " + modelName + " could not be loaded"); - } - - } - - public void setThreadCount(int nThreads) { - library.llmodel_setThreadCount(this.model, nThreads); - } - - public int threadCount() { - return library.llmodel_threadCount(this.model); - } - - /** - * Generate text after the prompt - * - * @param prompt The text prompt to complete - * @param generationConfig What generation settings to use while generating text - * @return String The complete generated text - */ - public String generate(String prompt, GenerationConfig generationConfig) { - return generate(prompt, generationConfig, false); - } - - /** - * Generate text after the prompt - * - * @param prompt The text prompt to complete - * @param generationConfig What generation settings to use while generating text - * @param streamToStdOut Should the generation be streamed to standard output. Useful for troubleshooting. 
- * @return String The complete generated text - */ - public String generate(String prompt, GenerationConfig generationConfig, boolean streamToStdOut) { - - ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream(); - ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream(); - - LLModelLibrary.ResponseCallback responseCallback = getResponseCallback(streamToStdOut, bufferingForStdOutStream, bufferingForWholeGeneration); - - library.llmodel_prompt(this.model, - prompt, - (int tokenID) -> { - if(LLModel.OUTPUT_DEBUG) - System.out.println("token " + tokenID); - return true; // continue processing - }, - responseCallback, - (boolean isRecalculating) -> { - if(LLModel.OUTPUT_DEBUG) - System.out.println("recalculating"); - return isRecalculating; // continue generating - }, - generationConfig); - - return bufferingForWholeGeneration.toString(StandardCharsets.UTF_8); - } - - /** - * Callback method to be used by prompt method as text is generated. - * - * @param streamToStdOut Should send generated text to standard out. - * @param bufferingForStdOutStream Output stream used for buffering bytes for standard output. - * @param bufferingForWholeGeneration Output stream used for buffering a complete generation. - * @return LLModelLibrary.ResponseCallback lambda function that is invoked by response callback. - */ - static LLModelLibrary.ResponseCallback getResponseCallback(boolean streamToStdOut, ByteArrayOutputStream bufferingForStdOutStream, ByteArrayOutputStream bufferingForWholeGeneration) { - return (int tokenID, Pointer response) -> { - - if(LLModel.OUTPUT_DEBUG) - System.out.print("Response token " + tokenID + " " ); - - // For all models if input sequence in tokens is longer then model context length - // the error is generated. - if(tokenID==-1){ - throw new PromptIsTooLongException(response.getString(0, 1000, StandardCharsets.UTF_8)); - } - - long len = 0; - byte nextByte; - do{ - try { - nextByte = response.getByte(len); - } catch(IndexOutOfBoundsException e){ - // Not sure if this can ever happen but just in case - // the generation does not terminate in a Null (0) value. - throw new RuntimeException("Empty array or not null terminated"); - } - len++; - if(nextByte!=0) { - bufferingForWholeGeneration.write(nextByte); - if(streamToStdOut){ - bufferingForStdOutStream.write(nextByte); - // Test if Buffer is UTF8 valid string. - byte[] currentBytes = bufferingForStdOutStream.toByteArray(); - String validString = Util.getValidUtf8(currentBytes); - if(validString!=null){ // is valid string - System.out.print(validString); - // reset the buffer for next utf8 sequence to buffer - bufferingForStdOutStream.reset(); - } - } - } - } while(nextByte != 0); - - return true; // continue generating - }; - } - - /** - * The array of messages for the conversation. - */ - public static class Messages { - - private final List messages = new ArrayList<>(); - - public Messages(PromptMessage...messages) { - this.messages.addAll(Arrays.asList(messages)); - } - - public Messages(List messages) { - this.messages.addAll(messages); - } - - public Messages addPromptMessage(PromptMessage promptMessage) { - this.messages.add(promptMessage); - return this; - } - - List toList() { - return Collections.unmodifiableList(this.messages); - } - - List> toListMap() { - return messages.stream() - .map(PromptMessage::toMap).collect(Collectors.toList()); - } - - } - - /** - * A message in the conversation, identical to OpenAI's chat message. 
- */ - public static class PromptMessage { - - private static final String ROLE = "role"; - private static final String CONTENT = "content"; - - private final Map message = new HashMap<>(); - - public PromptMessage() { - } - - public PromptMessage(Role role, String content) { - addRole(role); - addContent(content); - } - - public PromptMessage addRole(Role role) { - return this.addParameter(ROLE, role.type()); - } - - public PromptMessage addContent(String content) { - return this.addParameter(CONTENT, content); - } - - public PromptMessage addParameter(String key, String value) { - this.message.put(key, value); - return this; - } - - public String content() { - return this.parameter(CONTENT); - } - - public Role role() { - String role = this.parameter(ROLE); - return Role.from(role); - } - - public String parameter(String key) { - return this.message.get(key); - } - - Map toMap() { - return Collections.unmodifiableMap(this.message); - } - - } - - public enum Role { - - SYSTEM("system"), ASSISTANT("assistant"), USER("user"); - - private final String type; - - String type() { - return this.type; - } - - static Role from(String type) { - - if (type == null) { - return null; - } - - switch (type) { - case "system": return SYSTEM; - case "assistant": return ASSISTANT; - case "user": return USER; - default: throw new IllegalArgumentException( - String.format("You passed %s type but only %s are supported", - type, Arrays.toString(Role.values()) - ) - ); - } - } - - Role(String type) { - this.type = type; - } - - @Override - public String toString() { - return type(); - } - } - - /** - * The result of the completion, similar to OpenAI's format. - */ - public static class CompletionReturn { - private String model; - private Usage usage; - private Choices choices; - - public CompletionReturn(String model, Usage usage, Choices choices) { - this.model = model; - this.usage = usage; - this.choices = choices; - } - - public Choices choices() { - return choices; - } - - public String model() { - return model; - } - - public Usage usage() { - return usage; - } - } - - /** - * The generated completions. - */ - public static class Choices { - - private final List choices = new ArrayList<>(); - - public Choices(List choices) { - this.choices.addAll(choices); - } - - public Choices(CompletionChoice...completionChoices){ - this.choices.addAll(Arrays.asList(completionChoices)); - } - - public Choices addCompletionChoice(CompletionChoice completionChoice) { - this.choices.add(completionChoice); - return this; - } - - public CompletionChoice first() { - return this.choices.get(0); - } - - public int totalChoices() { - return this.choices.size(); - } - - public CompletionChoice get(int index) { - return this.choices.get(index); - } - - public List choices() { - return Collections.unmodifiableList(choices); - } - } - - /** - * A completion choice, similar to OpenAI's format. 
- */ - public static class CompletionChoice extends PromptMessage { - public CompletionChoice(Role role, String content) { - super(role, content); - } - } - - public static class ChatCompletionResponse { - public String model; - public Usage usage; - public List> choices; - - // Getters and setters - } - - public static class Usage { - public int promptTokens; - public int completionTokens; - public int totalTokens; - - // Getters and setters - } - - public CompletionReturn chatCompletionResponse(Messages messages, - GenerationConfig generationConfig) { - return chatCompletion(messages, generationConfig, false, false); - } - - /** - * chatCompletion formats the existing chat conversation into a template to be - * easier to process for chat UIs. It is not absolutely necessary as generate method - * may be directly used to make generations with gpt models. - * - * @param messages object to create theMessages to send to GPT model - * @param generationConfig How to decode/process the generation. - * @param streamToStdOut Send tokens as they are calculated Standard output. - * @param outputFullPromptToStdOut Should full prompt built out of messages be sent to Standard output. - * @return CompletionReturn contains stats and generated Text. - */ - public CompletionReturn chatCompletion(Messages messages, - GenerationConfig generationConfig, boolean streamToStdOut, - boolean outputFullPromptToStdOut) { - - String fullPrompt = buildPrompt(messages.toListMap()); - - if(outputFullPromptToStdOut) - System.out.print(fullPrompt); - - String generatedText = generate(fullPrompt, generationConfig, streamToStdOut); - - final CompletionChoice promptMessage = new CompletionChoice(Role.ASSISTANT, generatedText); - final Choices choices = new Choices(promptMessage); - - final Usage usage = getUsage(fullPrompt, generatedText); - return new CompletionReturn(this.modelName, usage, choices); - - } - - public ChatCompletionResponse chatCompletion(List> messages, - GenerationConfig generationConfig) { - return chatCompletion(messages, generationConfig, false, false); - } - - /** - * chatCompletion formats the existing chat conversation into a template to be - * easier to process for chat UIs. It is not absolutely necessary as generate method - * may be directly used to make generations with gpt models. - * - * @param messages List of Maps "role"->"user", "content"->"...", "role"-> "assistant"->"..." - * @param generationConfig How to decode/process the generation. - * @param streamToStdOut Send tokens as they are calculated Standard output. - * @param outputFullPromptToStdOut Should full prompt built out of messages be sent to Standard output. - * @return ChatCompletionResponse contains stats and generated Text. 
- */ - public ChatCompletionResponse chatCompletion(List> messages, - GenerationConfig generationConfig, boolean streamToStdOut, - boolean outputFullPromptToStdOut) { - String fullPrompt = buildPrompt(messages); - - if(outputFullPromptToStdOut) - System.out.print(fullPrompt); - - String generatedText = generate(fullPrompt, generationConfig, streamToStdOut); - - ChatCompletionResponse response = new ChatCompletionResponse(); - response.model = this.modelName; - - response.usage = getUsage(fullPrompt, generatedText); - - Map message = new HashMap<>(); - message.put("role", "assistant"); - message.put("content", generatedText); - - response.choices = List.of(message); - return response; - - } - - private Usage getUsage(String fullPrompt, String generatedText) { - Usage usage = new Usage(); - usage.promptTokens = fullPrompt.length(); - usage.completionTokens = generatedText.length(); - usage.totalTokens = fullPrompt.length() + generatedText.length(); - return usage; - } - - protected static String buildPrompt(List> messages) { - StringBuilder fullPrompt = new StringBuilder(); - - for (Map message : messages) { - if ("system".equals(message.get("role"))) { - String systemMessage = message.get("content") + "\n"; - fullPrompt.append(systemMessage); - } - } - - fullPrompt.append("### Instruction: \n" + - "The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.\n" + - "### Prompt: "); - - for (Map message : messages) { - if ("user".equals(message.get("role"))) { - String userMessage = "\n" + message.get("content"); - fullPrompt.append(userMessage); - } - if ("assistant".equals(message.get("role"))) { - String assistantMessage = "\n### Response: " + message.get("content"); - fullPrompt.append(assistantMessage); - } - } - - fullPrompt.append("\n### Response:"); - - return fullPrompt.toString(); - } - - @Override - public void close() throws Exception { - library.llmodel_model_destroy(model); - } - -} diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModelLibrary.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModelLibrary.java deleted file mode 100644 index d538a080..00000000 --- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModelLibrary.java +++ /dev/null @@ -1,81 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import jnr.ffi.Pointer; -import jnr.ffi.byref.PointerByReference; -import jnr.ffi.Struct; -import jnr.ffi.annotations.Delegate; -import jnr.ffi.annotations.Encoding; -import jnr.ffi.annotations.In; -import jnr.ffi.annotations.Out; -import jnr.ffi.types.u_int64_t; - - -/** - * The basic Native library interface the provides all the LLM functions. 
- */ -public interface LLModelLibrary { - - interface PromptCallback { - @Delegate - boolean invoke(int token_id); - } - - interface ResponseCallback { - @Delegate - boolean invoke(int token_id, Pointer response); - } - - interface RecalculateCallback { - @Delegate - boolean invoke(boolean is_recalculating); - } - - class LLModelError extends Struct { - public final Struct.AsciiStringRef message = new Struct.AsciiStringRef(); - public final int32_t status = new int32_t(); - public LLModelError(jnr.ffi.Runtime runtime) { - super(runtime); - } - } - - class LLModelPromptContext extends Struct { - public final Pointer logits = new Pointer(); - public final ssize_t logits_size = new ssize_t(); - public final Pointer tokens = new Pointer(); - public final ssize_t tokens_size = new ssize_t(); - public final int32_t n_past = new int32_t(); - public final int32_t n_ctx = new int32_t(); - public final int32_t n_predict = new int32_t(); - public final int32_t top_k = new int32_t(); - public final Float top_p = new Float(); - public final Float min_p = new Float(); - public final Float temp = new Float(); - public final int32_t n_batch = new int32_t(); - public final Float repeat_penalty = new Float(); - public final int32_t repeat_last_n = new int32_t(); - public final Float context_erase = new Float(); - - public LLModelPromptContext(jnr.ffi.Runtime runtime) { - super(runtime); - } - } - - Pointer llmodel_model_create2(String model_path, String build_variant, PointerByReference error); - void llmodel_model_destroy(Pointer model); - boolean llmodel_loadModel(Pointer model, String model_path, int n_ctx, int ngl); - boolean llmodel_isModelLoaded(Pointer model); - @u_int64_t long llmodel_get_state_size(Pointer model); - @u_int64_t long llmodel_save_state_data(Pointer model, Pointer dest); - @u_int64_t long llmodel_restore_state_data(Pointer model, Pointer src); - - void llmodel_set_implementation_search_path(String path); - - // ctx was an @Out ... 
without @Out crash - void llmodel_prompt(Pointer model, @Encoding("UTF-8") String prompt, - PromptCallback prompt_callback, - ResponseCallback response_callback, - RecalculateCallback recalculate_callback, - @In LLModelPromptContext ctx); - void llmodel_setThreadCount(Pointer model, int n_threads); - int llmodel_threadCount(Pointer model); -} diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/PromptIsTooLongException.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/PromptIsTooLongException.java deleted file mode 100644 index 82301696..00000000 --- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/PromptIsTooLongException.java +++ /dev/null @@ -1,7 +0,0 @@ -package com.hexadevlabs.gpt4all; - -public class PromptIsTooLongException extends RuntimeException { - public PromptIsTooLongException(String message) { - super(message); - } -} diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java deleted file mode 100644 index 9c50f9e7..00000000 --- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java +++ /dev/null @@ -1,160 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import jnr.ffi.LibraryLoader; -import jnr.ffi.LibraryOption; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.charset.CharacterCodingException; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class Util { - - private static final Logger logger = LoggerFactory.getLogger(Util.class); - private static final CharsetDecoder cs = StandardCharsets.UTF_8.newDecoder(); - - public static LLModelLibrary loadSharedLibrary(String librarySearchPath){ - String libraryName = "llmodel"; - Map libraryOptions = new HashMap<>(); - libraryOptions.put(LibraryOption.LoadNow, true); // load immediately instead of lazily (ie on first use) - libraryOptions.put(LibraryOption.IgnoreError, false); // calls shouldn't save last errno after call - - if(librarySearchPath!=null) { - Map> searchPaths = new HashMap<>(); - searchPaths.put(libraryName, List.of(librarySearchPath)); - - return LibraryLoader.loadLibrary(LLModelLibrary.class, - libraryOptions, - searchPaths, - libraryName - ); - }else { - - return LibraryLoader.loadLibrary(LLModelLibrary.class, - libraryOptions, - libraryName - ); - } - - } - - /** - * Copy over shared library files from resource package to - * target Temp directory. 
- * - * @return Path path to the temp directory holding the shared libraries - */ - public static Path copySharedLibraries() { - try { - // Identify the OS and architecture - String osName = System.getProperty("os.name").toLowerCase(); - boolean isWindows = osName.startsWith("windows"); - boolean isMac = osName.startsWith("mac os x"); - boolean isLinux = osName.startsWith("linux"); - if(isWindows) osName = "windows"; - if(isMac) osName = "macos"; - if(isLinux) osName = "linux"; - - //String osArch = System.getProperty("os.arch"); - - // Create a temporary directory - Path tempDirectory = Files.createTempDirectory("nativeLibraries"); - tempDirectory.toFile().deleteOnExit(); - - String[] libraryNames = { - "gptj-default", - "gptj-avxonly", - "llmodel", - "mpt-default", - "llamamodel-230511-default", - "llamamodel-230519-default", - "llamamodel-mainline-default", - "llamamodel-mainline-metal", - "replit-mainline-default", - "replit-mainline-metal", - "ggml-metal.metal", - "falcon-default" - }; - - for (String libraryName : libraryNames) { - - if(!isMac && ( - libraryName.equals("replit-mainline-metal") - || libraryName.equals("llamamodel-mainline-metal") - || libraryName.equals("ggml-metal.metal")) - ) continue; - - if(isWindows){ - libraryName = libraryName + ".dll"; - } else if(isMac){ - if(!libraryName.equals("ggml-metal.metal")) - libraryName = "lib" + libraryName + ".dylib"; - } else if(isLinux) { - libraryName = "lib"+ libraryName + ".so"; - } - - // Construct the resource path based on the OS and architecture - String nativeLibraryPath = "/native/" + osName + "/" + libraryName; - - // Get the library resource as a stream - InputStream in = Util.class.getResourceAsStream(nativeLibraryPath); - if (in == null) { - throw new RuntimeException("Unable to find native library: " + nativeLibraryPath); - } - - // Create a file in the temporary directory with the original library name - Path tempLibraryPath = tempDirectory.resolve(libraryName); - - // Use Files.copy to copy the library to the temporary file - Files.copy(in, tempLibraryPath, StandardCopyOption.REPLACE_EXISTING); - - // Close the input stream - in.close(); - } - - // Add shutdown hook to delete tempDir on JVM exit - // On Windows deleting dll files that are loaded into memory is not possible. 
- if(!isWindows) { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - try { - Files.walk(tempDirectory) - .sorted(Comparator.reverseOrder()) - .map(Path::toFile) - .forEach(file -> { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.error("Deleting temp library file", e); - } - }); - } catch (IOException e) { - logger.error("Deleting temp directory for libraries", e); - } - })); - } - - return tempDirectory; - } catch (IOException e) { - throw new RuntimeException("Failed to load native libraries", e); - } - } - - public static String getValidUtf8(byte[] bytes) { - try { - return cs.decode(ByteBuffer.wrap(bytes)).toString(); - } catch (CharacterCodingException e) { - return null; - } - } -} diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/BasicTests.java b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/BasicTests.java deleted file mode 100644 index 6f2be894..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/BasicTests.java +++ /dev/null @@ -1,182 +0,0 @@ -package com.hexadevlabs.gpt4all; - - -import jnr.ffi.Memory; -import jnr.ffi.Pointer; -import jnr.ffi.Runtime; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mockito; - -import org.mockito.junit.jupiter.MockitoExtension; - - -import java.io.ByteArrayOutputStream; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Map; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.*; - -/** - * These tests only test the Java implementation as the underlying backend can't be mocked. - * These tests do serve the purpose of validating the java bits that do - * not directly have to do with the function of the underlying gp4all library. - */ -@ExtendWith(MockitoExtension.class) -public class BasicTests { - - @Test - public void simplePromptWithObject(){ - - LLModel model = Mockito.spy(new LLModel()); - - LLModel.GenerationConfig config = - LLModel.config() - .withNPredict(20) - .build(); - - // The generate method will return "4" - doReturn("4").when( model ).generate(anyString(), eq(config), eq(true)); - - LLModel.PromptMessage promptMessage1 = new LLModel.PromptMessage(LLModel.Role.SYSTEM, "You are a helpful assistant"); - LLModel.PromptMessage promptMessage2 = new LLModel.PromptMessage(LLModel.Role.USER, "Add 2+2"); - - LLModel.Messages messages = new LLModel.Messages(promptMessage1, promptMessage2); - - LLModel.CompletionReturn response = model.chatCompletion( - messages, config, true, true); - - assertTrue( response.choices().first().content().contains("4") ); - - // Verifies the prompt and response are certain length. - assertEquals( 224 , response.usage().totalTokens ); - } - - @Test - public void simplePrompt(){ - - LLModel model = Mockito.spy(new LLModel()); - - LLModel.GenerationConfig config = - LLModel.config() - .withNPredict(20) - .build(); - - // The generate method will return "4" - doReturn("4").when( model ).generate(anyString(), eq(config), eq(true)); - - LLModel.ChatCompletionResponse response= model.chatCompletion( - List.of(Map.of("role", "system", "content", "You are a helpful assistant"), - Map.of("role", "user", "content", "Add 2+2")), config, true, true); - - assertTrue( response.choices.get(0).get("content").contains("4") ); - - // Verifies the prompt and response are certain length. 
- assertEquals( 224 , response.usage.totalTokens ); - } - - @Test - public void testResponseCallback(){ - - ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream(); - ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream(); - - LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration); - - // Get the runtime instance - Runtime runtime = Runtime.getSystemRuntime(); - - // Allocate memory for the byte array. Has to be null terminated - - // UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9 - byte[] utf8ByteArray = {(byte) 0xF0, (byte) 0x9F, (byte) 0x92, (byte) 0xA9, 0x00}; // Adding null termination - - // Optional: Converting the byte array back to a String to print the character - String decodedString = new String(utf8ByteArray, 0, utf8ByteArray.length - 1, java.nio.charset.StandardCharsets.UTF_8); - - Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length); - - // Copy the byte array to the allocated memory - pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length); - - responseCallback.invoke(1, pointer); - - String result = bufferingForWholeGeneration.toString(StandardCharsets.UTF_8); - - assertEquals(decodedString, result); - - } - - @Test - public void testResponseCallbackTwoTokens(){ - - ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream(); - ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream(); - - LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration); - - // Get the runtime instance - Runtime runtime = Runtime.getSystemRuntime(); - - // Allocate memory for the byte array. 
Has to be null terminated - - // UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9 - byte[] utf8ByteArray = { (byte) 0xF0, (byte) 0x9F, 0x00}; // Adding null termination - byte[] utf8ByteArray2 = { (byte) 0x92, (byte) 0xA9, 0x00}; // Adding null termination - - // Optional: Converting the byte array back to a String to print the character - Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length); - - // Copy the byte array to the allocated memory - pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length); - - responseCallback.invoke(1, pointer); - // Copy the byte array to the allocated memory - pointer.put(0, utf8ByteArray2, 0, utf8ByteArray2.length); - - responseCallback.invoke(2, pointer); - - String result = bufferingForWholeGeneration.toString(StandardCharsets.UTF_8); - - assertEquals("\uD83D\uDCA9", result); - - } - - - @Test - public void testResponseCallbackExpectError(){ - - ByteArrayOutputStream bufferingForStdOutStream = new ByteArrayOutputStream(); - ByteArrayOutputStream bufferingForWholeGeneration = new ByteArrayOutputStream(); - - LLModelLibrary.ResponseCallback responseCallback = LLModel.getResponseCallback(false, bufferingForStdOutStream, bufferingForWholeGeneration); - - // Get the runtime instance - Runtime runtime = Runtime.getSystemRuntime(); - - // UTF-8 Encoding of the character: 0xF0 0x9F 0x92 0xA9 - byte[] utf8ByteArray = {(byte) 0xF0, (byte) 0x9F, (byte) 0x92, (byte) 0xA9}; // No null termination - - Pointer pointer = Memory.allocateDirect(runtime, utf8ByteArray.length); - - // Copy the byte array to the allocated memory - pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length); - - Exception exception = assertThrows(RuntimeException.class, () -> responseCallback.invoke(1, pointer)); - - assertEquals("Empty array or not null terminated", exception.getMessage()); - - // With empty array - utf8ByteArray = new byte[0]; - pointer.put(0, utf8ByteArray, 0, utf8ByteArray.length); - - Exception exceptionN = assertThrows(RuntimeException.class, () -> responseCallback.invoke(1, pointer)); - - assertEquals("Empty array or not null terminated", exceptionN.getMessage()); - - } - -} diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example1.java b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example1.java deleted file mode 100644 index e8925a1f..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example1.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import java.nio.file.Path; -import java.util.List; -import java.util.Map; - -/** - * GPTJ chat completion, multiple messages - */ -public class Example1 { - public static void main(String[] args) { - - // Optionally in case override to location of shared libraries is necessary - //LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - - try ( LLModel gptjModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin")) ){ - - LLModel.GenerationConfig config = LLModel.config() - .withNPredict(4096).build(); - - gptjModel.chatCompletion( - List.of(Map.of("role", "user", "content", "Add 2+2"), - Map.of("role", "assistant", "content", "4"), - Map.of("role", "user", "content", "Multiply 4 * 5")), config, true, true); - - } catch (Exception e) { - throw new RuntimeException(e); - } - } -} \ No newline at end of file diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example2.java 
b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example2.java deleted file mode 100644 index 35719d15..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example2.java +++ /dev/null @@ -1,31 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import java.nio.file.Path; - -/** - * Generation with MPT model - */ -public class Example2 { - public static void main(String[] args) { - - String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:"; - - // Optionally in case override to location of shared libraries is necessary - //LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - - try (LLModel mptModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-mpt-7b-instruct.bin"))) { - - LLModel.GenerationConfig config = - LLModel.config() - .withNPredict(4096) - .withRepeatLastN(64) - .build(); - - mptModel.generate(prompt, config, true); - - } catch (Exception e) { - throw new RuntimeException(e); - } - } - -} diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example3.java b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example3.java deleted file mode 100644 index fd842cdc..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example3.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import jnr.ffi.LibraryLoader; - -import java.nio.file.Path; -import java.util.List; -import java.util.Map; - -/** - * GPTJ chat completion with system message - */ -public class Example3 { - public static void main(String[] args) { - - // Optionally in case override to location of shared libraries is necessary - //LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - - try ( LLModel gptjModel = new LLModel(Path.of("C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\ggml-gpt4all-j-v1.3-groovy.bin")) ){ - - LLModel.GenerationConfig config = LLModel.config() - .withNPredict(4096).build(); - - // String result = gptjModel.generate(prompt, config, true); - gptjModel.chatCompletion( - List.of(Map.of("role", "system", "content", "You are a helpful assistant"), - Map.of("role", "user", "content", "Add 2+2")), config, true, true); - - - } catch (Exception e) { - throw new RuntimeException(e); - } - } -} diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example4.java b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example4.java deleted file mode 100644 index f8fc0029..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example4.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import java.nio.file.Path; - -public class Example4 { - - public static void main(String[] args) { - - String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:"; - // The emoji is poop emoji. The Unicode character is encoded as surrogate pair for Java string. 
- // LLM should correctly identify it as poop emoji in the description - //String prompt = "### Human:\nDescribe the meaning of this emoji \uD83D\uDCA9\n### Assistant:"; - //String prompt = "### Human:\nOutput the unicode character of smiley face emoji\n### Assistant:"; - - // Optionally in case override to location of shared libraries is necessary - //LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - - String model = "ggml-vicuna-7b-1.1-q4_2.bin"; - //String model = "ggml-gpt4all-j-v1.3-groovy.bin"; - //String model = "ggml-mpt-7b-instruct.bin"; - String basePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\"; - //String basePath = "/Users/fzaslavs/Library/Application Support/nomic.ai/GPT4All/"; - - try (LLModel mptModel = new LLModel(Path.of(basePath + model))) { - - LLModel.GenerationConfig config = - LLModel.config() - .withNPredict(4096) - .withRepeatLastN(64) - .build(); - - - String result = mptModel.generate(prompt, config, true); - - System.out.println("Code points:"); - result.codePoints().forEach(System.out::println); - - - } catch (Exception e) { - throw new RuntimeException(e); - } - } -} diff --git a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example5.java b/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example5.java deleted file mode 100644 index a2045cd0..00000000 --- a/gpt4all-bindings/java/src/test/java/com/hexadevlabs/gpt4all/Example5.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.hexadevlabs.gpt4all; - -import java.nio.file.Path; - -public class Example5 { - - public static void main(String[] args) { - - // String prompt = "### Human:\nWhat is the meaning of life\n### Assistant:"; - // The emoji is poop emoji. The Unicode character is encoded as surrogate pair for Java string. - // LLM should correctly identify it as poop emoji in the description - //String prompt = "### Human:\nDescribe the meaning of this emoji \uD83D\uDCA9\n### Assistant:"; - //String prompt = "### Human:\nOutput the unicode character of smiley face emoji\n### Assistant:"; - - // Optionally in case override to location of shared libraries is necessary - //LLModel.LIBRARY_SEARCH_PATH = "C:\\Users\\felix\\gpt4all\\lib\\"; - StringBuffer b = new StringBuffer(); - b.append("The ".repeat(2060)); - String prompt = b.toString(); - - - String model = "ggml-vicuna-7b-1.1-q4_2.bin"; - //String model = "ggml-gpt4all-j-v1.3-groovy.bin"; - //String model = "ggml-mpt-7b-instruct.bin"; - String basePath = "C:\\Users\\felix\\AppData\\Local\\nomic.ai\\GPT4All\\"; - //String basePath = "/Users/fzaslavs/Library/Application Support/nomic.ai/GPT4All/"; - - try (LLModel mptModel = new LLModel(Path.of(basePath + model))) { - - LLModel.GenerationConfig config = - LLModel.config() - .withNPredict(4096) - .withRepeatLastN(64) - .build(); - - String result = mptModel.generate(prompt, config, true); - - System.out.println("Code points:"); - result.codePoints().forEach(System.out::println); - - - } catch (Exception e) { - System.out.println(e.getMessage()); - throw new RuntimeException(e); - } - } -}
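Example5 above deliberately builds a prompt far longer than the model's context window; since version 1.1.3 the binding surfaces this condition as a PromptIsTooLongException thrown from the response callback. A minimal sketch of handling it; the model path is hypothetical:

```java
import com.hexadevlabs.gpt4all.LLModel;
import com.hexadevlabs.gpt4all.PromptIsTooLongException;

import java.nio.file.Path;

public class OverlongPromptExample {
    public static void main(String[] args) throws Exception {
        // Build a prompt that is almost certainly longer than the context window.
        String prompt = "The ".repeat(4096);

        try (LLModel model = new LLModel(Path.of("C:\\models\\ggml-vicuna-7b-1.1-q4_2.bin"))) {
            LLModel.GenerationConfig config = LLModel.config().withNPredict(128).build();
            String result = model.generate(prompt, config, false);
            System.out.println(result);
        } catch (PromptIsTooLongException e) {
            // Thrown from the response callback when the backend reports that
            // the tokenized prompt exceeds the model's context length.
            System.err.println("Prompt too long: " + e.getMessage());
        }
    }
}
```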