mirror of
https://github.com/google/fonts.git
synced 2024-11-28 11:00:21 +03:00
Dropped hyperglot dependency due to licensing.
- Removed SupportedLanguages method as it relies on hyperglot, which is under the GPLv3, to keep gflanguages under Apache 2.0 - The method is still available as a separate code-snippet (not part of gflanguages itself), and any program using that snippet will need to comply with the GPLv3. - For more details, see: https://github.com/googlefonts/fontbakery/pull/3617#issuecomment-1044898812
This commit is contained in:
parent
30c13b8637
commit
75e6907c9c
@ -1,5 +1,13 @@
|
||||
Below are the most important changes from each release.
|
||||
|
||||
## 0.2.0 (2022-Feb-18)
|
||||
### Release notes
|
||||
- Dropped hyperglot dependency due to licensing. See below.
|
||||
- Removed SupportedLanguages method as it relies on hyperglot, which is under the GPLv3, to keep gflanguages under Apache 2.0
|
||||
- The method is still available as a separate code-snippet (not part of gflanguages itself), and any program using that snippet will need to comply with the GPLv3.
|
||||
- For more details, see: https://github.com/googlefonts/fontbakery/pull/3617#issuecomment-1044898812
|
||||
|
||||
|
||||
## 0.1.1 (2022-Feb-18)
|
||||
### Bugfix
|
||||
- updated and simplified the textproto definition to workaround this kind of problem when using the module on projects that also import `fonts_public_pb2.py`: https://github.com/protocolbuffers/protobuf/issues/3002
|
||||
|
@ -23,9 +23,7 @@ data on the Google Fonts collection.
|
||||
import glob
|
||||
import os
|
||||
|
||||
from fontTools.ttLib import TTFont
|
||||
from google.protobuf import text_format
|
||||
from hyperglot import parse as hyperglot_parse
|
||||
from pkg_resources import resource_filename
|
||||
|
||||
from gflanguages import languages_public_pb2
|
||||
@ -33,19 +31,6 @@ from gflanguages import languages_public_pb2
|
||||
DATA_DIR = resource_filename("gflanguages", "data")
|
||||
|
||||
|
||||
def _ParseFontChars(path):
    """
    Open the provided font path and extract the codepoints encoded in the font
    @return list of characters
    """
    font = TTFont(path, lazy=True)
    try:
        cmap = font["cmap"].getBestCmap()
    finally:
        # Close the font handle even if cmap extraction raises, so the
        # lazily-opened file object is not leaked.
        font.close()

    # The cmap keys are int codepoints
    return [chr(c) for c in cmap.keys()]
|
||||
|
||||
|
||||
def LoadLanguages(languages_dir=None):
|
||||
if languages_dir is None:
|
||||
languages_dir = os.path.join(DATA_DIR, 'languages')
|
||||
@ -80,33 +65,3 @@ def LoadRegions(regions_dir=None):
|
||||
region = text_format.Parse(f.read(), languages_public_pb2.RegionProto())
|
||||
regions[region.id] = region
|
||||
return regions
|
||||
|
||||
|
||||
def SupportedLanguages(font_path, languages=None):
    """
    Return the languages a given font file supports.

    Candidate languages come from *languages* (defaulting to the full set
    loaded by LoadLanguages); a language is considered supported when every
    character of its base exemplar set is encoded in the font.

    Logic based on Hyperglot:
    https://github.com/rosettatype/hyperglot/blob/3172061ca05a62c0ff330eb802a17d4fad8b1a4d/lib/hyperglot/language.py#L273-L301
    """
    if languages is None:
        languages = LoadLanguages()

    font_chars = set(_ParseFontChars(font_path))

    matched = []
    for language in languages.values():
        # Skip languages with no base exemplar data to compare against.
        if not (language.HasField('exemplar_chars')
                and language.exemplar_chars.HasField('base')):
            continue

        exemplars = hyperglot_parse.parse_chars(
            language.exemplar_chars.base,
            decompose=False,
            retainDecomposed=False)
        if all(ch in font_chars for ch in exemplars):
            matched.append(language)

    return matched
|
||||
|
@ -1,5 +1,3 @@
|
||||
--index-url https://pypi.python.org/simple/
|
||||
fontTools==4.29.1
|
||||
hyperglot==0.3.7
|
||||
protobuf==3.19.4
|
||||
-e .
|
||||
|
2
setup.py
2
setup.py
@ -53,8 +53,6 @@ setup(
|
||||
python_requires=">=3.7",
|
||||
setup_requires=['setuptools_scm>=4,<6.1'],
|
||||
install_requires=[
|
||||
'FontTools',
|
||||
'hyperglot',
|
||||
'protobuf',
|
||||
'setuptools'
|
||||
]
|
||||
|
87
snippets/supported_languages.py
Normal file
87
snippets/supported_languages.py
Normal file
@ -0,0 +1,87 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2022 The Google Fonts Tools Authors.
|
||||
# Copyright 2017,2022 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# =======================================================================
|
||||
# ======= This code-snippet uses hyperglot, which is licensed =======
|
||||
# ======= under the GNU GPLv3. So, the resulting license for =======
|
||||
# ======= any program using this snippet will also have to be =======
|
||||
# ======= the GNU GPLv3. =======
|
||||
# =======================================================================
|
||||
|
||||
from gflanguages.lang_support import LoadLanguages
|
||||
from hyperglot import parse as hyperglot_parse
|
||||
|
||||
|
||||
def _ParseFontChars(path):
    """
    Open the provided font path and extract the codepoints encoded in the font
    @return list of characters
    """
    from fontTools.ttLib import TTFont
    font = TTFont(path, lazy=True)
    try:
        cmap = font["cmap"].getBestCmap()
    finally:
        # Close the font handle even if cmap extraction raises, so the
        # lazily-opened file object is not leaked.
        font.close()

    # The cmap keys are int codepoints
    return [chr(c) for c in cmap.keys()]
|
||||
|
||||
|
||||
def SupportedLanguages(font_path, languages=None):
    """
    Determine which languages a font file supports.

    Languages are drawn from *languages* (defaulting to the set returned by
    LoadLanguages); one qualifies when all of its base exemplar characters
    are present in the font's cmap.

    Logic based on Hyperglot:
    https://github.com/rosettatype/hyperglot/blob/3172061ca05a62c0ff330eb802a17d4fad8b1a4d/lib/hyperglot/language.py#L273-L301
    """
    if languages is None:
        languages = LoadLanguages()

    encoded = set(_ParseFontChars(font_path))

    results = []
    for lang in languages.values():
        # Nothing to check if the language carries no base exemplar chars.
        if not (lang.HasField('exemplar_chars')
                and lang.exemplar_chars.HasField('base')):
            continue

        exemplar = hyperglot_parse.parse_chars(lang.exemplar_chars.base,
                                               decompose=False,
                                               retainDecomposed=False)
        if all(c in encoded for c in exemplar):
            results.append(lang)

    return results
|
||||
|
||||
|
||||
def portable_path(p):
    """Convert a POSIX-style ('/'-separated) path to the native OS form."""
    import os
    segments = p.split('/')
    return os.path.join(*segments)
|
||||
|
||||
|
||||
def TEST_FILE(f):
    """Return the platform-native path of fixture *f* under data/test."""
    relative = "data/test/" + f
    return portable_path(relative)
|
||||
|
||||
|
||||
def test_SupportedLanguages():
    """Smoke-test SupportedLanguages against the bundled Nunito font."""
    font = TEST_FILE('nunito/Nunito-Regular.ttf')
    supported = SupportedLanguages(font)
    # Iterate the list directly instead of indexing via enumerate.
    langs = [lang.name for lang in supported]
    assert len(langs) == 225
    assert 'Lithuanian' in langs
|
@ -1,15 +1,6 @@
|
||||
import os
|
||||
from gflanguages import lang_support
|
||||
|
||||
|
||||
def portable_path(p):
    """Map a '/'-separated path onto the current platform's separators."""
    parts = p.split('/')
    return os.path.join(*parts)
|
||||
|
||||
|
||||
def TEST_FILE(f):
    """Return the platform-native path of fixture *f* under data/test."""
    rel = "data/test/" + f
    return portable_path(rel)
|
||||
|
||||
|
||||
def test_LoadLanguages():
|
||||
langs = lang_support.LoadLanguages()
|
||||
numerals = langs["yi_Hebr"].exemplar_chars.numerals
|
||||
@ -26,11 +17,3 @@ def test_LoadRegions():
|
||||
br = regions["BR"]
|
||||
assert br.name == 'Brazil'
|
||||
assert br.region_group == ['Americas']
|
||||
|
||||
|
||||
def test_SupportedLanguages():
    """Smoke-test lang_support.SupportedLanguages on the bundled Nunito font."""
    font = TEST_FILE('nunito/Nunito-Regular.ttf')
    supported = lang_support.SupportedLanguages(font)
    # Iterate the list directly instead of indexing via enumerate.
    langs = [lang.name for lang in supported]
    assert len(langs) == 225
    assert 'Lithuanian' in langs
|
||||
|
Loading…
Reference in New Issue
Block a user