1
1
mirror of https://github.com/rsms/inter.git synced 2024-08-16 22:30:53 +03:00

Initial public commit

This commit is contained in:
Rasmus Andersson 2017-08-22 00:05:20 -07:00
commit 3b1fffade1
6648 changed files with 363948 additions and 0 deletions

2
.gitattributes vendored Normal file
View File

@ -0,0 +1,2 @@
# Use CRLF for line endings in the Windows install "readme" file
misc/doc/install-win.txt text eol=crlf

19
.gitignore vendored Normal file
View File

@ -0,0 +1,19 @@
*.pyc
*.pyo
*.ttx
*.o
*.d
*.core
*.obj
*.exe
*.patch
*.diff
_*.ignore
*~
.DS_Store
*.sparseimage
build
/_*
src/FontInspector.html

92
LICENSE.txt Normal file
View File

@ -0,0 +1,92 @@
Copyright (c) 2017 The Interface Project Authors (me@rsms.me)
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
http://scripts.sil.org/OFL
-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------
PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide
development of collaborative font projects, to support the font creation
efforts of academic and linguistic communities, and to provide a free and
open framework in which fonts may be shared and improved in partnership
with others.
The OFL allows the licensed fonts to be used, studied, modified and
redistributed freely as long as they are not sold by themselves. The
fonts, including any derivative works, can be bundled, embedded,
redistributed and/or sold with any software provided that any reserved
names are not used by derivative works. The fonts and derivatives,
however, cannot be released under any other type of license. The
requirement for fonts to remain under this license does not apply
to any document created using the fonts or their derivatives.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this license and clearly marked as such. This may
include source files, build scripts and documentation.
"Reserved Font Name" refers to any names specified as such after the
copyright statement(s).
"Original Version" refers to the collection of Font Software components as
distributed by the Copyright Holder(s).
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to a
new environment.
"Author" refers to any designer, engineer, programmer, technical
writer or other person who contributed to the Font Software.
PERMISSION AND CONDITIONS
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Font Software, to use, study, copy, merge, embed, modify,
redistribute, and sell modified and unmodified copies of the Font
Software, subject to the following conditions:
1) Neither the Font Software nor any of its individual components,
in Original or Modified Versions, may be sold by itself.
2) Original or Modified Versions of the Font Software may be bundled,
redistributed and/or sold with any software, provided that each copy
contains the above copyright notice and this license. These can be
included either as stand-alone text files, human-readable headers or
in the appropriate machine-readable metadata fields within text or
binary files as long as those fields can be easily viewed by the user.
3) No Modified Version of the Font Software may use the Reserved Font
Name(s) unless explicit written permission is granted by the corresponding
Copyright Holder. This restriction only applies to the primary font name as
presented to the users.
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
Software shall not be used to promote, endorse or advertise any
Modified Version, except to acknowledge the contribution(s) of the
Copyright Holder(s) and the Author(s) or with their explicit written
permission.
5) The Font Software, modified or unmodified, in part or in whole,
must be distributed entirely under this license, and must not be
distributed under any other license. The requirement for fonts to
remain under this license does not apply to any document created
using the Font Software.
TERMINATION
This license becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
OTHER DEALINGS IN THE FONT SOFTWARE.

106
Makefile Normal file
View File

@ -0,0 +1,106 @@
# Targets:
#   all        Build all styles in all formats (default)
#   all_ttf    Build all styles as TrueType
#   STYLE      Build STYLE in all formats (e.g. MediumItalic)
#   STYLE_ttf  Build STYLE as TrueType (e.g. MediumItalic_ttf)
#   zip        Build all styles as TrueType and package into a zip archive
#
all: all_web all_otf

# generated.make is automatically generated by init.sh and defines dependencies for
# all styles and alias targets (all_web, all_otf, STYLE, STYLE_ttf, ...)
include build/etc/generated.make

res_files := src/fontbuild.cfg src/diacritics.txt src/glyphlist.txt src/glyphorder.txt

# Remove half-written targets when a recipe fails, so an interrupted build
# never leaves a corrupt file that looks up to date.
.DELETE_ON_ERROR:

# UFO -> TTF & OTF (note that UFO deps are defined by generated.make).
# ufocompile --otf produces both the TTF and the OTF in one run.
build/tmp/InterfaceTTF/Interface-%.ttf: $(res_files)
	misc/ufocompile --otf $*

# The OTF is written as a side effect of the TTF rule above; @true only
# marks this target as made once its prerequisites are up to date.
build/tmp/InterfaceOTF/Interface-%.otf: build/tmp/InterfaceTTF/Interface-%.ttf $(res_files)
	@true

# build/tmp/ttf -> build (generated.make handles build/tmp/InterfaceTTF/Interface-%.ttf)
build/dist-unhinted/Interface-%.ttf: build/tmp/InterfaceTTF/Interface-%.ttf
	@mkdir -p build/dist-unhinted
	cp -a "$<" "$@"

# OTF
build/dist-unhinted/Interface-%.otf: build/tmp/InterfaceOTF/Interface-%.otf
	@mkdir -p build/dist-unhinted
	cp -a "$<" "$@"

build/dist:
	@mkdir -p build/dist

# autohint
# build/dist is an order-only prerequisite (after |): it must exist, but its
# changing timestamp must not force every font to be re-hinted.
build/dist/Interface-%.ttf: build/dist-unhinted/Interface-%.ttf | build/dist
	ttfautohint \
	  --hinting-limit=256 \
	  --hinting-range-min=8 \
	  --hinting-range-max=64 \
	  --fallback-stem-width=256 \
	  --strong-stem-width=D \
	  --no-info \
	  --verbose \
	  "$<" "$@"

# TTF -> WOFF2 (woff2_compress writes the .woff2 next to its input)
build/%.woff2: build/%.ttf
	woff2_compress "$<"

# TTF -> WOFF
build/%.woff: build/%.ttf
	ttf2woff -O -t woff "$<" "$@"

# TTF -> EOT (disabled)
# build/%.eot: build/%.ttf
#	ttf2eot "$<" > "$@"

# Package all styles plus install docs into build/release/Interface-YYYYMMDD.zip
zip: all
	@rm -rf build/.zip
	@rm -f build/.zip.zip
	@mkdir -p \
	  "build/.zip/Interface (web)" \
	  "build/.zip/Interface (hinted TTF)" \
	  "build/.zip/Interface (TTF)" \
	  "build/.zip/Interface (OTF)"
	cp -a build/dist/*.woff build/dist/*.woff2 "build/.zip/Interface (web)/"
	cp -a build/dist/*.ttf "build/.zip/Interface (hinted TTF)/"
	cp -a build/dist-unhinted/*.ttf "build/.zip/Interface (TTF)/"
	cp -a build/dist-unhinted/*.otf "build/.zip/Interface (OTF)/"
	cp -a misc/doc/install-*.txt "build/.zip/"
	cd build/.zip && zip -v -X -r "../../build/.zip.zip" *
	@mkdir -p build/release
	@mv -f build/.zip.zip build/release/Interface-`date '+%Y%m%d'`.zip
	@echo write build/release/Interface-`date '+%Y%m%d'`.zip
	@rm -rf build/.zip

install_ttf: all_ttf
	@echo "Installing TTF files locally at ~/Library/Fonts/Interface"
	rm -rf ~/Library/Fonts/Interface
	mkdir -p ~/Library/Fonts/Interface
	cp -va build/dist/*.ttf ~/Library/Fonts/Interface

install_otf: all_otf
	@echo "Installing OTF files locally at ~/Library/Fonts/Interface"
	rm -rf ~/Library/Fonts/Interface
	mkdir -p ~/Library/Fonts/Interface
	cp -va build/dist-unhinted/*.otf ~/Library/Fonts/Interface

install: all install_otf

glyphinfo: _local/UnicodeData.txt
	misc/gen-glyphinfo.py -ucd _local/UnicodeData.txt \
	  src/Interface-*.ufo > misc/preview/glyphinfo.json

# Download latest Unicode data
_local/UnicodeData.txt:
	@mkdir -p _local
	curl -s '-#' -o "$@" \
	  http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt

clean:
	rm -vrf build/tmp/* build/dist/Interface-*.*

.PHONY: all web clean install install_otf install_ttf deploy zip glyphinfo

207
README.md Normal file
View File

@ -0,0 +1,207 @@
# Interface
Interface is a typeface specially designed for user interfaces, with excellent legibility at small sizes.
![Sample](docs/res/sample.png)
### [⬇︎ Download the latest release](https://github.com/rsms/interface/releases)
After downloading the zip from above:
1. Double-click the downloaded zip file to unpack or open it.
2. Follow the instructions in "install-mac.txt" or "install-win.txt", depending
on what operating system you're using.
## Design
Interface is similar to Roboto, San Francisco, Akkurat, Asap, Lucida Grande and other "UI" typefaces. Some trade-offs were made in order to make this typeface work really well at small sizes:
- Currently not suitable for very large sizes because of some small-scale glyph optimizations (like "pits" and "traps") that help rasterization at small sizes but stand out and interfere at large sizes.
- Rasterized at sizes below 12px, some stems—like the horizontal center of "E", "F", or vertical center of "m"—are drawn with two semi-opaque pixels instead of one solid. This is because we "prioritize" (optimize for) higher-density rasterizations. If we move these stems to an off-center position—so that they can be drawn sharply at e.g. 11px—text will be less legible at higher resolutions.
Current font styles:
- Regular — master
- Italic
- Bold — master
- BoldItalic
- Medium — derived from Regular and Bold by mixing
- MediumItalic
- Black — derived from Regular and Bold by mixing
- BlackItalic
Future versions will hopefully include lighter weights.
### Font metrics
This font was originally designed to work at a specific size: 11px. Thus, the Units per [EM](https://en.wikipedia.org/wiki/Em_(typography)) (UPM) is defined in such a way that a power-of-two multiple of one EM unit ends up at an integer value compared to a pixel. Most fonts are designed with a UPM of either 1000 or 2048. Because of this we picked a value that is as high as possible but also as close as possible to one of those common values (since it's reasonable to assume that some layout engines and rasterizers are optimized for those value magnitudes.) We ended up picking a UPM of 2816 which equates to exactly 256 units per pixel when rasterized for size 11pt at 1x scale. This also means that when rasterized at power-of-two scales (like 2x and 4x) the number of EM units corresponding to a pixel is an integer (128 units for 2x, 64 for 4x, and so on.)
However, as the project progressed and the typeface was put into use, it quickly
became clear that for anything longer than a short word, it was actually hard to
read the almost monotonically-spaced letters.
A second major revision was created where the previously-strict rule of geometry being even multiples of 256 was relaxed and now the rule is "try to stick with 128x, if you can't, stick with 64x and if you can't do that either, never go below 16x." This means that Interface is now much more variable in pace than it used to be, making it work better at higher resolutions and work much better in longer text, but losing some contrast and sharpness at small sizes.
![Metrics](docs/res/metrics.png)
The glyphs are designed based on this "plan"; most stems and lines will be positioned at EM units that are even multiples of 128, and in a few cases they are at even multiples of 64 or as low as 16.
Metrics:
- UPM: 2816
- Descender: -640
- x-height: 1536
- Cap height: 2048
- Ascender: 2688
Translating between EM units and pixels:
- Rasterized at 11px: 1px = 256 units
- Rasterized at 22px: 1px = 128 units
- Rasterized at 44px: 1px = 64 units
There's a Figma workspace for glyphs, with configured metrics: ["Interface glyphs"](https://www.figma.com/file/RtScFU5NETY3j9E0yOmnW4gv/Interface-glyphs)
## Contributing
By contributing work to the Interface font project you agree to have all work
contributed becoming the intellectual property of the Interface font project as
described by [SIL Open Font License, Version 1.1](http://scripts.sil.org/OFL)
### Building
Prerequisites:
- Python 2.7 with pip (you get pip with `brew install python`)
- [virtualenv](https://virtualenv.pypa.io/)
```
$ ./init.sh
```
This will generate makefile support, dependencies required by the toolchain, etc.
At the end, the script prints instructions for how to activate `virtualenv`.
As a convenience, you can also source init.sh to activate virtualenv.
We can now run `make` to build all font files:
```
$ make
```
Or just specific styles:
```
$ make Regular BoldItalic
```
Or all fonts but only TrueType format (no web file formats):
```
$ make all_ttf
```
Or just specific styles and formats:
```
# Regular in all formats, BoldItalic in only TrueType format
$ make Regular BoldItalic_ttf
```
You can also specify specific style + file format to `make` through `build/Interface-STYLE.FORMAT`.
E.g.
- `make build/Interface-MediumItalic.eot`
- `make build/Interface-Bold.woff2`
- `make build/Interface-Regular.ttf`
- `...`
All resulting font files are written to the `build` directory with `Interface-` as the filename prefix.
Note: Making all files takes a considerable amount of time.
It's a CPU and I/O intensive task to compile the fonts and so the build system has been setup to
be able to run many jobs in parallel. Therefore it's recommended to pass the [`-j` flag to make](https://www.gnu.org/software/make/manual/html_node/Parallel.html) and
optionally pipe the fairly verbose output to /dev/null, e.g. `make -j 8 >/dev/null`.
### Editing
This font is stored and authored in the [Unified Font Object (UFO)](http://unifiedfontobject.org/) file format and can be edited by many different software, some free. However, it's only been "tested" with [RoboFont](http://robofont.com/) which is a popular commercial font editor. There's a 30 day fully-functional free trial version of the app, so you can use it for smaller contributions without needing to buy a RoboFont license.
To make life easier for you, configure RoboFont's settings like this:
- Set the grid to 128 units. This means that each grid square equals one pixel at 2x scale.
- Set "Snap points to" to a reasonably high number that's a power-of-two, like 8.
- Set "SHIFT increment" to 16
- Set "CMD SHIFT increment" to 128
When you've made an edit, simply save your changes and run make:
```
$ make
```
*For quick turnaround, consider:*
- Build and test only the "Regular" style.
- Use `misc/notify` to get desktop notifications on builds so that you don't have to sit and wait looking at the terminal while it's building.
E.g. `misc/notify make Regular`
See ["Building"](#Building) for more details.
### Preview & debug
This project comes with a simple web-based application for debugging and
previewing the font. It's a very useful tool to have when working on the font.
- Comes with a large body of sample text data (which is also editable.)
- Provides samples of the most common latin-script pairs, useful for kerning.
- Provides samples of words ordered by commonality in latin scripts with a
preference for English (accessible via common-pair samples.)
- Can show the complete repertoire of the fonts, with correct glyph order and
even RoboFont color labels ("marks").
- Controls for basic font properties like family, weight, italic, size,
line-height, letter-spacing, etc.
- Controls for a lot of font features like ligature sets, contextual alternates,
alternate numerics, etc.
- Controls for web-browser text features like `capitalize`, `uppercase`,
`lowercase`, etc.
- Ability to compare Interface side-by-side with other fonts.
![Preview app screenshot](docs/res/preview-app.png)
The following will start a local web server (which is only accessible from your local computer; not the internet) that serves the debug-and-preview app:
```
$ docs/serve.sh &
```
You can now visit `http://localhost:2015/lab/`.
After you rebuild some font files, reload the web page to refresh fonts.
## FAQ
> Do I need RoboFont?
No, you don't. To build font files, all you need is Python. To edit the font files, you need something that can edit UFO files (like [RoboFont](http://robofont.com/) or a text editor.)
> `KeyError: 'Lj'` when building
This probably means that you need to run `./init.sh` to setup the case-sensitive virtual file system mount that is needed by the font build system. Unfortunately the toolchain used (which is the same as for Roboto) requires not only a case-preserving file system, but also a case-sensitive one.
> `ImportError: No module named robofab.objects.objectsRF`
Python virtualenv not configured. Run `. init.sh`
> `make: *** No rule to make target ...`
Run `./init.sh` to update the generated makefile.

1
docs/README.md Normal file
View File

@ -0,0 +1 @@
This directory is published as a website by Github at [https://rsms.me/interface](https://rsms.me/interface/)

BIN
docs/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

237
docs/index.html Normal file
View File

@ -0,0 +1,237 @@
<!DOCTYPE HTML>
<html lang="en" prefix="og: http://ogp.me/ns#">
<head>
<meta charset="utf-8">
<title>Interface font family</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="description" content="Interface is a new typeface optimized for high legibility on computer screens">
<meta name="og:description" content="Interface is a new typeface optimized for high legibility on computer screens">
<meta name="twitter:description" content="Interface is a new typeface optimized for high legibility on computer screens">
<meta name="twitter:card" content="summary">
<meta name="twitter:site" content="@rsms">
<meta name="twitter:creator" content="@rsms">
<meta property="og:image" content="https://rsms.me/interface/res/poster.png">
<meta name="twitter:image" content="https://rsms.me/interface/res/poster.png">
<link href="interface.css" rel="stylesheet">
<style type="text/css">
* { margin:0; padding:0; }
html { }
body {
background-color: #f4f4f4;
color: #414141;
font: 16px/22px Interface, sans-serif;
letter-spacing:-0.1px;
font-weight: 400; /*300=light, 400=regular, 500=medium, 600=semibold*/
padding-bottom: 30px;
}
a {
color: inherit;
text-decoration: inherit;
background-image:
linear-gradient(to bottom,
rgba(0,0,0,.6) 50%,
rgba(0,0,0,0) 50%);
background-repeat: repeat-x;
background-size: 2px .1em;
background-position: 0 1.155em;
text-shadow:
/* creates "openings" for descenders in the underline */
-1px -1px 0 #f4f4f4,
1px -1px 0 #f4f4f4,
-1px 1px 0 #f4f4f4,
1px 1px 0 #f4f4f4;
word-break: break-word;
word-wrap: break-word;
}
a:hover {
color: rgb(48, 112, 232);
background-image:
linear-gradient(to bottom,
rgba(48, 112, 232, 1) 50%,
rgba(48, 112, 232, 0) 50%);
}
a.plain {
background:none;
text-shadow:none;
}
p {
margin: 20px 0;
}
code {
display: block;
font-family: "SFMono-Regular", Menlo, Consolas, Inconsolata, monospace;
/*background: rgba(0,0,0,0.05);*/
border-radius:1px;
padding: 0.5em 0;
}
h1, h2, h3 {
font-weight: 500;
}
h1 {
color: #333;
font-size: 38px;
letter-spacing: -1.3px;
line-height: 60px;
text-indent: -2px;
}
h2 {
font-size: 24px;
letter-spacing: -0.4px;
line-height: 40px;
}
h1 > a, h2 > a, h3 > a {
color: inherit;
text-shadow: none;
background: none !important;
}
.row {
padding: 50px;
/*background: salmon;*/
display: flex;
justify-content: center;
/*background: white;*/
}
.row > * {
max-width: 888px;
flex: 1 0 100%;
/*background: white;*/
}
.row.white {
background: white;
}
.row.white a {
text-shadow:
/* creates "openings" for descenders in the underline */
-1px -1px 0 white,
1px -1px 0 white,
-1px 1px 0 white,
1px 1px 0 white;
}
.row.dark {
background: #2b2b2b;
color: #99999b;
}
.row.dark a:hover {
color: rgb(164, 188, 255);
background-image:
linear-gradient(to bottom,
rgba(164, 188, 255, 1) 50%,
rgba(164, 188, 255, 0) 50%);
}
.row.dark a {
background-image:
linear-gradient(to bottom,
rgba(255,255,255,.3) 50%,
rgba(255,255,255,0) 50%);
text-shadow:
/* creates "openings" for descenders in the underline */
-1px -1px 0 #2b2b2b,
1px -1px 0 #2b2b2b,
-1px 1px 0 #2b2b2b,
1px 1px 0 #2b2b2b;
}
.row.dark h2, .row.dark h2 > a {
color: #ccc;
background: none;
}
</style>
</head>
<body>
<div class="row"><div>
<h1>The Interface font family</h1>
<p>
Interface is a font for highly legible text on computer screens.<br>
<a href="https://github.com/rsms/interface/releases/latest/">Download the latest release</a>
or try it out in the <a href="/lab/">playground</a>
</p>
</div></div>
<div class="row white"><div>
<h2><a id="sample" href="#sample">Sample</a></h2>
<p>
<img src="res/sample.png" width="888">
</p>
</div></div>
<div class="row dark"><div>
<h2><a id="free" href="#free">How much does it cost?</a></h2>
<p>
Interface is a <a href="https://github.com/rsms/interface">free and open source</a> font family. You are free to use this font in almost any way imaginable.
Refer to the <a href="https://choosealicense.com/licenses/ofl-1.1/">SIL Open Font License 1.1</a> for exact details on what the conditions and restrictions are.
</p>
<p>&nbsp;</p>
<h2><a id="usage" href="#usage">How do I use it?</a></h2>
<p>
Using the font is as easy as
<a href="https://github.com/rsms/interface/releases/latest/">download &amp; installing</a> locally on your computer.
</p>
<p>
You're free to bundle copies of Interface with your software, even if it's
commercial and you charge money for your software. Interface can also be used
on the web by either hosting the font files yourself or by including this CSS:
</p>
<code>@import url('https://rsms.me/interface/interface.css');</code>
<p>Use the following CSS rules to specify these families:</p>
<code>font-family: Interface, sans-serif;</code>
</div></div>
<div class="row"><div>
<h2><a id="story" href="#story">The story behind Interface</a></h2>
<p>
Interface started out in late 2016 as an experiment to build a perfectly
pixel-fitting font at a specific small size (11px.) The idea was that
by crafting a font in a particular way, with a particular coordinate system
(Units Per EM), and for a particular target rasterization size (11), it would
be possible to get the best of both sharpness and readability.
</p>
<p>
However after a few months of using an early version of Interface, it dawned
on everyone exposed to the test that this approach had some serious real-world
problems. Most notably that it was really hard to read longer text. Because of
the pixel-aligning nature of that approach, the font took an almost monospaced
appearance, making it really easy to read numbers, punctuation and very short
words, but eyestraining to read anything longer.
</p>
<p>
The project was rebooted with a different approach, sticking with the
raster-specific UPM, but crafting glyphs and kerning in a way that made for
more variation in the rhythm and smoother vertical and horizontal stems.
As Interface was being developed, it was tested on an internal version of
<a href="https://www.figma.com/">Figma</a>—where the author of Interface works as a designer—and slowly improved upon based on experience and feedback.
</p>
<p>&nbsp;</p>
<h2><a id="status" href="#status">Current status &amp; usability</a></h2>
<p>
Interface works great for English-language text, and pretty well for other
latin and cyrillic languages. There's still a lot of work to be done, and
<a href="https://github.com/rsms/interface#contributing">contributions are warmly welcomed</a>.
Please refer to the <a href="/lab/?sample=Repertoire&size=42">glyph repertoire</a>
for an overview of currently-available glyphs and their quality.
You can also look at some common <a href="/lab/?sample=Kerning%20body%20multi-lang&size=16">non-English-language words in the playground.</a>
</p>
</div></div>
<div class="row"><div>
<a href="https://twitter.com/rsms" class="plain">@rsms</a>
</div></div>
</body>
</html>

44
docs/interface.css Normal file
View File

@ -0,0 +1,44 @@
/* @font-face declarations for the Interface web fonts, served from rsms.me.
 * Three weights (400 regular, 500 medium, 700 bold), each with a normal and
 * an italic face. WOFF2 is listed first so browsers that support it prefer
 * it over the larger WOFF fallback. */
/* Regular */
@font-face {
font-family: 'Interface';
font-style: normal;
font-weight: 400;
src: url("https://rsms.me/interface/font-files/Interface-Regular.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-Regular.woff") format("woff");
}
/* Regular Italic */
@font-face {
font-family: 'Interface';
font-style: italic;
font-weight: 400;
src: url("https://rsms.me/interface/font-files/Interface-RegularItalic.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-RegularItalic.woff") format("woff");
}
/* Medium */
@font-face {
font-family: 'Interface';
font-style: normal;
font-weight: 500;
src: url("https://rsms.me/interface/font-files/Interface-Medium.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-Medium.woff") format("woff");
}
/* Medium Italic */
@font-face {
font-family: 'Interface';
font-style: italic;
font-weight: 500;
src: url("https://rsms.me/interface/font-files/Interface-MediumItalic.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-MediumItalic.woff") format("woff");
}
/* Bold */
@font-face {
font-family: 'Interface';
font-style: normal;
font-weight: 700;
src: url("https://rsms.me/interface/font-files/Interface-Bold.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-Bold.woff") format("woff");
}
/* Bold Italic */
@font-face {
font-family: 'Interface';
font-style: italic;
font-weight: 700;
src: url("https://rsms.me/interface/font-files/Interface-BoldItalic.woff2") format("woff2"),
url("https://rsms.me/interface/font-files/Interface-BoldItalic.woff") format("woff");
}

1
docs/lab/fonts Symbolic link
View File

@ -0,0 +1 @@
../../build/dist

3075
docs/lab/glyphinfo.json Normal file

File diff suppressed because it is too large Load Diff

1635
docs/lab/index.html Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

BIN
docs/res/metrics.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 308 KiB

BIN
docs/res/poster.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

BIN
docs/res/preview-app.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 926 KiB

BIN
docs/res/sample.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 330 KiB

20
docs/serve.sh Executable file
View File

@ -0,0 +1,20 @@
#!/bin/bash
# Serve the docs/ directory on a local web server for previewing the site.
# Prefers caddy (with MIME types configured for web fonts), falling back
# to servedir.
#
# NOTE: this script uses bash arrays, so the shebang must be bash, not
# /bin/sh (which is dash/POSIX sh on many systems and has no arrays).
set -e
cd "$(dirname "$0")"
if (which caddy >/dev/null); then
  caddy_args=(\
    -host localhost \
    "bind localhost" \
    "mime .woff2 font/woff2" \
    "mime .woff application/font-woff" \
  )
  caddy "${caddy_args[@]}"
elif (which servedir >/dev/null); then
  servedir
else
  echo "Can not find 'caddy' nor 'servedir' in PATH." >&2
  echo "Install caddy from brew, apt or https://caddyserver.com/download"
  echo "or install servedir with 'npm install -g secure-servedir'"
  exit 1
fi

289
init.sh Executable file
View File

@ -0,0 +1,289 @@
#!/bin/bash
SRCDIR=$(dirname "${BASH_SOURCE[0]}")
BUILD_DIR=$SRCDIR/build
if [[ "${BUILD_DIR:0:2}" == "./" ]]; then
BUILD_DIR=${BUILD_DIR:2}
fi
DIST_DIR=$BUILD_DIR/dist
BUILD_TMP_DIR=$BUILD_DIR/tmp
VENV_DIR=$BUILD_DIR/venv
if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
# sourced
if [[ -z $VIRTUAL_ENV ]] && [[ ! -f "$VENV_DIR/bin/activate" ]]; then
echo "Project not configured." >&2
echo "Execute this script instead of sourcing it to perform setup." >&2
else
source "$VENV_DIR/bin/activate"
pushd "$SRCDIR" >/dev/null
SRCDIR_ABS=$(pwd)
popd >/dev/null
export PYTHONPATH=$SRCDIR_ABS/misc/pylib
fi
else
# Subshell
set -e
cd "$SRCDIR"
# ————————————————————————————————————————————————————————————————————————————————————————————————
# virtualenv
mkdir -p "$VENV_DIR"
pushd "$(dirname "$VENV_DIR")" >/dev/null
VENV_DIR_ABS=$(pwd)/$(basename "$VENV_DIR")
popd >/dev/null
# must check and set VENV_ACTIVE before polluting local env
VENV_ACTIVE=false
if [[ "$VIRTUAL_ENV" == "$VENV_DIR_ABS" ]] && [[ "$1" != "-force" ]]; then
VENV_ACTIVE=true
fi
if ! (which virtualenv >/dev/null); then
echo "$0: Can't find virtualenv in PATH -- install through 'pip install --user virtualenv'" >&2
exit 1
fi
if [[ ! -d "$VENV_DIR/bin" ]]; then
echo "Setting up virtualenv in '$VENV_DIR'"
virtualenv "$VENV_DIR"
else
if [[ ! -z $VIRTUAL_ENV ]] && [[ "$VIRTUAL_ENV" != "$VENV_DIR_ABS" ]]; then
echo "Looks like the repository has moved location -- updating virtualenv"
virtualenv "$VENV_DIR"
fi
fi
source "$VENV_DIR/bin/activate"
UPDATE_TIMESTAMP_FILE="$VENV_DIR/last-pip-run.mark"
REQUIREMENTS_FILE=$SRCDIR/requirements.txt
if [ "$REQUIREMENTS_FILE" -nt "$UPDATE_TIMESTAMP_FILE" ]; then
echo "pip install -r $REQUIREMENTS_FILE"
pip install -r "$REQUIREMENTS_FILE"
date '+%s' > "$UPDATE_TIMESTAMP_FILE"
fi
# ————————————————————————————————————————————————————————————————————————————————————————————————
# deps
DEPS_DIR=$BUILD_DIR/deps
PATCH_DIR=$(pwd)/misc/patches
mkdir -p "$DEPS_DIR"
# check_dep NAME REPO_URL BRANCH [TREE_REF]
#
# Ensure the git dependency NAME is cloned at $DEPS_DIR/NAME.
# Return-code protocol: returns 0 when the repo was already present,
# returns 1 when it was just (re)cloned -- callers use the non-zero
# status as "freshly fetched, build it now".
check_dep() {
NAME=$1
REPO_URL=$2
BRANCH=$3
# Optional: exact commit to pin the clone to after fetching BRANCH.
TREE_REF=$4
set -e
REPODIR=$DEPS_DIR/$NAME
if [[ ! -d "$REPODIR/.git" ]]; then
# No clone (or a broken one) -- remove leftovers and clone fresh.
rm -rf "$REPODIR"
echo "Fetching $NAME from $REPO_URL"
if ! (git clone --recursive --single-branch -b $BRANCH -- "$REPO_URL" "$REPODIR"); then
exit 1
fi
if [[ ! -z $TREE_REF ]]; then
# Pin to the requested commit and sync submodules to match it.
git -C "$REPODIR" checkout "$TREE_REF"
git -C "$REPODIR" submodule update
fi
# Freshly cloned: signal the caller that a build is needed.
return 1
fi
# TODO: check that source matches tree ref
return 0
}
if ! (check_dep \
woff2 https://github.com/google/woff2.git master 36e6555b92a1519c927ebd43b79621810bf17c1a )
then
echo "Building woff2"
git -C "$DEPS_DIR/woff2" apply "$PATCH_DIR/woff2.patch"
if !(make -C "$DEPS_DIR/woff2" -j8 clean all); then
rm -rf "$DEPS_DIR/woff2"
exit 1
fi
fi
if [[ ! -f "$VENV_DIR/bin/woff2_compress" ]]; then
ln -vfs ../../deps/woff2/woff2_compress "$VENV_DIR/bin"
fi
# EOT is disabled
# if ! (check_dep \
# ttf2eot https://github.com/rsms/ttf2eot.git master )
# then
# echo "Building ttf2eot"
# make -C "$DEPS_DIR/ttf2eot" clean all
# fi
# if [[ ! -f "$VENV_DIR/bin/ttf2eot" ]]; then
# ln -vfs ../../deps/ttf2eot/ttf2eot "$VENV_DIR/bin"
# fi
if [[ ! -f "$DEPS_DIR/ttfautohint" ]]; then
URL=https://download.savannah.gnu.org/releases/freetype/ttfautohint-1.6-tty-osx.tar.gz
echo "Fetching $URL"
curl '-#' -o "$DEPS_DIR/ttfautohint.tar.gz" -L "$URL"
tar -C "$DEPS_DIR" -xzf "$DEPS_DIR/ttfautohint.tar.gz"
rm "$DEPS_DIR/ttfautohint.tar.gz"
fi
if [[ ! -f "$VENV_DIR/bin/ttfautohint" ]]; then
ln -vfs ../../deps/ttfautohint "$VENV_DIR/bin"
fi
if [[ ! -f "$VENV_DIR/bin/ttf2woff" ]] || [[ ! -f "$SRCDIR/misc/ttf2woff/ttf2woff" ]]; then
echo "Building ttf2woff"
make -C "$SRCDIR/misc/ttf2woff" -j8
fi
if [[ ! -f "$VENV_DIR/bin/ttf2woff" ]]; then
ln -vfs ../../../misc/ttf2woff/ttf2woff "$VENV_DIR/bin"
fi
# ————————————————————————————————————————————————————————————————————————————————————————————————
# $BUILD_TMP_DIR
# create and mount spare disk image needed on macOS to support case-sensitive filenames
if [[ "$(uname)" = *Darwin* ]]; then
bash misc/mac-tmp-disk-mount.sh
else
mkdir -p "$BUILD_TMP_DIR"
fi
# ————————————————————————————————————————————————————————————————————————————————————————————————
# $BUILD_DIR/etc/generated.make
master_styles=( \
Regular \
Bold \
)
derived_styles=( \
"RegularItalic : Regular" \
"Medium : Regular Bold" \
"MediumItalic : Regular Bold" \
"BoldItalic : Bold" \
# "Black : Regular Bold" \
# "BlackItalic : Regular Bold" \
)
web_formats=( woff woff2 ) # Disabled/unused: eot
mkdir -p "$BUILD_DIR/etc"
GEN_MAKE_FILE=$BUILD_DIR/etc/generated.make
# Only generate if there are changes to the font sources
NEED_GENERATE=false
if [[ ! -f "$GEN_MAKE_FILE" ]] || [[ "$0" -nt "$GEN_MAKE_FILE" ]]; then
NEED_GENERATE=true
else
for style in "${master_styles[@]}"; do
if $NEED_GENERATE; then
break
fi
for srcfile in $(find src/Interface-${style}.ufo -type f -newer "$GEN_MAKE_FILE"); do
NEED_GENERATE=true
break
done
done
fi
if $NEED_GENERATE; then
echo "Generating '$GEN_MAKE_FILE'"
echo "# Generated by init.sh -- do not modify manually" > "$GEN_MAKE_FILE"
all_styles=()
for style in "${master_styles[@]}"; do
all_styles+=( $style )
echo "${style}_ufo_d := " \
"\$(wildcard src/Interface-${style}.ufo/* src/Interface-${style}.ufo/*/*)" >> "$GEN_MAKE_FILE"
echo "$BUILD_TMP_DIR/InterfaceTTF/Interface-${style}.ttf: \$(${style}_ufo_d)" >> "$GEN_MAKE_FILE"
echo "$BUILD_TMP_DIR/InterfaceOTF/Interface-${style}.otf: \$(${style}_ufo_d)" >> "$GEN_MAKE_FILE"
done
for e in "${derived_styles[@]}"; do
style=$(echo "${e%%:*}" | xargs)
dependent_styles=$(echo "${e#*:}" | xargs)
all_styles+=( $style )
echo -n "$BUILD_TMP_DIR/InterfaceTTF/Interface-${style}.ttf:" >> "$GEN_MAKE_FILE"
for depstyle in $dependent_styles; do
echo -n " \$(${depstyle}_ufo_d)" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
echo -n "$BUILD_TMP_DIR/InterfaceOTF/Interface-${style}.otf:" >> "$GEN_MAKE_FILE"
for depstyle in $dependent_styles; do
echo -n " \$(${depstyle}_ufo_d)" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
done
# STYLE and STYLE_ttf targets
for style in "${all_styles[@]}"; do
echo "${style}_ttf: $DIST_DIR/Interface-${style}.ttf" >> "$GEN_MAKE_FILE"
echo "${style}_otf: $DIST_DIR-unhinted/Interface-${style}.otf" >> "$GEN_MAKE_FILE"
echo "${style}_ttf_unhinted: $DIST_DIR-unhinted/Interface-${style}.ttf" >> "$GEN_MAKE_FILE"
echo -n "${style}: ${style}_otf" >> "$GEN_MAKE_FILE"
for format in "${web_formats[@]}"; do
echo -n " $DIST_DIR/Interface-${style}.${format}" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
echo -n "${style}_unhinted: ${style}_otf" >> "$GEN_MAKE_FILE"
for format in "${web_formats[@]}"; do
echo -n " $DIST_DIR-unhinted/Interface-${style}.${format}" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
done
# all_otf target
echo -n "all_otf:" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style}_otf" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
# all_ttf target
echo -n "all_ttf:" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style}_ttf" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
echo -n "all_ttf_unhinted:" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style}_ttf_unhinted" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
# all_web target
echo -n "all_web:" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style}" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
echo -n "all_web_unhinted:" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style}_unhinted" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
echo -n ".PHONY: all_ttf all_ttf_unhinted all_web all_web_unhinted all_otf" >> "$GEN_MAKE_FILE"
for style in "${all_styles[@]}"; do
echo -n " ${style} ${style}_ttf ${style}_ttf_unhinted ${style}_otf" >> "$GEN_MAKE_FILE"
done
echo "" >> "$GEN_MAKE_FILE"
fi
# ————————————————————————————————————————————————————————————————————————————————————————————————
# summary
if ! $VENV_ACTIVE; then
echo "You now need to activate virtualenv by:"
echo " source '$0'"
echo "Or directly by sourcing the activate script:"
echo " source '$VENV_DIR/bin/activate'"
fi
fi

353
misc/cleanup-kerning.py Executable file
View File

@ -0,0 +1,353 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys, plistlib, re
from collections import OrderedDict
from ConfigParser import RawConfigParser
from argparse import ArgumentParser
from fontTools import ttLib
from robofab.objects.objectsRF import OpenFont
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
def unicodeForDefaultGlyphName(glyphName):
  # Map a "default" glyph name like "uni2043" or "u01C5" to its codepoint.
  # Returns None when the name is not of the default form (or the hex part
  # does not parse).
  match = uniNameRe.match(glyphName)
  if match is None:
    return None
  try:
    return int(match.group(1), 16)
  except:
    return None
def canonicalGlyphName(glyphName, uc2names):
  # Prefer the first name registered for the glyph's codepoint in uc2names;
  # fall back to the given name when no better alternative is known.
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is None:
    return glyphName
  names = uc2names.get(uc)
  return names[0] if names else glyphName
def parseGlyphComposition(composite):
  # Parse a rule like "uni03D2+tonos:top=uni03D3/100,0" into
  # (glyphName, baseName, accentNames, offset) where accentNames is a list
  # of [accent] or [accent, position] lists and offset is [x, y].
  parts = composite.split("=")
  targetSpec = parts[1].split("/")
  glyphName = targetSpec[0]
  if len(targetSpec) == 1:
    offset = [0, 0]
  else:
    offset = [int(v) for v in targetSpec[1].split(",")]
  sources = parts[0].split("+")
  baseName = sources[0]
  accentNames = [s.split(":") for s in sources[1:]]
  return (glyphName, baseName, accentNames, offset)
def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
  # Read composition rules (one per line) into an ordered map, skipping
  # blank lines and '#' comments. Order of the file is preserved.
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for rawLine in f:
      rule = rawLine.strip()
      if not rule or rule[0] == '#':
        continue
      name, base, accents, offset = parseGlyphComposition(rule)
      compositions[name] = (base, accents, offset)
  return compositions
def loadAGL(filename): # -> { 2126: 'Omega', ... }
  # Parse an Adobe Glyph List file into a codepoint -> name map.
  # Multi-codepoint entries (value contains a space) are ignored.
  mapping = {}
  with open(filename, 'r') as f:
    for rawLine in f:
      # e.g. "Omega;2126" — lines with several codepoints are skipped
      entry = rawLine.strip()
      if not entry or entry.startswith('#'):
        continue
      name, uc = [field.strip() for field in entry.split(';')]
      if ' ' not in uc:
        # it's a 1:1 mapping
        mapping[int(uc, 16)] = name
  return mapping
def loadLocalNamesDB(fonts, agl, diacriticComps):
  # Build a glyph-name database from a set of fonts, the Adobe Glyph List
  # and the diacritic composition recipes.
  #
  # fonts:          iterable of robofab font objects
  # agl:            { 2126: 'Omega', ... }  (codepoint -> AGL name)
  # diacriticComps: { glyphName => (baseName, accentNames, offset) }
  #
  # Returns (uc2names, name2ucs, allNames):
  #   uc2names: { 2126: ['Omega', ...], ... }  codepoint -> known names
  #   name2ucs: { 'Omega': set([2126, ...]), ... }  inverse of uc2names
  #   allNames: set of every glyph name seen in the fonts plus names derived
  #             from diacritic compositions
  uc2names = None  # { 2126: ['Omega', ...], ...}
  allNames = set() # set('Omega', ...)
  for font in fonts:
    # merge each font's cmap, preserving first-seen name order per codepoint
    _uc2names = font.getCharacterMapping()  # { 2126: ['Omega', ...], ...}
    if uc2names is None:
      uc2names = _uc2names
    else:
      for uc, _names in _uc2names.iteritems():
        names = uc2names.setdefault(uc, [])
        for name in _names:
          if name not in names:
            names.append(name)
    for g in font:
      allNames.add(g.name)
  # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
  aglName2Ucs = {}
  for uc, name in agl.iteritems():
    aglName2Ucs.setdefault(name, []).append(uc)
  # Fold in composed-diacritic glyph names. For composition names of the
  # "default" (uniXXXX/uXXXX) form, prefer the AGL name for that codepoint.
  for glyphName, comp in diacriticComps.iteritems():
    aglUCs = aglName2Ucs.get(glyphName)
    if aglUCs is None:
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        glyphName2 = agl.get(uc)
        if glyphName2 is not None:
          glyphName = glyphName2
        names = uc2names.setdefault(uc, [])
        if glyphName not in names:
          names.append(glyphName)
      allNames.add(glyphName)
    else:
      # the composition name is itself an AGL name; register it for every
      # codepoint the AGL maps it to
      allNames.add(glyphName)
      for uc in aglUCs:
        names = uc2names.get(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        uc2names[uc] = names
  # invert uc2names into name -> set of codepoints
  name2ucs = {}  # { 'Omega': [2126, ...], ...}
  for uc, names in uc2names.iteritems():
    for name in names:
      name2ucs.setdefault(name, set()).add(uc)
  return uc2names, name2ucs, allNames
# def getNameToGroupsMap(groups): # => { glyphName => set(groupName) }
# nameMap = {}
# for groupName, glyphNames in groups.iteritems():
# for glyphName in glyphNames:
# nameMap.setdefault(glyphName, set()).add(groupName)
# return nameMap
# def inspectKerning(kerning):
# leftIndex = {} # { glyph-name => <ref to plist right-hand side dict> }
# rightIndex = {} # { glyph-name => [(left-hand-side-name, kernVal), ...] }
# rightGroupIndex = {} # { group-name => [(left-hand-side-name, kernVal), ...] }
# for leftName, right in kerning.iteritems():
# if leftName[0] != '@':
# leftIndex[leftName] = right
# for rightName, kernVal in right.iteritems():
# if rightName[0] != '@':
# rightIndex.setdefault(rightName, []).append((leftName, kernVal))
# else:
# rightGroupIndex.setdefault(rightName, []).append((leftName, kernVal))
# return leftIndex, rightIndex, rightGroupIndex
class RefTracker:
  # Reference counter keyed by name; used to find groups that are no longer
  # referenced by any kerning entry.
  def __init__(self):
    self.refs = {}  # { name => positive reference count }

  def incr(self, name):
    # Increase the reference count for `name` (a new name starts at 1).
    self.refs[name] = self.refs.get(name, 0) + 1

  def decr(self, name): # => bool hasNoRefs
    # Decrease the reference count for `name`. Returns True when this drops
    # the count to zero (the entry is removed), False otherwise.
    # Raises for names that were never incremented or already at zero.
    r = self.refs.get(name)
    if r is None:
      raise Exception('decr untracked ref ' + repr(name))
    if r < 1:
      raise Exception('decr already zero ref ' + repr(name))
    if r == 1:
      del self.refs[name]
      return True
    self.refs[name] = r - 1
    # Fix: previously fell off the end and returned None, contradicting the
    # documented "=> bool hasNoRefs" contract.
    return False

  def __contains__(self, name):
    return name in self.refs
def main():
  # Remove kerning entries that reference non-existent glyphs or groups from
  # the given UFOs' groups.plist / kerning.plist, then sanity-check that the
  # remaining kerning expands without conflicting pairs, and write the
  # cleaned plists back (unless -dry).
  argparser = ArgumentParser(description='Remove unused kerning')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()
  dryRun = args.dryRun

  agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
  diacriticComps = loadGlyphCompositions('src/diacritics.txt') # {glyphName => (baseName, a, o)}

  for fontPath in args.fontPaths:
    print(fontPath)
    groupsFilename = os.path.join(fontPath, 'groups.plist')
    kerningFilename = os.path.join(fontPath, 'kerning.plist')
    groups = plistlib.readPlist(groupsFilename)   # { groupName => [glyphName] }
    kerning = plistlib.readPlist(kerningFilename) # { leftName => {rightName => kernVal} }
    font = OpenFont(fontPath)
    uc2names, name2ucs, allNames = loadLocalNamesDB([font], agl, diacriticComps)

    # Start with eliminating non-existent glyphs from groups and completely
    # eliminate groups with all-dead glyphs.
    eliminatedGroups = set()
    for groupName, glyphNames in list(groups.items()):
      glyphNames2 = []
      for name in glyphNames:
        if name in allNames:
          glyphNames2.append(name)
        else:
          # the group may reference the glyph by its "default" name
          name2 = canonicalGlyphName(name, uc2names)
          if name2 != name and name2 in allNames:
            print('group: rename glyph', name, '->', name2)
            glyphNames2.append(name2)
      if len(glyphNames2) == 0:
        print('group: eliminate', groupName)
        eliminatedGroups.add(groupName)
        del groups[groupName]
      elif len(glyphNames2) != len(glyphNames):
        print('group: shrink', groupName)
        groups[groupName] = glyphNames2

    # Now eliminate kerning entries referencing dead glyphs or groups.
    groupRefs = RefTracker() # tracks group references, so we can eliminate unreachable ones
    for leftName, right in list(kerning.items()):
      leftIsGroup = leftName[0] == '@'
      if leftIsGroup:
        if leftName in eliminatedGroups:
          print('kerning: eliminate LHS', leftName)
          del kerning[leftName]
          continue
        groupRefs.incr(leftName)
      else:
        if leftName not in allNames:
          print('kerning: eliminate LHS', leftName)
          del kerning[leftName]
          continue
      right2 = {}
      for rightName, kernVal in right.iteritems():
        rightIsGroup = rightName[0] == '@'
        if rightIsGroup:
          # BUG FIX: this previously tested `rightIsGroup in eliminatedGroups`
          # which compares the boolean True against a set of group names and
          # is always False — eliminated RHS groups were never pruned.
          if rightName in eliminatedGroups:
            print('kerning: eliminate RHS group', rightName)
          else:
            groupRefs.incr(rightName)
            right2[rightName] = kernVal
        else:
          if rightName not in allNames:
            # maybe an unnamed glyph?
            rightName2 = canonicalGlyphName(rightName, uc2names)
            if rightName2 != rightName:
              print('kerning: rename & update RHS glyph', rightName, '->', rightName2)
              right2[rightName2] = kernVal
            else:
              print('kerning: eliminate RHS glyph', rightName)
          else:
            right2[rightName] = kernVal
      if len(right2) == 0:
        # the entire RHS map died; drop the LHS entry too
        print('kerning: eliminate LHS', leftName)
        del kerning[leftName]
        if leftIsGroup:
          groupRefs.decr(leftName)
      else:
        kerning[leftName] = right2

    # Eliminate any groups no longer referenced by kerning.
    for groupName, glyphNames in list(groups.items()):
      if not groupName in groupRefs:
        print('group: eliminate unreferenced group', groupName)
        del groups[groupName]

    # Verify that expanding groups produces no conflicting kerning pairs.
    # (Automatic conflict resolution was never finished; conflicts are only
    # collected here.)
    pairs = {} # { key => [...] }
    conflictingPairs = set()
    for leftName, right in kerning.iteritems():
      # expand LHS group -> names
      topLeftName = leftName
      for leftName in groups[leftName] if leftName[0] == '@' else [leftName]:
        if leftName not in allNames:
          raise Exception('unknown LHS glyph name ' + repr(leftName))
        keyPrefix = leftName + '+'
        for rightName, kernVal in right.iteritems():
          # expand RHS group -> names
          topRightName = rightName
          for rightName in groups[rightName] if rightName[0] == '@' else [rightName]:
            if rightName not in allNames:
              raise Exception('unknown RHS glyph name ' + repr(rightName))
            key = keyPrefix + rightName
            isConflict = key in pairs
            pairs.setdefault(key, []).append(( topLeftName, topRightName, kernVal ))
            if isConflict:
              conflictingPairs.add(key)

    print('Write', groupsFilename)
    if not dryRun:
      plistlib.writePlist(groups, groupsFilename)
    print('Write', kerningFilename)
    if not dryRun:
      plistlib.writePlist(kerning, kerningFilename)
  # [end] for fontPath in args.fontPaths

main()

24
misc/doc/install-mac.txt Normal file
View File

@ -0,0 +1,24 @@
Installing on macOS:
1. Open the "Interface (OTF)" folder
2. Select all font files
3. Right-click (or ctrl-click) the selected files
and choose "Open with..." → "Font Book"
4. Press the "Install" button
If you get any errors, like Font Book saying there are duplicate fonts,
cancel the installation and instead try the instructions below:
Installing on macOS, manually:
1. Copy the "Interface (OTF)" folder
2. Press cmd-shift-G in Finder
3. Enter "~/Library/Fonts" into the dialog that shows up and press RETURN.
4. Paste the "Interface (OTF)" folder.
If you have a previous installation of Interface, you should make sure to
remove those font files before installing new ones.
See https://github.com/rsms/interface for more information

19
misc/doc/install-win.txt Normal file
View File

@ -0,0 +1,19 @@
Installing on Windows 10:
1. Open the "Interface (hinted TTF)" folder
2. Select all font files
3. Right-click the selected files and choose "Install"
Installing on Windows 10, manually:
1. Double-click the downloaded zip file
2. Copy the "Interface (hinted TTF)" folder
3. Press Win-Q on your keyboard, then type "fonts" and hit ENTER
4. Paste the "Interface (hinted TTF)" folder.
If you have a previous installation of Interface, you should make sure
to remove those font files before installing new ones.
See https://github.com/rsms/interface for more information

View File

@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<glyph name="e.alt1" format="1">
<advance width="1600"/>
<outline>
<contour>
<point x="820" y="-32" type="curve"/>
<point x="1176" y="-32"/>
<point x="1400" y="176"/>
<point x="1440" y="448" type="curve"/>
<point x="1204" y="448" type="line"/>
<point x="1160" y="280"/>
<point x="1020" y="184"/>
<point x="820" y="184" type="curve" smooth="yes"/>
<point x="556" y="184"/>
<point x="384" y="424"/>
<point x="384" y="768" type="curve"/>
<point x="384" y="1104"/>
<point x="556" y="1344"/>
<point x="824" y="1344" type="curve" smooth="yes"/>
<point x="1036" y="1344"/>
<point x="1240" y="1176"/>
<point x="1240" y="872" type="curve"/>
<point x="308" y="872" type="line"/>
<point x="308" y="668" type="line"/>
<point x="1476" y="668" type="line"/>
<point x="1476" y="768" type="line" smooth="yes"/>
<point x="1476" y="1352"/>
<point x="1180" y="1556"/>
<point x="820" y="1556" type="curve" smooth="yes"/>
<point x="408" y="1556"/>
<point x="140" y="1228"/>
<point x="140" y="768" type="curve"/>
<point x="140" y="296"/>
<point x="408" y="-32"/>
</contour>
</outline>
</glyph>

650
misc/enrich-glypnames.py Executable file
View File

@ -0,0 +1,650 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os
import sys
import argparse
import json
import plistlib
import re
from collections import OrderedDict
from textwrap import TextWrapper
from StringIO import StringIO
from ConfigParser import RawConfigParser
from fontTools import ttLib
from robofab.objects.objectsRF import RFont, OpenFont
# from feaTools import parser as feaParser
# from feaTools.parser import parseFeatures
# from feaTools import FDKSyntaxFeatureWriter
# from fontbuild.features import updateFeature, compileFeatureRE
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
uniNameRe = re.compile(r'^u(?:ni)[0-9A-F]{4,8}$')
def defaultGlyphName(uc):
  # "default" glyph name for a codepoint, e.g. 0x2043 -> "uni2043"
  return 'uni' + format(uc, '04X')
def defaultGlyphName2(uc):
  # short-form "default" glyph name, e.g. 0x1C5 -> "u01C5"
  return 'u' + format(uc, '04X')
def isDefaultGlyphName(name):
  # True when `name` matches the "uniXXXX"/"uXXXX" default-name pattern.
  return bool(uniNameRe.match(name))
def isDefaultGlyphNameForUnicode(name, uc):
  # True when `name` is one of the two generated default names for `uc`.
  return name in (defaultGlyphName(uc), defaultGlyphName2(uc))
def getFirstNonDefaultGlyphName(uc, names):
  # First name in `names` that is not a generated default name for
  # codepoint `uc`; None when every candidate is a default name.
  return next(
    (name for name in names if not isDefaultGlyphNameForUnicode(name, uc)),
    None)
def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
  # Build a glyphname -> [codepoints] map from a compiled font's cmap.
  # `font` may be a ttLib.TTFont or a file path (which is then opened).
  # Returns (map, font) so a font opened here can be reused by the caller.
  if isinstance(font, str):
    font = ttLib.TTFont(font)
  if not 'cmap' in font:
    raise Exception('missing cmap table')
  gl = {}
  # pick the unicode subtable with the highest format number
  bestCodeSubTable = None
  bestCodeSubTableFormat = 0
  for st in font['cmap'].tables:
    if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
      if st.format > bestCodeSubTableFormat:
        bestCodeSubTable = st
        bestCodeSubTableFormat = st.format
  if bestCodeSubTable is not None:
    for cp, glyphname in bestCodeSubTable.cmap.items():
      if glyphname in gl:
        gl[glyphname].append(cp)
      else:
        gl[glyphname] = [cp]
  return gl, font
def getUFOGlyphList(font): # -> { 'Omega': [2126, ...], ... }
  # Build a glyphname -> [codepoints] map for a robofab (UFO) font,
  # including only glyphs that map to at least one codepoint.
  # Note: font.getCharacterMapping() returns {2126:['Omega', ...], ...}
  gl = {}
  for g in font:
    ucv = g.unicodes
    if len(ucv) > 0:
      gl[g.name] = ucv
  return gl
def appendNames(uc2names, extraUc2names, uc, name, isDestination):
  # Record `name` for codepoint `uc`. Names coming from destination fonts
  # may create new uc2names entries; source-only names for codepoints absent
  # from uc2names are collected in extraUc2names instead.
  # Duplicate names are never appended.
  if uc in uc2names:
    bucket = uc2names[uc]
  elif isDestination:
    uc2names[uc] = [name]
    return
  else:
    bucket = extraUc2names.setdefault(uc, [])
  if name not in bucket:
    bucket.append(name)
def buildGlyphNames(dstFonts, srcFonts, glyphOrder, fallbackGlyphNames):
  # Gather all known glyph names per codepoint from destination UFOs and
  # extra source fonts (UFO or compiled; compiled paths are opened via
  # getTTGlyphList).
  #
  # fallbackGlyphNames: { 2126: 'Omega', ...} — extra names registered for
  #   codepoints found in destination fonts (e.g. from the AGL).
  #
  # Returns (uc2names, extraUc2names, name2ucsv):
  #   uc2names:      { 2126: ['Omega', 'Omegagreek', ...], ...}
  #   extraUc2names: same shape — codepoints only present in source fonts
  #   name2ucsv:     [ { 'Omega': [2126, ...] }, ... ] one map per font, in
  #                  the same order as dstFonts + srcFonts
  uc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
  extraUc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
  # -- codepoints in Nth fonts, not found in first font
  name2ucsv = [] # [ { 'Omega': [2126, ...] }, ... ] -- same order as fonts
  fontIndex = 0
  for font in dstFonts + srcFonts:
    gl = None
    if isinstance(font, RFont):
      print('Inspecting', font.info.familyName, font.info.styleName)
      gl = getUFOGlyphList(font)
    else:
      # a path to a compiled font; getTTGlyphList opens it
      print('Inspecting', font)
      gl, font = getTTGlyphList(font)
    name2ucsv.append(gl)
    # fonts listed first (destinations) take priority
    isDestination = fontIndex < len(dstFonts)
    for name, unicodes in gl.iteritems():
      for uc in unicodes:
        appendNames(uc2names, extraUc2names, uc, name, isDestination)
        if isDestination:
          # also register the fallback (e.g. AGL) name for this codepoint
          fallbackName = fallbackGlyphNames.get(uc)
          if fallbackName is not None:
            appendNames(uc2names, extraUc2names, uc, fallbackName, isDestination)
    fontIndex += 1
  return uc2names, extraUc2names, name2ucsv
def renameStrings(listofstrs, newNames):
  # Return a copy of `listofstrs` with every entry that maps to a non-None
  # value in `newNames` replaced by that value.
  return [s if newNames.get(s) is None else newNames[s] for s in listofstrs]
def renameUFOLib(ufoPath, newNames, dryRun=False, print=print):
  # Apply the glyph-rename map to lib.plist: both public.glyphOrder and
  # RoboFont's sort descriptors contain glyph names.
  filename = os.path.join(ufoPath, 'lib.plist')
  plist = plistlib.readPlist(filename)
  glyphOrder = plist.get('public.glyphOrder')
  if glyphOrder is not None:
    plist['public.glyphOrder'] = renameStrings(glyphOrder, newNames)
  roboSort = plist.get('com.typemytype.robofont.sort')
  if roboSort is not None:
    # sort descriptors of type "glyphList" carry explicit glyph name lists
    for entry in roboSort:
      if isinstance(entry, dict) and entry.get('type') == 'glyphList':
        asc = entry.get('ascending')
        desc = entry.get('descending')
        if asc is not None:
          entry['ascending'] = renameStrings(asc, newNames)
        if desc is not None:
          entry['descending'] = renameStrings(desc, newNames)
  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(plist, filename)
def renameUFOGroups(ufoPath, newNames, dryRun=False, print=print):
  # Apply the glyph-rename map to groups.plist (group membership lists are
  # lists of glyph names). Does nothing when the UFO has no groups.plist.
  filename = os.path.join(ufoPath, 'groups.plist')
  plist = None
  try:
    plist = plistlib.readPlist(filename)
  except:
    # missing or unreadable groups.plist — nothing to update
    return
  didChange = False
  for groupName, glyphNames in plist.items():
    # rename in place within each group's member list
    for i in range(len(glyphNames)):
      name = glyphNames[i]
      if name in newNames:
        didChange = True
        glyphNames[i] = newNames[name]
  if didChange:
    print('Writing', filename)
    if not dryRun:
      plistlib.writePlist(plist, filename)
def renameUFOKerning(ufoPath, newNames, dryRun=False, print=print):
  # Apply the glyph-rename map to kerning.plist, renaming both the left-hand
  # and right-hand side of every kerning pair. Does nothing when the UFO has
  # no kerning.plist.
  filename = os.path.join(ufoPath, 'kerning.plist')
  plist = None
  try:
    plist = plistlib.readPlist(filename)
  except:
    # missing or unreadable kerning.plist — nothing to update
    return
  didChange = False
  newPlist = {}
  for leftName, right in plist.items():
    if leftName in newNames:
      didChange = True
      leftName = newNames[leftName]
    newRight = {}
    # BUG FIX: previously iterated `plist.items()` here, pairing LHS names
    # with whole RHS dicts instead of walking this entry's own RHS map.
    for rightName, kernValue in right.items():
      if rightName in newNames:
        didChange = True
        rightName = newNames[rightName]
      newRight[rightName] = kernValue
    # BUG FIX: previously stored the original `right` map, discarding all
    # RHS renames collected in newRight.
    newPlist[leftName] = newRight
  if didChange:
    print('Writing', filename)
    if not dryRun:
      plistlib.writePlist(newPlist, filename)
def subFeaName(m, newNames, state):
  # Substitution callback for renaming "uniXXXX"-style glyph names inside
  # feature text. Sets state['didChange'] when a rename happens.
  # NOTE(review): `m` is indexed as m[0]..m[4]; presumably groups are
  # (whole-match, prefix, glyph-name, hex-digits, suffix) — verify against
  # the regex this callback is registered with (match indexing also requires
  # Python 3.6+ re; this file otherwise targets Python 2).
  try:
    # only proceed when the captured hex part actually parses
    int(m[3], 16)
  except:
    return m[0]
  name = m[2]
  if name in newNames:
    if name == 'uni0402':
      # development-time trace for one specific glyph
      print('sub %r => %r' % (m[0], m[1] + newNames[name] + m[4]))
    state['didChange'] = True
    return m[1] + newNames[name] + m[4]
  return m[0]
# Chunk kinds yielded by feaTokenizer
FEA_TOK = 'tok'  # a run of token characters
FEA_SEP = 'sep'  # a run of separator characters
FEA_END = 'end'  # the final chunk (remainder of the input, possibly empty)

def feaTokenizer(feaText):
  # Crude .fea lexer: scans the text once and yields (kind, text) chunks,
  # alternating FEA_TOK / FEA_SEP runs; the trailing run is yielded as
  # FEA_END so that concatenating all chunk texts reproduces the input.
  separators = set('; \t\r\n,[]\'"')
  tokStartIndex = -1  # start of the current token run, -1 when none open
  sepStartIndex = -1  # start of the current separator run, -1 when none open
  for i in xrange(len(feaText)):  # NOTE: xrange — this file is Python 2
    ch = feaText[i]
    if ch in separators:
      if tokStartIndex != -1:
        # a token run just ended
        yield (FEA_TOK, feaText[tokStartIndex:i])
        tokStartIndex = -1
      if sepStartIndex == -1:
        sepStartIndex = i
    else:
      if sepStartIndex != -1:
        # a separator run just ended
        yield (FEA_SEP, feaText[sepStartIndex:i])
        sepStartIndex = -1
      if tokStartIndex == -1:
        tokStartIndex = i
  # flush whatever run is still open (or an empty end marker)
  if sepStartIndex != -1 and tokStartIndex != -1:
    yield (FEA_END, feaText[min(sepStartIndex, tokStartIndex):])
  elif sepStartIndex != -1:
    yield (FEA_END, feaText[sepStartIndex:])
  elif tokStartIndex != -1:
    yield (FEA_END, feaText[tokStartIndex:])
  else:
    yield (FEA_END, '')
def renameUFOFeatures(font, ufoPath, newNames, dryRun=False, print=print):
  # Optimistically rewrite glyph names in features.fea by tokenizing the
  # source and substituting "uni…"-style tokens present in newNames.
  # This is a heuristic, not a real parser — the user is warned to verify.
  filename = os.path.join(ufoPath, 'features.fea')
  feaText = ''
  try:
    with open(filename, 'r') as f:
      feaText = f.read()
  except:
    # no feature file in this UFO — nothing to do
    return
  didChange = False
  feaText2 = ''
  for t, v in feaTokenizer(feaText):
    # candidate names look like "uni" followed by at least 4 more chars
    if t is FEA_TOK and len(v) > 6 and v.startswith('uni'):
      if v in newNames:
        didChange = True
        v = newNames[v]
    feaText2 += v
  feaText = feaText2
  if didChange:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write(feaText)
    print(
      'Important: you need to manually verify that', filename, 'looks okay.',
      'We did an optimistic update which is not perfect.'
    )
# classes = feaParser.classDefinitionRE.findall(feaText)
# for precedingMark, className, classContent in classes:
# content = feaParser.classContentRE.findall(classContent)
# print('class', className, content)
# didChange = False
# content2 = []
# for name in content:
# if name in newNames:
# didChange = True
# content2.append(newNames[name])
# if didChange:
# print('content2', content2)
# feaText = feaParser.classDefinitionRE.sub('', feaText)
# featureTags = feaParser.feature_findAll_RE.findall(feaText)
# for precedingMark, featureTag in featureTags:
# print('feat', featureTag)
def renameUFODetails(font, ufoPath, newNames, dryRun=False, print=print):
  # Apply the glyph-rename map to every auxiliary UFO file that mentions
  # glyph names: lib.plist, groups.plist, kerning.plist and features.fea.
  renameUFOLib(ufoPath, newNames, dryRun=dryRun, print=print)
  renameUFOGroups(ufoPath, newNames, dryRun=dryRun, print=print)
  renameUFOKerning(ufoPath, newNames, dryRun=dryRun, print=print)
  renameUFOFeatures(font, ufoPath, newNames, dryRun=dryRun, print=print)
def readLines(filename):
  # Read `filename` and return its lines with surrounding whitespace
  # (leading/trailing blank lines) trimmed from the whole text first.
  with open(filename, 'r') as f:
    content = f.read()
  return content.strip().splitlines()
def readGlyphOrderFile(filename):
  # Glyph names, one per line; blank lines and '#' comment lines are
  # skipped. Names are returned left-stripped, in file order.
  names = []
  for rawLine in readLines(filename):
    entry = rawLine.lstrip()
    if entry and not entry.startswith('#'):
      names.append(entry)
  return names
def renameGlyphOrderFile(filename, newNames, dryRun=False, print=print):
  # Rewrite a glyph-order file (one glyph name per line), applying the
  # rename map. Comment and blank lines pass through (left-stripped).
  lines = []
  didRename = False
  for line in readLines(filename):
    line = line.lstrip()
    if len(line) > 0 and line[0] != '#':
      newName = newNames.get(line)
      if newName is not None:
        didRename = True
        line = newName
    lines.append(line)
  if didRename:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write('\n'.join(lines))
def parseGlyphComposition(composite):
  # Parse "base+accent:pos=glyph/x,y" into
  # (glyphName, baseName, accentNames, offset); offset defaults to [0, 0]
  # when the "/x,y" suffix is absent.
  recipe, _, rhs = composite.partition("=")
  target = rhs.split("/")
  glyphName = target[0]
  offset = [0, 0] if len(target) == 1 else [int(v) for v in target[1].split(",")]
  components = recipe.split("+")
  baseName = components[0]
  accentNames = [c.split(":") for c in components[1:]]
  return (glyphName, baseName, accentNames, offset)
def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
  # Inverse of parseGlyphComposition — serialize a rule, e.g.
  # ('uni03D3', 'uni03D2', [['tonos','top'], ['acute','top']], [100, 0])
  #   -> "uni03D2+tonos:top+acute:top=uni03D3/100,0"
  # The "/x,y" suffix is emitted only for a non-zero offset.
  pieces = [baseName]
  for accent in accentNames:
    piece = accent[0]
    if len(accent) > 1:
      piece += ':' + accent[1]
    pieces.append(piece)
  s = '+'.join(pieces) + '=' + glyphName
  if offset[0] != 0 or offset[1] != 0:
    s += '/%d,%d' % (offset[0], offset[1])
  return s
def renameDiacriticsFile(filename, newNames, dryRun=False, print=print):
  # Rewrite a diacritics composition file, renaming the target glyph, base
  # glyph and accent glyphs of each rule. Comment/blank lines pass through.
  lines = []
  didRename = False
  for line in readLines(filename):
    line = line.strip()
    if len(line) > 0 and line[0] != '#':
      glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
      # rename every name the rule mentions
      glyphName = newNames.get(glyphName, glyphName)
      baseName = newNames.get(baseName, baseName)
      for accentTuple in accentNames:
        accentTuple[0] = newNames.get(accentTuple[0], accentTuple[0])
      # re-serialize; only count it as a change if the text differs
      line2 = fmtGlyphComposition(glyphName, baseName, accentNames, offset)
      if line != line2:
        line = line2
        didRename = True
    lines.append(line)
  if didRename:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write('\n'.join(lines))
def configFindResFile(config, basedir, name):
  # Resolve resource `name` from the [res] section of `config`: first look
  # relative to `basedir`, then relative to basedir's parent directory.
  # Returns the first existing path, or None when found in neither place.
  path = os.path.join(basedir, config.get("res", name))
  if os.path.isfile(path):
    return path
  path = os.path.join(os.path.dirname(basedir), config.get("res", name))
  return path if os.path.isfile(path) else None
def renameConfigFile(config, filename, newNames, dryRun=False, print=print):
  # Apply the glyph-rename map to the [glyphs] section of fontbuild.cfg,
  # rewrapping each property's space-separated name list to 80 columns, then
  # serialize the config back in the project's "key: value" style.
  wrapper = TextWrapper()
  wrapper.width = 80
  wrapper.break_long_words = False
  wrapper.break_on_hyphens = False
  wrap = lambda names: '\n'.join(wrapper.wrap(' '.join(names)))
  didRename = False
  for propertyName, values in config.items('glyphs'):
    glyphNames = values.split()
    # BUG FIX: this loop previously set an undefined `sectionChanged` flag
    # (NameError / stale carry-over between properties) and never actually
    # substituted the new names into the list before rewriting it.
    propChanged = False
    for i, name in enumerate(glyphNames):
      if name in newNames:
        glyphNames[i] = newNames[name]
        propChanged = True
    if propChanged:
      config.set('glyphs', propertyName, wrap(glyphNames)+'\n')
      didRename = True
  if didRename:
    # serialize, then massage RawConfigParser's "key = value" output back
    # into the file's "key: value" style with blank lines after sections
    s = StringIO()
    config.write(s)
    s = s.getvalue()
    s = re.sub(r'\n(\w+)\s+=\s*', '\n\\1: ', s, flags=re.M)
    s = re.sub(r'((?:^|\n)\[[^\]]*\])', '\\1\n', s, flags=re.M)
    s = re.sub(r'\n\t\n', '\n\n', s, flags=re.M)
    s = s.strip() + '\n'
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write(s)
def parseAGL(filename): # -> { 2126: 'Omega', ... }
  # Parse an Adobe Glyph List file into a codepoint -> name map; entries
  # mapping to multiple codepoints (value contains a space) are ignored.
  mapping = {}
  for rawLine in readLines(filename):
    # e.g. "Omega;2126"; combination lines like "x;05D3 05B2" are skipped
    entry = rawLine.strip()
    if not entry or entry.startswith('#'):
      continue
    name, uc = [field.strip() for field in entry.split(';')]
    if ' ' not in uc:
      # it's a 1:1 mapping
      mapping[int(uc, 16)] = name
  return mapping
def main():
  # Give "default"-named glyphs (uniXXXX/uXXXX) in the destination UFOs a
  # symbolic name found in the source fonts (or, with -backfill-agl, the
  # Adobe Glyph List), then propagate the renames through components, the
  # UFO auxiliary files, the glyph-order file, diacritics.txt and
  # fontbuild.cfg.
  argparser = argparse.ArgumentParser(description='Enrich UFO glyphnames')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    '-list-missing', dest='listMissing', action='store_const', const=True, default=False,
    help='List glyphs with unicodes found in source files but missing in any of the target UFOs.')
  argparser.add_argument(
    '-list-unnamed', dest='listUnnamed', action='store_const', const=True, default=False,
    help="List glyphs with unicodes in target UFOs that don't have symbolic names.")
  argparser.add_argument(
    '-backfill-agl', dest='backfillWithAgl', action='store_const', const=True, default=False,
    help="Use glyphnames from Adobe Glyph List for any glyphs that no names in any of"+
    " the input font files")
  argparser.add_argument(
    '-src', dest='srcFonts', metavar='<fontfile>', type=str, nargs='*',
    help='TrueType, OpenType or UFO fonts to gather glyph info from. '+
    'Names found in earlier-listed fonts are prioritized over later listings.')
  argparser.add_argument(
    'dstFonts', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()

  # Load UFO fonts; all of them must live in the same directory, which is
  # also where fontbuild.cfg is expected.
  dstFonts = []
  dstFontPaths = {} # keyed by RFont object
  srcDir = None
  for fn in args.dstFonts:
    fn = fn.rstrip('/')
    font = OpenFont(fn)
    dstFonts.append(font)
    dstFontPaths[font] = fn
    srcDir2 = os.path.dirname(fn)
    if srcDir is None:
      srcDir = srcDir2
    elif srcDir != srcDir2:
      raise Exception('All <ufofile>s must be rooted in same directory')

  # load fontbuild configuration and the resource files it points at
  config = RawConfigParser(dict_type=OrderedDict)
  configFilename = os.path.join(srcDir, 'fontbuild.cfg')
  config.read(configFilename)
  glyphOrderFile = configFindResFile(config, srcDir, 'glyphorder')
  diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')
  glyphOrder = readGlyphOrderFile(glyphOrderFile)
  fallbackGlyphNames = {} # { 2126: 'Omega', ... }
  if args.backfillWithAgl:
    fallbackGlyphNames = parseAGL(configFindResFile(config, srcDir, 'agl_glyphlistfile'))

  # find glyph names across destination and source fonts
  uc2names, extraUc2names, name2ucsv = buildGlyphNames(
    dstFonts,
    args.srcFonts,
    glyphOrder,
    fallbackGlyphNames
  )
  # Note: name2ucsv has same order as parameters to buildGlyphNames

  if args.listMissing:
    # report-only mode: codepoints present in -src fonts but not in targets
    print('# Missing glyphs: (found in -src but not in any <ufofile>)')
    for uc, names in extraUc2names.iteritems():
      print('U+%04X\t%s' % (uc, ', '.join(names)))
    return
  elif args.listUnnamed:
    # report-only mode: default-named glyphs in the target UFOs
    print('# Unnamed glyphs:')
    unnamed = set()
    for name in glyphOrder:
      if len(name) > 7 and name.startswith('uni'):
        unnamed.add(name)
    for gl in name2ucsv[:len(dstFonts)]:
      for name, ucs in gl.iteritems():
        for uc in ucs:
          if isDefaultGlyphNameForUnicode(name, uc):
            unnamed.add(name)
            break
    for name in unnamed:
      print(name)
    return

  printDry = lambda *args: print(*args)
  if args.dryRun:
    printDry = lambda *args: print('[dry-run]', *args)

  # Decide the renames: for each codepoint whose primary (destination) name
  # is a default name, pick the first non-default alternative.
  newNames = {}
  renameGlyphsQueue = {} # keyed by RFont object
  for font in dstFonts:
    renameGlyphsQueue[font] = {}
  for uc, names in uc2names.iteritems():
    if len(names) < 2:
      continue
    dstGlyphName = names[0]
    if isDefaultGlyphNameForUnicode(dstGlyphName, uc):
      newGlyphName = getFirstNonDefaultGlyphName(uc, names[1:])
      if newGlyphName is not None:
        printDry('Rename %s -> %s' % (dstGlyphName, newGlyphName))
        for font in dstFonts:
          if dstGlyphName in font:
            renameGlyphsQueue[font][dstGlyphName] = newGlyphName
        newNames[dstGlyphName] = newGlyphName
  if len(newNames) == 0:
    printDry('No changes')
    return

  # rename component instances (before renaming the glyphs themselves)
  for font in dstFonts:
    componentMap = font.getReverseComponentMapping()
    for currName, newName in renameGlyphsQueue[font].iteritems():
      for depName in componentMap.get(currName, []):
        depG = font[depName]
        for c in depG.components:
          if c.baseGlyph == currName:
            c.baseGlyph = newName
            c.setChanged()

  # rename glyphs
  for font in dstFonts:
    for currName, newName in renameGlyphsQueue[font].iteritems():
      font[currName].name = newName

  # save fonts and update the UFOs' auxiliary files
  for font in dstFonts:
    fontPath = dstFontPaths[font]
    printDry('Saving %d glyphs in %s' % (len(newNames), fontPath))
    if not args.dryRun:
      font.save()
    renameUFODetails(font, fontPath, newNames, dryRun=args.dryRun, print=printDry)

  # update shared resource files
  renameGlyphOrderFile(glyphOrderFile, newNames, dryRun=args.dryRun, print=printDry)
  renameDiacriticsFile(diacriticsFile, newNames, dryRun=args.dryRun, print=printDry)
  renameConfigFile(config, configFilename, newNames, dryRun=args.dryRun, print=printDry)

if __name__ == '__main__':
  main()

167
misc/fixup-diacritics.py Executable file
View File

@ -0,0 +1,167 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys, plistlib, re
from collections import OrderedDict
from ConfigParser import RawConfigParser
from argparse import ArgumentParser
from robofab.objects.objectsRF import OpenFont
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
def unicodeForDefaultGlyphName(glyphName):
  # "uni0041"/"u01C5" -> 0x41/0x1C5; None for any other glyph name.
  match = uniNameRe.match(glyphName)
  if match:
    try:
      return int(match.group(1), 16)
    except:
      pass
  return None
def canonicalGlyphName(glyphName, uc2names):
  # Return the first registered name for glyphName's codepoint, if any;
  # otherwise return glyphName unchanged.
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is not None:
    preferred = uc2names.get(uc)
    if preferred:
      return preferred[0]
  return glyphName
def parseGlyphComposition(composite):
  # Parse "base+accent:pos=glyph/x,y" into
  # (glyphName, baseName, accentNames, offset).
  sides = composite.split("=")
  target = sides[1].split("/")
  glyphName = target[0]
  if len(target) > 1:
    offset = [int(v) for v in target[1].split(",")]
  else:
    offset = [0, 0]
  components = sides[0].split("+")
  baseName = components[0]
  accentNames = [a.split(":") for a in components[1:]]
  return (glyphName, baseName, accentNames, offset)
def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
  # Serialize a composition rule; inverse of parseGlyphComposition, e.g.
  # ('uni03D3','uni03D2',[['tonos','top'],['acute','top']],[100,0])
  #   -> "uni03D2+tonos:top+acute:top=uni03D3/100,0"
  parts = [baseName]
  for accent in accentNames:
    part = accent[0]
    if len(accent) > 1:
      part += ':' + accent[1]
    parts.append(part)
  out = '+'.join(parts) + '=' + glyphName
  if offset[0] != 0 or offset[1] != 0:
    out += '/%d,%d' % (offset[0], offset[1])
  return out
def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
  # Read composition rules into an ordered map; blank lines and '#'
  # comments are skipped and file order is preserved.
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for raw in f:
      rule = raw.strip()
      if not rule or rule[0] == '#':
        continue
      name, base, accents, offset = parseGlyphComposition(rule)
      compositions[name] = (base, accents, offset)
  return compositions
def loadAGL(filename): # -> { 2126: 'Omega', ... }
  # Parse an Adobe Glyph List file; only 1:1 name->codepoint entries are
  # kept (multi-codepoint combinations are ignored).
  result = {}
  with open(filename, 'r') as f:
    for raw in f:
      # e.g. "Omega;2126"; "dalethatafpatah;05D3 05B2" is skipped
      entry = raw.strip()
      if entry and entry[0] != '#':
        name, uc = tuple([field.strip() for field in entry.split(';')])
        if uc.find(' ') == -1:
          result[int(uc, 16)] = name
  return result
def loadFontGlyphs(font):
  # Collect codepoint mappings for every glyph of a robofab font.
  # Returns (uc2names, name2ucs):
  #   uc2names: { 2126: ['Omega', ...], ...}  codepoint -> glyph names
  #   name2ucs: { 'Omega': [2126, ...], '.notdef': [], ...}  every glyph is
  #             present, even ones with no codepoint
  uc2names = {} # { 2126: ['Omega', ...], ...}
  name2ucs = {} # { 'Omega': [2126, ...], '.notdef': [], ...}
  for g in font:
    name = g.name
    ucs = g.unicodes
    name2ucs[name] = ucs
    for uc in ucs:
      names = uc2names.setdefault(uc, [])
      if name not in names:
        names.append(name)
  return uc2names, name2ucs
def main():
  """Rename "uniXXXX"-style diacritic composition targets to their AGL names.

  Reads src/diacritics.txt, and for every composed glyph whose name is not
  present in the given UFO fonts, renames it to the AGL name for its
  codepoint (when one exists), then rewrites the file (unless -dry).
  """
  argparser = ArgumentParser(description='Fixup diacritic names')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts')
  args = argparser.parse_args()
  dryRun = args.dryRun
  # Merge name/unicode maps across all given fonts.
  uc2names = {}
  name2ucs = {}
  for fontPath in args.fontPaths:
    font = OpenFont(fontPath)
    _uc2names, _name2ucs = loadFontGlyphs(font)
    for uc, _names in _uc2names.iteritems():
      names = uc2names.setdefault(uc, [])
      for name in _names:
        if name not in names:
          names.append(name)
    for name, _ucs in _name2ucs.iteritems():
      ucs = name2ucs.setdefault(name, [])
      for uc in _ucs:
        if uc not in ucs:
          ucs.append(uc)
  agl = loadAGL('src/glyphlist.txt')  # { 2126: 'Omega', ... }
  diacriticsFilename = 'src/diacritics.txt'
  diacriticComps = loadGlyphCompositions(diacriticsFilename)  # {glyphName => (baseName, a, o)}
  # Rename targets not present in the fonts to their AGL names.
  # Iterate over a snapshot since diacriticComps is mutated in the loop.
  for glyphName, comp in list(diacriticComps.items()):
    if glyphName not in name2ucs:
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        aglName = agl.get(uc)
        if aglName is not None:
          if aglName in diacriticComps:
            # Two entries would collapse onto the same name -- refuse.
            raise Exception('composing same glyph with different names:', aglName, glyphName)
          print('rename', glyphName, '->', aglName, '(U+%04X)' % uc)
          del diacriticComps[glyphName]
          diacriticComps[aglName] = comp
  # Re-serialize the (possibly renamed) compositions.
  lines = []
  for glyphName, comp in diacriticComps.iteritems():
    lines.append(fmtGlyphComposition(glyphName, *comp))
  # print('\n'.join(lines))
  print('Write', diacriticsFilename)
  if not dryRun:
    with open(diacriticsFilename, 'w') as f:
      for line in lines:
        f.write(line + '\n')
main()

324
misc/fixup-features.py Executable file
View File

@ -0,0 +1,324 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys, plistlib, re
from collections import OrderedDict
from ConfigParser import RawConfigParser
from argparse import ArgumentParser
from robofab.objects.objectsRF import OpenFont
from fontTools.feaLib.parser import Parser as FeaParser
from fontTools.feaLib.builder import Builder as FeaBuilder
from fontTools.ttLib import TTFont
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
# Fix: "(?:ni)?" makes the "ni" optional so the AGL "uXXXX[XX[XX]]" form
# (e.g. "u01C5", as the comment above promises) also matches; previously
# only "uniXXXX" names were recognized.
uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')

def unicodeForDefaultGlyphName(glyphName):
  """Return the codepoint encoded in a default-style glyph name, or None.

  Accepts both "uniXXXX" and "uXXXX" (4-8 uppercase hex digits).
  """
  m = uniNameRe.match(glyphName)
  if m is not None:
    try:
      return int(m.group(1), 16)
    except ValueError:
      # group(1) is guaranteed hex by the regex; kept for safety
      pass
  return None
def canonicalGlyphName(glyphName, uc2names):
  """Map a default-style name (e.g. "uni2126") to the font's preferred name.

  Looks the encoded codepoint up in uc2names and returns the first known
  name; falls back to the input name when nothing better is known.
  """
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is None:
    return glyphName
  knownNames = uc2names.get(uc)
  if knownNames:
    return knownNames[0]
  return glyphName
def parseGlyphComposition(composite):
  """Parse one composition line of the form
  "base+accent:pos+accent:pos=glyphName[/x,y]" into
  (glyphName, baseName, accentNames, offset).
  """
  recipe, target = composite.split("=")[0], composite.split("=")[1]
  targetParts = target.split("/")
  glyphName = targetParts[0]
  # Optional "/x,y" suffix = placement offset
  offset = [0, 0] if len(targetParts) == 1 else [int(v) for v in targetParts[1].split(",")]
  recipeParts = recipe.split("+")
  baseName = recipeParts[0]
  accentNames = [part.split(":") for part in recipeParts[1:]]
  return (glyphName, baseName, accentNames, offset)
def loadGlyphCompositions(filename):  # { glyphName => (baseName, accentNames, offset) }
  """Read a diacritics composition file (blank and '#' lines skipped)."""
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for rawline in f:
      line = rawline.strip()
      if not line or line.startswith('#'):
        continue
      glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
      compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
def loadAGL(filename):  # -> { 2126: 'Omega', ... }
  """Parse an Adobe Glyph List file into {codepoint(int): glyphname(str)}.

  Lines look like "Omega;2126"; '#' comments and blank lines are skipped,
  and multi-codepoint entries ("name;05D3 05B2") are ignored.
  """
  mapping = {}
  with open(filename, 'r') as f:
    for rawline in f:
      line = rawline.strip()
      if not line or line.startswith('#'):
        continue
      name, uc = tuple([field.strip() for field in line.split(';')])
      if ' ' not in uc:
        # single codepoint => 1:1 mapping
        mapping[int(uc, 16)] = name
  return mapping
def loadLocalNamesDB(fonts, agl, diacriticComps):
  """Build a combined glyph-name database from fonts, the AGL and the
  diacritic compositions.

  Returns (uc2names, name2ucs, allNames):
    uc2names: { 2126: ['Omega', ...], ... }
    name2ucs: { 'Omega': set([2126, ...]), ... }  (values are sets)
    allNames: set of every glyph name seen
  NOTE(review): if `fonts` is empty, uc2names stays None and the
  setdefault calls below would raise -- callers appear to always pass
  at least one font; confirm.
  """
  uc2names = None   # { 2126: ['Omega', ...], ...}
  allNames = set()  # set('Omega', ...)
  for font in fonts:
    _uc2names = font.getCharacterMapping()  # { 2126: ['Omega', ...], ...}
    if uc2names is None:
      # first font: adopt its mapping directly
      uc2names = _uc2names
    else:
      # subsequent fonts: merge, de-duplicating names per codepoint
      for uc, _names in _uc2names.iteritems():
        names = uc2names.setdefault(uc, [])
        for name in _names:
          if name not in names:
            names.append(name)
    for g in font:
      allNames.add(g.name)
  # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
  aglName2Ucs = {}
  for uc, name in agl.iteritems():
    aglName2Ucs.setdefault(name, []).append(uc)
  # Fold in composed-diacritic glyph names that the fonts may not contain yet.
  for glyphName, comp in diacriticComps.iteritems():
    aglUCs = aglName2Ucs.get(glyphName)
    if aglUCs is None:
      # Not an AGL name; try to decode a "uniXXXX"/"uXXXX" default name.
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        glyphName2 = agl.get(uc)
        if glyphName2 is not None:
          # prefer the AGL name for this codepoint
          glyphName = glyphName2
        names = uc2names.setdefault(uc, [])
        if glyphName not in names:
          names.append(glyphName)
      allNames.add(glyphName)
    else:
      # AGL name: register it for every codepoint the AGL maps it to.
      allNames.add(glyphName)
      for uc in aglUCs:
        names = uc2names.get(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        uc2names[uc] = names
  # Invert: name -> set of codepoints.
  name2ucs = {}  # { 'Omega': [2126, ...], ...}
  for uc, names in uc2names.iteritems():
    for name in names:
      name2ucs.setdefault(name, set()).add(uc)
  return uc2names, name2ucs, allNames
def main():
  """Rewrite features.fea so every glyph name matches the UFO fonts.

  Renames "uniXXXX"-style names to canonical font names inside class
  definitions and `sub` rules, drops unknown glyphs from classes, and
  writes the result to <features.fea>2 (unless -dry).
  """
  argparser = ArgumentParser(description='Fixup features.fea')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()
  dryRun = args.dryRun
  agl = loadAGL('src/glyphlist.txt')  # { 2126: 'Omega', ... }
  diacriticComps = loadGlyphCompositions('src/diacritics.txt')  # {glyphName => (baseName, a, o)}
  # collect glyph names
  fonts = [OpenFont(fontPath) for fontPath in args.fontPaths]
  uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)
  # open feature.fea -- use the first font dir that has one
  featuresFilename = ''
  featuresLines = []
  for fontPath in args.fontPaths:
    try:
      featuresFilename = os.path.join(fontPath, 'features.fea')
      with open(featuresFilename, 'r') as f:
        print('read', featuresFilename)
        featuresLines = f.read().splitlines()
      break
    except:
      # best-effort: try the next font path
      pass
  # @ClassName = [ name1 name2 ... ];
  classDefRe = re.compile(r'^@([^\s=]+)\s*=\s*\[([^\]]+)\]\s*;\s*$')
  # sub <names>['] by <names>;
  subRe = re.compile(r'^\s*sub\s+(.+)(\'?)\s+by\s+(.+)\s*;\s*$')
  # sub <names> [ <names> ]['] by <names>;
  sub2Re = re.compile(r'^\s*sub\s+([^\[]+)\s+\[\s*([^\]]+)\s*\](\'?)\s+by\s+(.+)\s*;\s*$')
  # sub lmidtilde [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
  # sub lmidtilde uni1ABC' by uni1ABC.w2;
  spacesRe = re.compile(r'[\s\r\n]+')
  classDefs = {}
  featuresLines2 = []
  for line in featuresLines:
    # -- class definitions --
    clsM = classDefRe.match(line)
    if clsM is not None:
      clsName = clsM.group(1)
      names = spacesRe.split(clsM.group(2).strip())
      if clsName in classDefs:
        raise Exception('duplicate class definition ' + clsName)
      # print('classdef', clsName, ' '.join(names))
      # print('classdef', clsName)
      names2 = []
      for name in names:
        if name == '-':
          # range separator, e.g. A - Z -- keep verbatim
          names2.append(name)
          continue
        if name[0] != '@':
          canonName = canonicalGlyphName(name, uc2names)
          if canonName != name:
            # print('renaming ' + name + ' -> ' + canonName)
            names2.append(canonName)
          elif name not in allNames:
            print('skipping unknown glyph ' + name)
          else:
            names2.append(name)
        else:
          raise Exception('todo: class-ref ' + name + ' in class-def ' + clsName)
      classDefs[clsName] = names2
      line = '@%s = [ %s ];' % (clsName, ' '.join(names2))
      featuresLines2.append(line)
      continue
    # sub2M = sub2Re.match(line)
    # if sub2M is not None:
    #   findNames1 = spacesRe.split(sub2M.group(1))
    #   findNames2 = spacesRe.split(sub2M.group(2))
    #   apos = sub2M.group(3)
    #   rightName = sub2M.group(4)
    #   print('TODO: sub2', findNames1, findNames2, apos, rightName)
    #   featuresLines2.append(line)
    #   continue
    # -- substitution rules; try the bracketed form first --
    sub2M = sub2Re.match(line)
    subM = None
    if sub2M is None:
      subM = subRe.match(line)
    if subM is not None or sub2M is not None:
      findNamesStr = ''
      findNamesHasBrackets = False
      findNames = []
      findNamesBStr = ''
      findNamesBHasBrackets = False
      findNamesB = []
      newNamesStr = ''
      newNamesHasBrackets = False
      newNames = []
      apos0 = ''
      if subM is not None:
        findNamesStr = subM.group(1)
        apos0 = subM.group(2)
        newNamesStr = subM.group(3)
      else: # sub2M
        findNamesStr = sub2M.group(1)
        findNamesBStr = sub2M.group(2)
        apos0 = sub2M.group(3)
        newNamesStr = sub2M.group(4)
      # Strip surrounding brackets, remembering whether to restore them.
      if newNamesStr[0] == '[':
        newNamesHasBrackets = True
        newNamesStr = newNamesStr.strip('[ ]')
      newNames = spacesRe.split(newNamesStr)
      if findNamesStr[0] == '[':
        findNamesHasBrackets = True
        findNamesStr = findNamesStr.strip('[ ]')
      findNames = spacesRe.split(findNamesStr)
      if findNamesBStr != '':
        if findNamesBStr[0] == '[':
          findNamesBHasBrackets = True
          findNamesBStr = findNamesBStr.strip('[ ]')
        findNamesB = spacesRe.split(findNamesBStr)
      # Canonicalize every name in all three name lists.
      names22 = []
      for names in [findNames, findNamesB, newNames]:
        names2 = []
        for name in names:
          if name[0] == '@':
            # class reference -- verify it was defined earlier in the file
            clsName = name[1:].rstrip("'")
            if clsName not in classDefs:
              raise Exception('sub: missing target class ' + clsName + ' at\n' + line)
            names2.append(name)
          else:
            # preserve a trailing apostrophe (contextual mark)
            apos = name[-1] == "'"
            if apos:
              name = name[:-1]
            if name not in allNames:
              canonName = canonicalGlyphName(name, uc2names)
              if canonName != name:
                print('renaming ' + name + ' -> ' + canonName)
                name = canonName
              else:
                raise Exception('TODO: unknown name', name)
                # if we remove names, we also need to remove subs (that become empty), and so on.
            if apos:
              name += "'"
            names2.append(name)
        names22.append(names2)
      findNames2, findNamesB2, newNames2 = names22
      # Re-assemble the rule text, restoring brackets where present.
      findNamesStr = ' '.join(findNames2)
      if findNamesHasBrackets: findNamesStr = '[' + findNamesStr + ']'
      if findNamesBStr != '':
        findNamesBStr = ' '.join(findNamesB2)
        if findNamesBHasBrackets: findNamesBStr = '[' + findNamesBStr + ']'
      newNamesStr = ' '.join(newNames2)
      if newNamesHasBrackets: newNamesStr = '[' + newNamesStr + ']'
      if subM is not None:
        line = ' sub %s%s by %s;' % (findNamesStr, apos0, newNamesStr)
      else:
        # if subM is None:
        # sub bbar [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
        line = ' sub %s [%s]%s by %s;' % (findNamesStr, findNamesBStr, apos0, newNamesStr)
    # unmatched lines (and rewritten sub lines) pass through
    featuresLines2.append(line)
  print('Write', featuresFilename)
  if not dryRun:
    with open(featuresFilename + '2', 'w') as f:
      for line in featuresLines2:
        f.write(line + '\n')
    # FeaParser(featuresFilename + '2', allNames).parse()
  # font = TTFont('build/dist-unhinted/Interface-Regular.otf')
  # FeaBuilder(font, featuresFilename + '2').build()
main()

362
misc/fixup-kerning.py Executable file
View File

@ -0,0 +1,362 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys, plistlib, json
from collections import OrderedDict
from ConfigParser import RawConfigParser
from argparse import ArgumentParser
from fontTools import ttLib
from robofab.objects.objectsRF import OpenFont
def getTTCharMap(font):  # -> { 2126: 'Omegagreek', ...}
  """Extract the highest-format Unicode cmap subtable of a compiled font.

  `font` may be a ttLib.TTFont or a path to one. Raises if the font has
  no cmap table, or on a duplicate codepoint mapping.
  """
  if isinstance(font, str):
    font = ttLib.TTFont(font)
  if 'cmap' not in font:
    raise Exception('missing cmap table')
  charMap = {}
  bestSubtable = None
  bestSubtableFormat = 0
  for subtable in font['cmap'].tables:
    # platformID: 0=unicode, 1=mac, 2=(reserved), 3=microsoft
    if subtable.platformID == 0 and subtable.format > bestSubtableFormat:
      bestSubtable = subtable
      bestSubtableFormat = subtable.format
  if bestSubtable is not None:
    for cp, glyphname in bestSubtable.cmap.items():
      if cp in charMap:
        raise Exception('duplicate unicode-to-glyphname mapping: U+%04X => %r and %r' % (
          cp, glyphname, charMap[cp]))
      charMap[cp] = glyphname
  return charMap
def revCharMap(ucToNames):
  """Reverse a codepoint->name(s) mapping.

  # {2126:['Omega','Omegagr']} -> {'Omega':2126, 'Omegagr':2126}
  # {2126:'Omega'} -> {'Omega':2126}

  Fix: use .values()/.items() instead of the Python-2-only
  .itervalues()/.iteritems() -- both spellings behave identically here
  in Python 2 and also work in Python 3.
  """
  m = {}
  if len(ucToNames) == 0:
    return m
  # Probe a single value to decide whether values are name lists or
  # single name strings (assumes a homogeneous mapping).
  lists = True
  for v in ucToNames.values():
    lists = not isinstance(v, str)
    break
  if lists:
    for uc, names in ucToNames.items():
      for name in names:
        m[name] = uc
  else:
    for uc, name in ucToNames.items():
      m[name] = uc
  return m
def getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap):
  """Compute a rename map from source glyph names to destination names.

  srcCharMap:    { 2126: 'Omegagreek', ... }
  dstCharMap:    { 2126: ['Omega', ...], ... }
  dstRevCharMap: { 'Omega': 2126, ... }
  Returns { 'Omegagreek': 'Omega', ... } containing only names that differ.

  Fix: use .items() instead of the Python-2-only .iteritems() (identical
  behavior in Python 2, also works in Python 3).
  """
  m = {}  # { 'Omegagreek': 'Omega', ... }
  for uc, srcName in srcCharMap.items():
    dstNames = dstCharMap.get(uc)
    if dstNames is not None and len(dstNames) > 0:
      if len(dstNames) != 1:
        print('warning: ignoring multi-glyph map for U+%04X in source font' % uc)
      dstName = dstNames[0]
      if srcName != dstName and srcName not in dstRevCharMap:
        # Only include names that differ. also, The `srcName not in dstRevCharMap` condition
        # makes sure that we don't rename glyphs that are already valid.
        m[srcName] = dstName
  return m
def fixupGroups(fontPath, dstGlyphNames, srcToDstMap, dryRun, stats):
  """Rewrite <fontPath>/groups.plist, renaming and pruning glyph names.

  dstGlyphNames: names (or a name->uc map) considered valid in the target font.
  srcToDstMap:   { oldName: newName } renames to apply.
  Returns (groups2, glyphToGroups) where glyphToGroups maps each kept
  glyph name to the list of groups containing it. Side effects are
  recorded on `stats`; the file is only written when not dryRun.
  """
  filename = os.path.join(fontPath, 'groups.plist')
  groups = plistlib.readPlist(filename)
  groups2 = {}
  glyphToGroups = {}
  for groupName, glyphNames in groups.iteritems():
    glyphNames2 = []
    for glyphName in glyphNames:
      # apply rename first, then validate against the target font
      if glyphName in srcToDstMap:
        gn2 = srcToDstMap[glyphName]
        stats.renamedGlyphs[glyphName] = gn2
        glyphName = gn2
      if glyphName in dstGlyphNames:
        glyphNames2.append(glyphName)
        glyphToGroups[glyphName] = glyphToGroups.get(glyphName, []) + [groupName]
      else:
        stats.removedGlyphs.add(glyphName)
    if len(glyphNames2) > 0:
      groups2[groupName] = glyphNames2
    else:
      # group became empty -- drop it entirely
      stats.removedGroups.add(groupName)
  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(groups2, filename)
  return groups2, glyphToGroups
def fixupKerning(fontPath, dstGlyphNames, srcToDstMap, groups, glyphToGroups, dryRun, stats):
  """Rewrite <fontPath>/kerning.plist: rename glyphs, drop dead entries,
  and remove individual pairs already covered by group kerning.

  Pass 1 renames/prunes left and right entries and records, for every
  concrete (left,right) glyph pair reachable through groups, the kerning
  value in groupPairs. Pass 2 drops glyph-glyph pairs shadowed by a
  group pair. Writes the file only when not dryRun; returns the new
  kerning dict.
  """
  filename = os.path.join(fontPath, 'kerning.plist')
  kerning = plistlib.readPlist(filename)
  kerning2 = {}
  groupPairs = {}  # { "lglyphname+lglyphname": ("lgroupname"|"", "rgroupname"|"", 123) }
  # pairs = {} # { "name+name" => 123 }
  for leftName, right in kerning.items():
    leftIsGroup = leftName[0] == '@'
    leftGroupNames = None
    if leftIsGroup:
      # left is a group
      if leftName not in groups:
        # dead group -- skip
        stats.removedGroups.add(leftName)
        continue
      leftGroupNames = groups[leftName]
    else:
      if leftName in srcToDstMap:
        leftName2 = srcToDstMap[leftName]
        stats.renamedGlyphs[leftName] = leftName2
        leftName = leftName2
      if leftName not in dstGlyphNames:
        # dead glyphname -- skip
        stats.removedGlyphs.add(leftName)
        continue
    right2 = {}
    rightGroupNamesAndValues = []
    for rightName, kerningValue in right.iteritems():
      rightIsGroup = rightName[0] == '@'
      if rightIsGroup:
        if leftIsGroup and leftGroupNames is None:
          leftGroupNames = [leftName]
        if rightName in groups:
          right2[rightName] = kerningValue
          rightGroupNamesAndValues.append((groups[rightName], rightName, kerningValue))
        else:
          stats.removedGroups.add(rightName)
      else:
        # right side is a single glyph: rename then validate
        if rightName in srcToDstMap:
          rightName2 = srcToDstMap[rightName]
          stats.renamedGlyphs[rightName] = rightName2
          rightName = rightName2
        if rightName in dstGlyphNames:
          right2[rightName] = kerningValue
          if leftIsGroup:
            rightGroupNamesAndValues.append(([rightName], '', kerningValue))
        else:
          stats.removedGlyphs.add(rightName)
    if len(right2):
      kerning2[leftName] = right2
      # update groupPairs
      # NOTE(review): `rightIsGroup` here is the leftover value from the
      # last iteration of the loop above; this looks like it was meant to
      # be `leftIsGroup` (lgroupname is only used when leftIsGroup) --
      # confirm before changing.
      lgroupname = leftName if rightIsGroup else ''
      if leftIsGroup:
        for lname in leftGroupNames:
          kPrefix = lname + '+'
          for rnames, rgroupname, kernv in rightGroupNamesAndValues:
            for rname in rnames:
              # expand group kerning into concrete glyph pairs
              k = kPrefix + rname
              v = (lgroupname, rgroupname, kernv)
              if k in groupPairs:
                raise Exception('duplicate group pair %s: %r and %r' % (k, groupPairs[k], v))
              groupPairs[k] = v
    elif leftIsGroup:
      stats.removedGroups.add(leftName)
    else:
      stats.removedGlyphs.add(leftName)
  # print('groupPairs:', groupPairs)
  # remove individual pairs that are already represented through groups
  kerning = kerning2
  kerning2 = {}
  for leftName, right in kerning.items():
    leftIsGroup = leftName[0] == '@'
    # leftNames = groups[leftName] if leftIsGroup else [leftName]
    if not leftIsGroup:
      right2 = {}
      for rightName, kernVal in right.iteritems():
        rightIsGroup = rightName[0] == '@'
        if not rightIsGroup:
          k = leftName + '+' + rightName
          if k in groupPairs:
            # pair is covered by a group pair -- drop the individual one
            groupPair = groupPairs[k]
            print(('simplify individual pair %r: kern %r (individual) -> %r (group)') % (
              k, kernVal, groupPair[2]))
            stats.simplifiedKerningPairs.add(k)
          else:
            right2[rightName] = kernVal
        else:
          right2[rightName] = kernVal
    else:
      # TODO, probably
      right2 = right
    kerning2[leftName] = right2
  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(kerning2, filename)
  return kerning2
def loadJSONCharMap(filename):
  """Load a {codepoint: glyphname} map from a JSON file (or stdin for '-').

  Expected schema: {[unicode:int]: glyphname:string, ...} e.g. {"2126": "Omega"}.
  JSON object keys are always decoded as *strings*, so the original
  `isinstance(k, int)` validation rejected every nonempty map; keys are
  now converted to int, matching what callers (which format codepoints
  with '%04X') and getTTCharMap() produce.

  Raises Exception on a non-dict root, a non-numeric key, or a
  non-string value.
  """
  m = None
  if filename == '-':
    m = json.load(sys.stdin)
  else:
    with open(filename, 'r') as f:
      m = json.load(f)
  if not isinstance(m, dict):
    raise Exception('json root is not a dict')
  charMap = {}
  for k, v in m.items():
    try:
      uc = int(k)
    except (TypeError, ValueError):
      raise Exception('json dict key is not a number')
    if not isinstance(v, str):
      raise Exception('json dict value is not a string')
    charMap[uc] = v
  return charMap
class Stats:
  """Accumulates what the fixup passes removed, renamed and simplified."""
  def __init__(self):
    # group names dropped because they became empty or unknown
    self.removedGroups = set()
    # glyph names dropped because the target font lacks them
    self.removedGlyphs = set()
    # "left+right" pairs replaced by group kerning
    self.simplifiedKerningPairs = set()
    # { oldName: newName } renames applied
    self.renamedGlyphs = {}
def configFindResFile(config, basedir, name):
  """Resolve a [res] entry from fontbuild.cfg to an existing file path.

  Tries `basedir` first, then its parent directory. Returns the path of
  the first existing file, or None when neither exists.
  """
  candidate = os.path.join(basedir, config.get("res", name))
  if os.path.isfile(candidate):
    return candidate
  parentdir = os.path.dirname(basedir)
  candidate = os.path.join(parentdir, config.get("res", name))
  if os.path.isfile(candidate):
    return candidate
  return None
def main():
  """Rename glyph names in UFO kerning/groups and prune dead entries.

  Fixes over the original:
  - `json.dump` wrote to sys.stdout even when -save-stats opened a file;
    it now writes to the opened stream `f`.
  - statsObj contained `set` objects, which the json module cannot
    serialize; sets are converted to sorted lists for stable output.
  """
  jsonSchemaDescr = '{[unicode:int]: glyphname:string, ...}'
  argparser = ArgumentParser(
    description='Rename glyphnames in UFO kerning and remove unused groups and glyphnames.')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    '-no-stats', dest='noStats', action='store_const', const=True, default=False,
    help='Do not print statistics at the end.')
  argparser.add_argument(
    '-save-stats', dest='saveStatsPath', metavar='<file>', type=str,
    help='Write detailed statistics to JSON file.')
  argparser.add_argument(
    '-src-json', dest='srcJSONFile', metavar='<file>', type=str,
    help='JSON file to read glyph names from.'+
    ' Expected schema: ' + jsonSchemaDescr + ' (e.g. {2126: "Omega"})')
  argparser.add_argument(
    '-src-font', dest='srcFontFile', metavar='<file>', type=str,
    help='TrueType or OpenType font to read glyph names from.')
  argparser.add_argument(
    'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()
  dryRun = args.dryRun
  if args.srcJSONFile and args.srcFontFile:
    argparser.error('Both -src-json and -src-font specified -- please provide only one.')
  # Strip trailing slashes from font paths
  args.dstFontsPaths = [s.rstrip('/ ') for s in args.dstFontsPaths]
  # Load the source character map from whichever source was given.
  srcCharMap = None
  if args.srcJSONFile:
    try:
      srcCharMap = loadJSONCharMap(args.srcJSONFile)
    except Exception as err:
      argparser.error('Invalid JSON: Expected schema %s (%s)' % (jsonSchemaDescr, err))
  elif args.srcFontFile:
    srcCharMap = getTTCharMap(args.srcFontFile.rstrip('/ '))  # -> { 2126: 'Omegagreek', ...}
  else:
    argparser.error('No source provided (-src-* argument missing)')
  if len(srcCharMap) == 0:
    print('Empty character map', file=sys.stderr)
    sys.exit(1)
  # Find project source dir (all UFOs must share a parent directory).
  srcDir = ''
  for dstFontPath in args.dstFontsPaths:
    s = os.path.dirname(dstFontPath)
    if not srcDir:
      srcDir = s
    elif srcDir != s:
      raise Exception('All <ufofile>s must be rooted in the same directory')
  # load fontbuild configuration
  config = RawConfigParser(dict_type=OrderedDict)
  configFilename = os.path.join(srcDir, 'fontbuild.cfg')
  config.read(configFilename)
  diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')  # (currently unused)
  for dstFontPath in args.dstFontsPaths:
    dstFont = OpenFont(dstFontPath)
    dstCharMap = dstFont.getCharacterMapping()  # -> { 2126: [ 'Omega', ...], ...}
    dstRevCharMap = revCharMap(dstCharMap)  # { 'Omega': 2126, ...}
    srcToDstMap = getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap)
    stats = Stats()
    groups, glyphToGroups = fixupGroups(dstFontPath, dstRevCharMap, srcToDstMap, dryRun, stats)
    fixupKerning(dstFontPath, dstRevCharMap, srcToDstMap, groups, glyphToGroups, dryRun, stats)
    # stats
    if args.saveStatsPath or not args.noStats:
      if not args.noStats:
        print('stats for %s:' % dstFontPath)
        print('  Deleted %d groups and %d glyphs.' % (
          len(stats.removedGroups), len(stats.removedGlyphs)))
        print('  Renamed %d glyphs.' % len(stats.renamedGlyphs))
        print('  Simplified %d kerning pairs.' % len(stats.simplifiedKerningPairs))
      if args.saveStatsPath:
        # Sets are not JSON-serializable; emit sorted lists for stable output.
        statsObj = {
          'deletedGroups': sorted(stats.removedGroups),
          'deletedGlyphs': sorted(stats.removedGlyphs),
          'simplifiedKerningPairs': sorted(stats.simplifiedKerningPairs),
          'renamedGlyphs': stats.renamedGlyphs,
        }
        f = sys.stdout
        try:
          if args.saveStatsPath != '-':
            f = open(args.saveStatsPath, 'w')
            print('Writing stats to', args.saveStatsPath)
          # Bug fix: write to the selected stream, not always stdout.
          json.dump(statsObj, f, indent=2, separators=(',', ': '))
        finally:
          if f is not sys.stdout:
            f.close()
if __name__ == '__main__':
main()

391
misc/fontinfo.py Executable file
View File

@ -0,0 +1,391 @@
#!/usr/bin/env python
# encoding: utf8
#
# Generates JSON-encoded information about fonts
#
import os
import sys
import argparse
import json
from fontTools import ttLib
from fontTools.misc import sstruct
from fontTools.ttLib.tables._h_e_a_d import headFormat
from fontTools.ttLib.tables._h_h_e_a import hheaFormat
from fontTools.ttLib.tables._m_a_x_p import maxpFormat_0_5, maxpFormat_1_0_add
from fontTools.ttLib.tables._p_o_s_t import postFormat
from fontTools.ttLib.tables.O_S_2f_2 import OS2_format_1, OS2_format_2, OS2_format_5
# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
# from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
_NAME_IDS = {}
def num(s):
  """Parse a numeric string: int when it has no '.', float otherwise."""
  if '.' in s:
    return float(s)
  return int(s)
def tableNamesToDict(table, names):
  """Copy the listed attributes of `table` into a dict.

  Attribute names starting with "reserved" are skipped.
  """
  result = {}
  for name in names:
    if name.startswith('reserved'):
      continue
    result[name] = getattr(table, name)
  return result
def sstructTableToDict(table, format):
  """Convert an sstruct-backed table to a dict keyed by its field names."""
  names = sstruct.getformat(format)[1]
  return tableNamesToDict(table, names)
# Output modes for genFontInfo/genGlyphsInfo
OUTPUT_TYPE_COMPLETE = 'complete'
OUTPUT_TYPE_GLYPHLIST = 'glyphlist'

# Outline-format tags
GLYPHS_TYPE_UNKNOWN = '?'
GLYPHS_TYPE_TT = 'tt'
GLYPHS_TYPE_CFF = 'cff'

def getGlyphsType(tt):
  """Classify a font's outline format: CFF, TrueType ('glyf') or unknown."""
  if 'CFF ' in tt:
    return GLYPHS_TYPE_CFF
  if 'glyf' in tt:
    return GLYPHS_TYPE_TT
  return GLYPHS_TYPE_UNKNOWN
class GlyphInfo:
  """Per-glyph metrics and outline info extracted from a compiled font.

  Bug fix in _addTTInfo: contoursBBox packed xMin twice --
  (g.xMin, g.xMin, g.xMax, g.yMax) -- while the field is documented as
  (xMin, yMin, xMax, yMax); the second element is now g.yMin.
  """
  def __init__(self, g, name, unicodes, type, glyphTable):
    # g: a glyph from TTFont.getGlyphSet(); glyphTable: 'glyf' table or None
    self._type = type  # GLYPHS_TYPE_*
    self._glyphTable = glyphTable
    self.name = name
    self.width = g.width
    self.lsb = g.lsb
    self.unicodes = unicodes
    if g.height is not None:
      self.tsb = g.tsb
      self.height = g.height
    else:
      # no vertical metrics available
      self.tsb = 0
      self.height = 0
    self.numContours = 0
    self.contoursBBox = (0,0,0,0) # xMin, yMin, xMax, yMax
    self.hasHints = False
    if self._type is GLYPHS_TYPE_CFF:
      self._addCFFInfo()
    elif self._type is GLYPHS_TYPE_TT:
      self._addTTInfo()

  def _addTTInfo(self):
    # Pull contour count, bounding box and hinting flag from the glyf table.
    g = self._glyphTable[self.name]
    self.numContours = g.numberOfContours
    if g.numberOfContours:
      # Fixed: second element is yMin (was mistakenly xMin).
      self.contoursBBox = (g.xMin, g.yMin, g.xMax, g.yMax)
    self.hasHints = hasattr(g, "program")

  def _addCFFInfo(self):
    # TODO: parse CFF dict tree
    pass

  @classmethod
  def structKeys(cls, type):
    # Column names matching structValues() order for the given outline type.
    v = [
      'name',
      'unicodes',
      'width',
      'lsb',
      'height',
      'tsb',
      'hasHints',
    ]
    if type is GLYPHS_TYPE_TT:
      v += (
        'numContours',
        'contoursBBox',
      )
    return v

  def structValues(self):
    # Values in the same order as structKeys().
    v = [
      self.name,
      self.unicodes,
      self.width,
      self.lsb,
      self.height,
      self.tsb,
      self.hasHints,
    ]
    if self._type is GLYPHS_TYPE_TT:
      v += (
        self.numContours,
        self.contoursBBox,
      )
    return v
# exported convenience function
def GenGlyphList(font, withGlyphs=None):
  """Return glyph-list info for a font (path or TTFont instance).

  withGlyphs: optional comma-separated glyph-name filter. Fix: it was
  accepted but never forwarded to genGlyphsInfo; it is now passed
  through (default None preserves the old behavior).
  """
  if isinstance(font, str):
    font = ttLib.TTFont(font)
  return genGlyphsInfo(font, OUTPUT_TYPE_GLYPHLIST, withGlyphs=withGlyphs)
def genGlyphsInfo(tt, outputType, glyphsType=GLYPHS_TYPE_UNKNOWN, glyphsTable=None, withGlyphs=None):
  """Build glyph info for a TTFont.

  outputType OUTPUT_TYPE_GLYPHLIST => list of [name, codepoint...].
  Otherwise => {'keys': [...], 'values': [GlyphInfo rows...]}.
  withGlyphs: comma-separated glyph names to restrict output to.
  """
  unicodeMap = {}
  glyphnameFilter = None
  if isinstance(withGlyphs, str):
    glyphnameFilter = withGlyphs.split(',')
  if 'cmap' in tt:
    # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
    # Pick the highest-format Unicode (platformID 0) cmap subtable.
    bestCodeSubTable = None
    bestCodeSubTableFormat = 0
    for st in tt['cmap'].tables:
      if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
        if st.format > bestCodeSubTableFormat:
          bestCodeSubTable = st
          bestCodeSubTableFormat = st.format
    # NOTE(review): if the font has a cmap but no platformID-0 subtable,
    # bestCodeSubTable stays None and the next line raises -- confirm
    # whether such fonts are expected here.
    for cp, glyphname in bestCodeSubTable.cmap.items():
      if glyphname in unicodeMap:
        unicodeMap[glyphname].append(cp)
      else:
        unicodeMap[glyphname] = [cp]
  glyphValues = []
  glyphset = tt.getGlyphSet(preferCFF=glyphsType is GLYPHS_TYPE_CFF)
  glyphnames = tt.getGlyphOrder() if glyphnameFilter is None else glyphnameFilter
  if outputType is OUTPUT_TYPE_GLYPHLIST:
    # Flat list output: [name, cp, cp, ...] per glyph.
    glyphValues = []
    for glyphname in glyphnames:
      v = [glyphname]
      if glyphname in unicodeMap:
        v += unicodeMap[glyphname]
      glyphValues.append(v)
    return glyphValues
  # Full output: one GlyphInfo row per glyph.
  for glyphname in glyphnames:
    unicodes = unicodeMap[glyphname] if glyphname in unicodeMap else []
    try:
      g = glyphset[glyphname]
    except KeyError:
      raise Exception('no such glyph "'+glyphname+'"')
    gi = GlyphInfo(g, glyphname, unicodes, glyphsType, glyphsTable)
    glyphValues.append(gi.structValues())
  return {
    'keys': GlyphInfo.structKeys(glyphsType),
    'values': glyphValues,
  }
def copyDictEntry(srcD, srcName, dstD, dstName):
  """Copy srcD[srcName] into dstD[dstName]; a missing key is a no-op.

  Fix: narrowed the bare `except:` to KeyError so real errors (including
  KeyboardInterrupt/SystemExit) are no longer silently swallowed.
  """
  try:
    dstD[dstName] = srcD[srcName]
  except KeyError:
    pass
def addCFFFontInfo(tt, info, cffTable):
  """Merge Weight/version from a CFF top dict into info['name'].

  Creates info['name'] when absent; existing entries are preserved.
  """
  d = cffTable.rawDict
  if 'name' not in info:
    info['name'] = {}
  nameDict = info['name']
  copyDictEntry(d, 'Weight', nameDict, 'weight')
  copyDictEntry(d, 'version', nameDict, 'version')
def genFontInfo(fontpath, outputType, withGlyphs=True):
  """Build an info dict for one font file.

  Collects 'name' table records, head/hhea/post/OS2 tables (unless
  outputType is OUTPUT_TYPE_GLYPHLIST) and glyph info.
  withGlyphs: True/False, or a comma-separated glyph-name filter string
  ('' suppresses glyph output).

  Fix: `withGlyphs is not ''` identity-compared against a string literal
  (implementation-dependent; SyntaxWarning on modern Python) -- replaced
  with the equality test `withGlyphs != ''`.
  """
  tt = ttLib.TTFont(fontpath) # lazy=True
  info = {
    'id': fontpath,
  }
  # for tableName in tt.keys():
  #   print 'table', tableName
  # Decode 'name' table records into a friendly-keyed dict.
  nameDict = {}
  if 'name' in tt:
    nameDict = {}
    for rec in tt['name'].names:
      k = _NAME_IDS[rec.nameID] if rec.nameID in _NAME_IDS else ('#%d' % rec.nameID)
      nameDict[k] = rec.toUnicode()
    if 'fontId' in nameDict:
      info['id'] = nameDict['fontId']
  # Derive a short font name: postscriptName, else Family-Subfamily.
  if 'postscriptName' in nameDict:
    info['name'] = nameDict['postscriptName']
  elif 'familyName' in nameDict:
    info['name'] = nameDict['familyName'].replace(' ', '')
    if 'subfamilyName' in nameDict:
      info['name'] += '-' + nameDict['subfamilyName'].replace(' ', '')
  if outputType is not OUTPUT_TYPE_GLYPHLIST:
    if len(nameDict):
      info['names'] = nameDict
    if 'head' in tt:
      info['head'] = sstructTableToDict(tt['head'], headFormat)
    if 'hhea' in tt:
      info['hhea'] = sstructTableToDict(tt['hhea'], hheaFormat)
    if 'post' in tt:
      info['post'] = sstructTableToDict(tt['post'], postFormat)
    if 'OS/2' in tt:
      t = tt['OS/2']
      if t.version == 1:
        info['os/2'] = sstructTableToDict(t, OS2_format_1)
      elif t.version in (2, 3, 4):
        info['os/2'] = sstructTableToDict(t, OS2_format_2)
      elif t.version == 5:
        info['os/2'] = sstructTableToDict(t, OS2_format_5)
        # spec stores optical point sizes in twentieths of a point
        info['os/2']['usLowerOpticalPointSize'] /= 20
        info['os/2']['usUpperOpticalPointSize'] /= 20
      if 'panose' in info['os/2']:
        del info['os/2']['panose']
  # if 'maxp' in tt:
  #   table = tt['maxp']
  #   _, names, _ = sstruct.getformat(maxpFormat_0_5)
  #   if table.tableVersion != 0x00005000:
  #     _, names_1_0, _ = sstruct.getformat(maxpFormat_1_0_add)
  #     names += names_1_0
  #   info['maxp'] = tableNamesToDict(table, names)
  glyphsType = getGlyphsType(tt)
  glyphsTable = None
  if glyphsType is GLYPHS_TYPE_CFF:
    cff = tt["CFF "].cff
    cffDictIndex = cff.topDictIndex
    if len(cffDictIndex) > 1:
      sys.stderr.write(
        'warning: multi-font CFF table is unsupported. Only reporting first table.\n'
      )
    cffTable = cffDictIndex[0]
    if outputType is not OUTPUT_TYPE_GLYPHLIST:
      addCFFFontInfo(tt, info, cffTable)
  elif glyphsType is GLYPHS_TYPE_TT:
    glyphsTable = tt["glyf"]
  # print 'glyphs type:', glyphsType, 'flavor:', tt.flavor, 'sfntVersion:', tt.sfntVersion
  if (withGlyphs is not False or outputType is OUTPUT_TYPE_GLYPHLIST) and withGlyphs != '':
    info['glyphs'] = genGlyphsInfo(tt, outputType, glyphsType, glyphsTable, withGlyphs)
  # sys.exit(1)
  return info
# ————————————————————————————————————————————————————————————————————————
# main
def main():
  """CLI entry point: emit JSON describing the given font files.

  Output goes to stdout or -out <file>; -pretty enables indentation,
  -as-glyphlist restricts output to glyph/unicode lists.
  """
  argparser = argparse.ArgumentParser(description='Generate JSON describing fonts')

  argparser.add_argument('-out', dest='outfile', metavar='<file>', type=str,
                         help='Write JSON to <file>. Writes to stdout if not specified')

  argparser.add_argument('-pretty', dest='prettyJson', action='store_const',
                         const=True, default=False,
                         help='Generate pretty JSON with linebreaks and indentation')

  argparser.add_argument('-with-all-glyphs', dest='withGlyphs', action='store_const',
                         const=True, default=False,
                         help='Include glyph information on all glyphs.')

  argparser.add_argument('-with-glyphs', dest='withGlyphs', metavar='glyphname[,glyphname ...]',
                         type=str,
                         help='Include glyph information on specific glyphs')

  argparser.add_argument('-as-glyphlist', dest='asGlyphList',
                         action='store_const', const=True, default=False,
                         help='Only generate a list of glyphs and their unicode mappings.')

  argparser.add_argument('fontpaths', metavar='<path>', type=str, nargs='+',
                         help='TrueType or OpenType font files')

  args = argparser.parse_args()
  fonts = {}
  outputType = OUTPUT_TYPE_COMPLETE
  if args.asGlyphList:
    outputType = OUTPUT_TYPE_GLYPHLIST
  n = 0
  for fontpath in args.fontpaths:
    if n > 0:
      # workaround for a bug in fontTools.misc.sstruct where it keeps a global
      # internal cache that mixes up values for different fonts.
      # NOTE(review): `reload` is a Python-2 builtin (importlib.reload in py3).
      reload(sstruct)
    font = genFontInfo(fontpath, outputType=outputType, withGlyphs=args.withGlyphs)
    fonts[font['id']] = font
    n += 1
  # Write JSON to the selected stream, pretty or compact.
  ostream = sys.stdout
  if args.outfile is not None:
    ostream = open(args.outfile, 'w')
  if args.prettyJson:
    json.dump(fonts, ostream, sort_keys=True, indent=2, separators=(',', ': '))
  else:
    json.dump(fonts, ostream, separators=(',', ':'))
  if ostream is not sys.stdout:
    ostream.close()
# "name" table name identifiers
# Maps OpenType/TrueType nameID numbers to the friendly keys used in
# genFontInfo()'s nameDict. See the OpenType 'name' table specification.
_NAME_IDS = {
  # TrueType & OpenType
  0: 'copyright',
  1: 'familyName',
  2: 'subfamilyName',
  3: 'fontId',
  4: 'fullName',
  5: 'version', # e.g. 'Version <number>.<number>'
  6: 'postscriptName',
  7: 'trademark',
  8: 'manufacturerName',
  9: 'designer',
  10: 'description',
  11: 'vendorURL',
  12: 'designerURL',
  13: 'licenseDescription',
  14: 'licenseURL',
  15: 'RESERVED',
  16: 'typoFamilyName',
  17: 'typoSubfamilyName',
  18: 'macCompatibleFullName', # Mac only (FOND)
  19: 'sampleText',

  # OpenType
  20: 'postScriptCIDName',
  21: 'wwsFamilyName',
  22: 'wwsSubfamilyName',
  23: 'lightBackgoundPalette',
  24: 'darkBackgoundPalette',
  25: 'variationsPostScriptNamePrefix',

  # 26-255: Reserved for future expansion
  # 256-32767: Font-specific names (layout features and settings, variations, track names, etc.)
}
if __name__ == '__main__':
main()

245
misc/gen-glyphinfo.py Executable file
View File

@ -0,0 +1,245 @@
#!/usr/bin/env python
# encoding: utf8
#
# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
#
from __future__ import print_function
import os, sys, json, re
from argparse import ArgumentParser
from robofab.objects.objectsRF import OpenFont
from collections import OrderedDict
from unicode_util import parseUnicodeDataFile
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
# Fix: "(?:ni)?" makes the "ni" optional so the AGL "uXXXX[XX[XX]]" form
# (e.g. "u01C5", as the comment above promises) also matches; previously
# only "uniXXXX" names were recognized.
uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')

def unicodeForDefaultGlyphName(glyphName):
  """Return the codepoint encoded in a default-style glyph name, or None.

  Accepts both "uniXXXX" and "uXXXX" (4-8 uppercase hex digits).
  """
  m = uniNameRe.match(glyphName)
  if m is not None:
    try:
      return int(m.group(1), 16)
    except ValueError:
      # group(1) is guaranteed hex by the regex; kept for safety
      pass
  return None
def loadAGL(filename):  # -> { 2126: 'Omega', ... }
  """Parse an Adobe Glyph List file into {codepoint(int): glyphname(str)},
  skipping '#' comments, blank lines and multi-codepoint entries."""
  mapping = {}
  with open(filename, 'r') as f:
    for rawline in f:
      line = rawline.strip()
      if not line or line.startswith('#'):
        continue
      name, uc = tuple([field.strip() for field in line.split(';')])
      if ' ' not in uc:
        # single codepoint => 1:1 mapping
        mapping[int(uc, 16)] = name
  return mapping
def loadLocalNamesDB(fonts, agl, diacriticComps):
  """Build a combined glyph-name database from fonts, the AGL and the
  diacritic compositions.

  Returns (uc2names, name2ucs, allNames):
    uc2names: { 2126: ['Omega', ...], ... }
    name2ucs: { 'Omega': set([2126, ...]), ... }  (values are sets)
    allNames: OrderedDict used as an ordered set of glyph names
  NOTE(review): if `fonts` is empty, uc2names stays None and the
  setdefault calls below would raise -- callers appear to always pass
  at least one font; confirm.
  """
  uc2names = None            # { 2126: ['Omega', ...], ...}
  allNames = OrderedDict()   # {'Omega':True, ...}
  for font in fonts:
    _uc2names = font.getCharacterMapping()  # { 2126: ['Omega', ...], ...}
    if uc2names is None:
      # first font: adopt its mapping directly
      uc2names = _uc2names
    else:
      # subsequent fonts: merge, de-duplicating names per codepoint
      for uc, _names in _uc2names.iteritems():
        names = uc2names.setdefault(uc, [])
        for name in _names:
          if name not in names:
            names.append(name)
    for g in font:
      allNames.setdefault(g.name, True)
  # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
  aglName2Ucs = {}
  for uc, name in agl.iteritems():
    aglName2Ucs.setdefault(name, []).append(uc)
  # Fold in composed-diacritic glyph names that the fonts may not contain yet.
  for glyphName, comp in diacriticComps.iteritems():
    aglUCs = aglName2Ucs.get(glyphName)
    if aglUCs is None:
      # Not an AGL name; try to decode a "uniXXXX"/"uXXXX" default name.
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        glyphName2 = agl.get(uc)
        if glyphName2 is not None:
          # prefer the AGL name for this codepoint
          glyphName = glyphName2
        names = uc2names.setdefault(uc, [])
        if glyphName not in names:
          names.append(glyphName)
      allNames.setdefault(glyphName, True)
    else:
      # AGL name: register it for every codepoint the AGL maps it to.
      allNames.setdefault(glyphName, True)
      for uc in aglUCs:
        names = uc2names.get(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        uc2names[uc] = names
  # Invert: name -> set of codepoints.
  name2ucs = {}  # { 'Omega': [2126, ...], ...}
  for uc, names in uc2names.iteritems():
    for name in names:
      name2ucs.setdefault(name, set()).add(uc)
  return uc2names, name2ucs, allNames
def canonicalGlyphName(glyphName, uc2names):
  """Map a default-style glyph name (e.g. "uni2126") to the first local name
  registered for its codepoint; return the input name unchanged otherwise."""
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is None:
    return glyphName
  names = uc2names.get(uc)
  return names[0] if names else glyphName
def parseGlyphComposition(composite):
  """Parse one composition line, e.g. "A+acute:top=Aacute/100,0".

  Returns (glyphName, baseName, accentNames, offset) where accentNames is a
  list of [accentName, anchorName] lists and offset is [x, y] (defaults to
  [0, 0] when no "/x,y" suffix is present).
  """
  parts = composite.split("=")
  target = parts[1].split("/")
  glyphName = target[0]
  if len(target) > 1:
    offset = [int(v) for v in target[1].split(",")]
  else:
    offset = [0, 0]
  recipe = parts[0].split("+")
  baseName = recipe[0]
  accentNames = [accent.split(":") for accent in recipe[1:]]
  return (glyphName, baseName, accentNames, offset)
def loadGlyphCompositions(filename):  # { glyphName => (baseName, accentNames, offset) }
  """Read a composition file (e.g. src/diacritics.txt) into an ordered
  {glyphName: (baseName, accentNames, offset)} mapping. Blank lines and
  "#" comment lines are ignored."""
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for rawline in f:
      entry = rawline.strip()
      if not entry or entry.startswith('#'):
        continue
      glyphName, baseName, accentNames, offset = parseGlyphComposition(entry)
      compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
def rgbaToCSSColor(r=0, g=0, b=0, a=1):
  """Format an RGBA color (components in 0..1) as a CSS color string:
  compact "#rrggbb" hex when fully opaque, "rgba(...)" otherwise."""
  red, green, blue = int(r * 255), int(g * 255), int(b * 255)
  if a == 1:
    return '#%02x%02x%02x' % (red, green, blue)
  return 'rgba(%d,%d,%d,%f)' % (red, green, blue, a)
def unicodeName(cp):
  """Return a display name for a UnicodeData entry, or None.

  Entries whose name starts with "<" (control-style names) are rendered as
  the entry's category name in brackets instead.
  """
  if cp is None or not cp.name:
    return None
  if cp.name.startswith('<'):
    return '[' + cp.categoryName + ']'
  return cp.name
def main():
  """CLI entry point: print a JSON document listing, for every glyph in the
  given UFOs, its name, unicode(s), unicode name and color mark."""
  argparser = ArgumentParser(
    description='Generate info on name, unicodes and color mark for all glyphs')
  argparser.add_argument(
    '-ucd', dest='ucdFile', metavar='<file>', type=str,
    help='UnicodeData.txt file from http://www.unicode.org/')
  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()
  markLibKey = 'com.typemytype.robofont.mark'

  # Reorder so that "Regular" styles come first and take precedence when
  # building the local names database.
  fontPaths = []
  for fontPath in args.fontPaths:
    fontPath = fontPath.rstrip('/ ')
    # BUGFIX: was `if 'regular' or 'Regular' in fontPath:` which is always
    # true (non-empty string literal); test both spellings explicitly.
    if 'regular' in fontPath or 'Regular' in fontPath:
      fontPaths = [fontPath] + fontPaths
    else:
      fontPaths.append(fontPath)

  # BUGFIX: open the reordered list, not args.fontPaths, so the "Regular"
  # precedence established above actually takes effect.
  fonts = [OpenFont(fontPath) for fontPath in fontPaths]
  agl = loadAGL('src/glyphlist.txt')  # { 2126: 'Omega', ... }
  diacriticComps = loadGlyphCompositions('src/diacritics.txt')
  uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)

  ucd = {}
  if args.ucdFile:
    ucd = parseUnicodeDataFile(args.ucdFile)

  # glyphorder.txt lives next to the first UFO; composed diacritics are
  # appended so derived glyphs are listed too.
  glyphorder = OrderedDict()
  with open(os.path.join(os.path.dirname(args.fontPaths[0]), 'glyphorder.txt'), 'r') as f:
    for name in f.read().splitlines():
      if len(name) and name[0] != '#':
        glyphorder[name] = True
  for name in diacriticComps.iterkeys():
    glyphorder[name] = True

  visitedGlyphNames = set()
  glyphs = []
  for font in fonts:
    for name in glyphorder.iterkeys():
      if name in visitedGlyphNames:
        continue
      g = None
      ucs = []
      try:
        g = font[name]
        ucs = g.unicodes
      except:
        # glyph not present in this font; fall back to the names database
        ucs = name2ucs.get(name)
        if ucs is None:
          continue
      color = None
      if g is not None and markLibKey in g.lib:
        # TODO: translate from (r,g,b,a) to #RRGGBB (skip A)
        rgba = g.lib[markLibKey]
        if isinstance(rgba, list) or isinstance(rgba, tuple):
          color = rgbaToCSSColor(*rgba)
      elif name in diacriticComps:
        color = '<derived>'
      # Each record is name[, unicode[, unicodeName[, color]]]
      if len(ucs):
        for uc in ucs:
          ucName = unicodeName(ucd.get(uc))
          if not ucName and uc >= 0xE000 and uc <= 0xF8FF:
            ucName = '[private use %04X]' % uc
          if color:
            glyph = [name, uc, ucName, color]
          elif ucName:
            glyph = [name, uc, ucName]
          else:
            glyph = [name, uc]
          glyphs.append(glyph)
      else:
        glyph = [name, None, None, color] if color else [name]
        glyphs.append(glyph)
      visitedGlyphNames.add(name)

  # Emit one JSON array element per line for easy diffing.
  print('{"glyphs":[')
  prefix = '  '
  for g in glyphs:
    print(prefix + json.dumps(g))
    if prefix == '  ':
      prefix = ', '
  print(']}')

if __name__ == '__main__':
  main()

65
misc/gen-glyphorder.py Executable file
View File

@ -0,0 +1,65 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, plistlib
from collections import OrderedDict
from argparse import ArgumentParser
def parseGlyphComposition(composite):
  """Parse a composition recipe like "A+acute:top=Aacute/100,0" into
  (glyphName, baseName, accentNames, offset)."""
  recipe, _, target = composite.partition("=")
  targetParts = target.split("/")
  glyphName = targetParts[0]
  # optional "/x,y" suffix gives the placement offset
  offset = [0, 0]
  if len(targetParts) > 1:
    offset = [int(v) for v in targetParts[1].split(",")]
  accents = recipe.split("+")
  baseName = accents[0]
  accentNames = [accent.split(":") for accent in accents[1:]]
  return (glyphName, baseName, accentNames, offset)
def loadGlyphCompositions(filename):  # { glyphName => (baseName, accentNames, offset) }
  # Read a diacritic composition file (e.g. src/diacritics.txt) into an
  # ordered mapping; blank lines and "#" comment lines are skipped.
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for line in f:
      line = line.strip()
      if len(line) > 0 and line[0] != '#':
        glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
        compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
def main():
  """Print the union of public.glyphOrder lists from the given UFO files,
  one glyph name per line."""
  argparser = ArgumentParser(description='Generate glyph order list from UFO files')
  argparser.add_argument('fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO files')
  args = argparser.parse_args()
  glyphorderUnion = OrderedDict()

  # Put "Regular" fonts first so their glyph order takes precedence.
  fontPaths = []
  for fontPath in args.fontPaths:
    # BUGFIX: was `if 'regular' or 'Regular' in fontPath:` which is always
    # true (non-empty string literal); test both spellings explicitly.
    if 'regular' in fontPath or 'Regular' in fontPath:
      fontPaths = [fontPath] + fontPaths
    else:
      fontPaths.append(fontPath)

  for fontPath in fontPaths:
    libPlist = plistlib.readPlist(os.path.join(fontPath, 'lib.plist'))
    if 'public.glyphOrder' in libPlist:
      for name in libPlist['public.glyphOrder']:
        glyphorderUnion[name] = True

  # incorporate src/diacritics.txt
  # diacriticComps = loadGlyphCompositions('src/diacritics.txt')
  # for glyphName in diacriticComps.iterkeys():
  #   glyphorderUnion[glyphName] = True

  glyphorderUnionNames = glyphorderUnion.keys()
  print('\n'.join(glyphorderUnionNames))

if __name__ == '__main__':
  main()

37
misc/gen-kern.py Normal file
View File

@ -0,0 +1,37 @@
def parseFeaList(s):
  """Expand a space-separated glyph list; a token like "A-Z" is expanded
  to the inclusive range of single characters between its endpoints."""
  names = []
  for token in s.split(' '):
    if '-' in token:
      # single-character range, e.g. "a-f"
      lo, hi = token.split('-')
      for code in range(ord(lo), ord(hi) + 1):
        names.append(chr(code))
    else:
      names.append(token)
  return names
# Character classes used for kerning-pair generation.
UC_ROMAN = parseFeaList('A-Z AE AEacute Aacute Abreve Acircumflex Adieresis Agrave Alpha Alphatonos Amacron Aogonek Aogonek.NAV Aring Aringacute Atilde Beta Cacute Ccaron Ccedilla Ccircumflex Chi Dcaron Dcroat Delta Eacute Ebreve Ecaron Ecircumflex Edieresis Edotaccent Egrave Emacron Eng Eogonek Eogonek.NAV Epsilon Epsilontonos Eta Etatonos Eth Gamma Gbreve Gcircumflex Gcommaaccent Germandbls Hbar Hcircumflex IJ Iacute Ibreve Icircumflex Idieresis Igrave Imacron Iogonek Iota Iotadieresis Iotatonos Itilde Jcircumflex Kappa Kcommaaccent Lacute Lambda Lcaron Lcommaaccent Ldot Lslash Nacute Ncaron Ncommaaccent Ntilde Nu OE Oacute Obreve Ocircumflex Odieresis Ograve Ohungarumlaut Omacron Omega Omegatonos Omicron Omicrontonos Oogonek Oogonek.NAV Oslash Oslashacute Otilde Phi Pi Psi Racute Rcaron Rcommaaccent Rho Sacute Scaron Scedilla Scircumflex Sigma Tau Tbar Tcaron Theta Thorn Uacute Ubreve Ucircumflex Udieresis Ugrave Uhungarumlaut Umacron Uogonek Upsilon Upsilondieresis Upsilontonos Uring Utilde Wacute Wcircumflex Wdieresis Wgrave Xi Yacute Ycircumflex Ydieresis Ygrave Zacute Zcaron Zdotaccent Zeta ampersand uni010A uni0120 uni0162 uni0218 uni021A uni037F')
LC_ROMAN = parseFeaList('a-z ae aeacute aacute abreve acircumflex adieresis agrave alpha alphatonos amacron aogonek aogonek.NAV aring aringacute atilde beta cacute ccaron ccedilla ccircumflex chi dcaron dcroat delta eacute ebreve ecaron ecircumflex edieresis edotaccent egrave emacron eng eogonek eogonek.NAV epsilon epsilontonos eta etatonos eth gamma gbreve gcircumflex gcommaaccent germandbls hbar hcircumflex ij iacute ibreve icircumflex idieresis igrave imacron iogonek iota iotadieresis iotatonos itilde jcircumflex kappa kcommaaccent lacute lambda lcaron lcommaaccent ldot lslash nacute ncaron ncommaaccent ntilde nu oe oacute obreve ocircumflex odieresis ograve ohungarumlaut omacron omega omegatonos omicron omicrontonos oogonek oogonek.NAV oslash oslashacute otilde phi pi psi racute rcaron rcommaaccent rho sacute scaron scedilla scircumflex sigma tau tbar tcaron theta thorn uacute ubreve ucircumflex udieresis ugrave uhungarumlaut umacron uogonek upsilon upsilondieresis upsilontonos uring utilde wacute wcircumflex wdieresis wgrave xi yacute ycircumflex ydieresis ygrave zacute zcaron zdotaccent zeta ampersand uni010B uni0121 uni0163 uni0219 uni021B uni03F3')
UC_AF = parseFeaList('A-F')
LC_AF = parseFeaList('a-f')
LNUM = parseFeaList('zero one two three four five six seven eight nine')
HEXNUM = LNUM + UC_AF + LC_AF
ALL = UC_ROMAN + LC_ROMAN + LNUM

# Emit kerning plist fragments: one <dict> per left-hand glyph, with a
# fixed -256 kern value against every right-hand glyph.
glyphs = HEXNUM
for g in glyphs:
  print ' <key>%s</key><dict>' % g
  # NOTE(review): the inner loop reuses the name `g`, clobbering the outer
  # loop variable; harmless here since the outer `g` is only used above.
  for g in glyphs:
    print ' <key>%s</key><integer>-256</integer>' % g
  print ' </dict>'
# print ', '.join(LC_ROMAN)

10
misc/gen-num-pairs.js Normal file
View File

@ -0,0 +1,10 @@
// Print every two-character combination of the hex-digit glyphs,
// one line per leading character.
const chars = '0 1 2 3 4 5 6 7 8 9 A B C D E F a b c d e f'.split(' ')
chars.forEach(first => {
  const row = chars.map(second => first + second)
  console.log(row.join(' '))
})

63
misc/glyf-props.py Executable file
View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys
from argparse import ArgumentParser
from robofab.objects.objectsRF import OpenFont
dryRun = False
def renameProps(font, renames):
  """Rename glyph-lib properties on every glyph of `font`.

  `renames` is a sequence of (currentName, newName) pairs. Raises Exception
  if a destination name already exists on a glyph (to avoid clobbering).
  """
  for g in font:
    for currname, newname in renames:
      if currname not in g.lib:
        continue
      if newname in g.lib:
        raise Exception('property %r already exist in glyph %r' % (newname, g))
      g.lib[newname] = g.lib[currname]
      del g.lib[currname]
def main():
  # CLI entry point: parse "-m old=new[,...]" rename specs and apply them to
  # every UFO font given on the command line; "-dry" skips saving.
  argparser = ArgumentParser(
    description='Operate on UFO glyf "lib" properties')
  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')
  argparser.add_argument(
    '-m', dest='renameProps', metavar='<currentName>=<newName>[,...]', type=str,
    help='Rename properties')
  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
  args = argparser.parse_args()
  # NOTE(review): this creates a local that shadows the module-level dryRun
  # default; nothing else reads the module-level value, so it is harmless.
  dryRun = args.dryRun
  renames = []
  if args.renameProps:
    renames = [tuple(s.split('=')) for s in args.renameProps.split(',')]
    # TODO: verify data structure
    print('renaming properties:')
    for rename in renames:
      print(' %r => %r' % rename)
  # Strip trailing slashes from font paths and iterate
  for fontPath in [s.rstrip('/ ') for s in args.fontPaths]:
    font = OpenFont(fontPath)
    if len(renames):
      print('Renaming properties in %s' % fontPath)
      renameProps(font, renames)
    # In dry-run mode the font is mutated in memory but never saved.
    if dryRun:
      print('Saving changes to %s (dry run)' % fontPath)
    if not dryRun:
      print('Saving changes to %s' % fontPath)
      font.save()

if __name__ == '__main__':
  main()

25
misc/mac-tmp-disk-mount.sh Executable file
View File

@ -0,0 +1,25 @@
#!/bin/bash
set -e
# Run from the repository root (parent of this script's directory).
cd "$(dirname "$0")/.."

# Create if needed
if [[ ! -f build/tmp.sparseimage ]]; then
  echo "Creating sparse disk image with case-sensitive file system build/tmp.sparseimage"
  mkdir -p build
  # JHFS+X = journaled HFS+ with case sensitivity; SPARSE grows on demand
  # up to the 1 GB cap.
  hdiutil create build/tmp.sparseimage \
    -size 1g \
    -type SPARSE \
    -fs JHFS+X \
    -volname tmp
fi

# Mount if needed
if ! (diskutil info build/tmp >/dev/null); then
  echo "Mounting sparse disk image with case-sensitive file system at build/tmp"
  hdiutil attach build/tmp.sparseimage \
    -readwrite \
    -mountpoint "$(pwd)/build/tmp" \
    -nobrowse \
    -noautoopen \
    -noidmereveal
fi

5
misc/mac-tmp-disk-unmount.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash
set -e
# Run from the repository root so the relative mount point resolves.
cd "$(dirname "$0")/.."
# Unmount the scratch image mounted by misc/mac-tmp-disk-mount.sh.
diskutil unmount build/tmp

41
misc/notify Executable file
View File

@ -0,0 +1,41 @@
#!/bin/bash
#
# Shows macOS desktop notifications when a command completes.
# Depending on exit status of the command, a different notification message is shown.
#
# Examples:
#   misc/notify make -j 8 >/dev/null
#     Make all font styles in all formats without printing detailed messages
#
#   misc/notify make Regular
#     Make the regular style in all formats
#
HAS_NOTIFIER=true
if ! (which terminal-notifier >/dev/null); then
  HAS_NOTIFIER=false
  echo "$0: terminal-notifier not found in PATH (will not notify)" >&2
  echo "$0: You can install through: brew install terminal-notifier"
fi

# Run the wrapped command and capture its exit status.
CMDS="$@"
"$@"
STATUS=$?

if $HAS_NOTIFIER; then
  if [[ $STATUS -eq 0 ]]; then
    # success: short-lived notification
    terminal-notifier \
      -title "$1 ✅" \
      -message "$CMDS" \
      -activate com.apple.Terminal \
      -timeout 8 >/dev/null &
  else
    # failure: include the exit status and linger longer
    terminal-notifier \
      -title "$1 failed ❌" \
      -message "$CMDS => $STATUS" \
      -activate com.apple.Terminal \
      -timeout 20 >/dev/null &
  fi
fi

# Propagate the wrapped command's exit status.
exit $STATUS

View File

@ -0,0 +1,300 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import os
import sys
from booleanOperations import BooleanOperationManager
from cu2qu.ufo import fonts_to_quadratic
from fontTools.misc.transform import Transform
from robofab.world import OpenFont
from ufo2ft import compileOTF, compileTTF
from fontbuild.decomposeGlyph import decomposeGlyph
from fontbuild.features import readFeatureFile, writeFeatureFile
from fontbuild.generateGlyph import generateGlyph
from fontbuild.instanceNames import setInfoRF
from fontbuild.italics import italicizeGlyph
from fontbuild.markFeature import RobotoFeatureCompiler, RobotoKernWriter
from fontbuild.mitreGlyph import mitreGlyph
from fontbuild.mix import Mix,Master,narrowFLGlyph
class FontProject:
    """Build pipeline for one font family: mixes masters, italicizes,
    generates diacritics, and writes UFO/OTF/TTF outputs.

    Configuration comes from an INI file (sections "main", "glyphs", "res")
    read with ConfigParser.
    """

    def __init__(self, basefont, basedir, configfile, buildTag=''):
        self.basefont = basefont
        self.basedir = basedir
        self.config = ConfigParser.RawConfigParser()
        self.configfile = os.path.join(self.basedir, configfile)
        self.config.read(self.configfile)
        self.buildTag = buildTag

        # Resource files: diacritic recipes, AGL glyph list, glyph order.
        self.diacriticList = [
            line.strip() for line in self.openResource("diacriticfile")
            if not line.startswith("#")]
        self.adobeGlyphList = dict(
            line.split(";") for line in self.openResource("agl_glyphlistfile")
            if not line.startswith("#"))
        self.glyphOrder = self.openResource("glyphorder")

        # map exceptional glyph names in Roboto to names in the AGL
        roboNames = (
            ('Obar', 'Ocenteredtilde'), ('obar', 'obarred'),
            ('eturn', 'eturned'), ('Iota1', 'Iotaafrican'))
        for roboName, aglName in roboNames:
            self.adobeGlyphList[roboName] = self.adobeGlyphList[aglName]

        self.builddir = "out"
        # Per-category glyph name lists from the [glyphs] config section.
        self.decompose = self.config.get("glyphs","decompose").split()
        self.predecompose = self.config.get("glyphs","predecompose").split()
        self.lessItalic = self.config.get("glyphs","lessitalic").split()
        self.deleteList = self.config.get("glyphs","delete").split()
        self.noItalic = self.config.get("glyphs","noitalic").split()
        self.buildOTF = False
        self.compatible = False
        self.generatedFonts = []

    def openResource(self, name):
        # Read a resource file (path taken from the [res] config section)
        # and return its lines.
        with open(os.path.join(
                self.basedir, self.config.get("res", name))) as resourceFile:
            resource = resourceFile.read()
        return resource.splitlines()

    def generateOutputPath(self, font, ext):
        # out/<Family><EXT>/<Family>-<Style>.<ext>, creating the directory.
        family = font.info.familyName.replace(" ", "")
        style = font.info.styleName.replace(" ", "")
        path = os.path.join(self.basedir, self.builddir, family + ext.upper())
        if not os.path.exists(path):
            os.makedirs(path)
        return os.path.join(path, "%s-%s.%s" % (family, style, ext))

    def generateFont(self, mix, names, italic=False, swapSuffixes=None, stemWidth=185,
                     italicMeanYCenter=-825, italicNarrowAmount=1):
        # Build one style: mix (or copy) masters, optionally italicize,
        # generate diacritics, decompose, set metadata, and save UFO/OTF.
        # `names` is a "/"-separated family/style name string.
        n = names.split("/")
        log("---------------------\n%s %s\n----------------------" %(n[0],n[1]))
        log(">> Mixing masters")
        if isinstance( mix, Mix):
            f = mix.generateFont(self.basefont)
        else:
            f = mix.copy()
        if italic == True:
            log(">> Italicizing")
            i = 0
            for g in f:
                i += 1
                if i % 10 == 0: print g.name  # progress indicator
                if g.name == "uniFFFD":
                    continue
                decomposeGlyph(f, g)
                removeGlyphOverlap(g)
                # "lessitalic" glyphs get a 9 degree slant instead of 10.
                if g.name in self.lessItalic:
                    italicizeGlyph(f, g, 9, stemWidth=stemWidth,
                                   meanYCenter=italicMeanYCenter,
                                   narrowAmount=italicNarrowAmount)
                elif g.name not in self.noItalic:
                    italicizeGlyph(f, g, 10, stemWidth=stemWidth,
                                   meanYCenter=italicMeanYCenter,
                                   narrowAmount=italicNarrowAmount)
                if g.width != 0:
                    g.width += 10
            # set the oblique flag in fsSelection
            f.info.openTypeOS2Selection.append(9)
        if swapSuffixes != None:
            # Swap suffixed alternates (e.g. "a.italic") into the base glyphs.
            for swap in swapSuffixes:
                swapList = [g.name for g in f if g.name.endswith(swap)]
                for gname in swapList:
                    print gname
                    swapContours(f, gname.replace(swap,""), gname)
        for gname in self.predecompose:
            if f.has_key(gname):
                decomposeGlyph(f, f[gname])
        log(">> Generating glyphs")
        generateGlyphs(f, self.diacriticList, self.adobeGlyphList)
        log(">> Copying features")
        readFeatureFile(f, self.basefont.features.text)
        log(">> Decomposing")
        for g in f:
            if len(g.components) > 0:
                decomposeGlyph(f, g)
        # for gname in self.decompose:
        #     if f.has_key(gname):
        #         decomposeGlyph(f, f[gname])
        # NOTE(review): this local is never used — the dict below re-reads
        # the option via getcfg('copyrightHolderName').
        copyrightHolderName = ''
        if self.config.has_option('main', 'copyrightHolderName'):
            copyrightHolderName = self.config.get('main', 'copyrightHolderName')
        def getcfg(name, fallback=''):
            # Read an optional [main] config value with a default.
            if self.config.has_option('main', name):
                return self.config.get('main', name)
            else:
                return fallback
        setInfoRF(f, n, {
            'foundry': getcfg('foundry'),
            'foundryURL': getcfg('foundryURL'),
            'designer': getcfg('designer'),
            'copyrightHolderName': getcfg('copyrightHolderName'),
            'build': self.buildTag,
            'version': getcfg('version'),
            'license': getcfg('license'),
            'licenseURL': getcfg('licenseURL'),
        })
        if not self.compatible:
            cleanCurves(f)
        deleteGlyphs(f, self.deleteList)
        log(">> Generating font files")
        ufoName = self.generateOutputPath(f, "ufo")
        f.save(ufoName)
        self.generatedFonts.append(ufoName)
        if self.buildOTF:
            log(">> Generating OTF file")
            newFont = OpenFont(ufoName)
            otfName = self.generateOutputPath(f, "otf")
            saveOTF(newFont, otfName, self.glyphOrder)

    def generateTTFs(self):
        """Build TTF for each font generated since last call to generateTTFs."""
        fonts = [OpenFont(ufo) for ufo in self.generatedFonts]
        self.generatedFonts = []
        log(">> Converting curves to quadratic")
        # using a slightly higher max error (e.g. 0.0025 em), dots will have
        # fewer control points and look noticeably different
        max_err = 0.001
        if self.compatible:
            # convert all fonts together so point structures stay compatible
            fonts_to_quadratic(fonts, max_err_em=max_err, dump_stats=True, reverse_direction=True)
        else:
            for font in fonts:
                fonts_to_quadratic([font], max_err_em=max_err, dump_stats=True, reverse_direction=True)
        log(">> Generating TTF files")
        for font in fonts:
            ttfName = self.generateOutputPath(font, "ttf")
            log(os.path.basename(ttfName))
            saveOTF(font, ttfName, self.glyphOrder, truetype=True)
def transformGlyphMembers(g, m):
    # Apply transform matrix `m` to a glyph's width, outline, anchors and
    # component offsets.
    # NOTE(review): `Point` and the .Transform() API are FontLab-era and are
    # not among this module's visible imports — confirm before use.
    g.width = int(g.width * m.a)  # m.a is the horizontal scale factor
    g.Transform(m)
    for a in g.anchors:
        p = Point(a.p)
        p.Transform(m)
        a.p = p
    for c in g.components:
        # Assumes that components have also been individually transformed
        p = Point(0,0)
        d = Point(c.deltas[0])
        d.Transform(m)
        p.Transform(m)
        # keep only the offset part of the transform (subtract transformed origin)
        d1 = d - p
        c.deltas[0].x = d1.x
        c.deltas[0].y = d1.y
        s = Point(c.scale)
        s.Transform(m)
        #c.scale = s
def swapContours(f,gName1,gName2):
    """Exchange the outlines and advance widths of two glyphs in font f.

    Logs and returns without changes if either glyph is missing.
    """
    try:
        glyphA = f[gName1]
        glyphB = f[gName2]
    except KeyError:
        log("swapGlyphs failed for %s %s" % (gName1, gName2))
        return
    backup = glyphA.copy()
    # move B's contours onto A
    while glyphA.contours:
        glyphA.removeContour(0)
    for contour in glyphB.contours:
        glyphA.appendContour(contour)
    glyphA.width = glyphB.width
    # move the saved copy of A's contours onto B
    while glyphB.contours:
        glyphB.removeContour(0)
    for contour in backup.contours:
        glyphB.appendContour(contour)
    glyphB.width = backup.width
def log(msg):
    # Build-progress logger; currently just prints to stdout.
    print msg
def generateGlyphs(f, glyphNames, glyphList={}):
    """Generate composed (diacritic) glyphs in font f.

    glyphNames is a list of composition recipe lines; comment ("#...") and
    empty entries are skipped. glyphList maps exceptional glyph names to AGL
    names and is passed through to generateGlyph. (The mutable {} default is
    kept for interface compatibility; it is only read, never mutated here.)
    """
    log(">> Generating diacritics")
    glyphnames = [gname for gname in glyphNames if not gname.startswith("#") and gname != ""]
    # BUGFIX: iterate the filtered list — the original looped over the
    # unfiltered `glyphNames`, passing comment/empty lines to generateGlyph.
    for glyphName in glyphnames:
        generateGlyph(f, glyphName, glyphList)
def cleanCurves(f):
    # Outline cleanup pass applied to non-"compatible" builds: currently
    # only removes contour overlaps; mitring and cubic->quadratic conversion
    # are disabled below.
    log(">> Removing overlaps")
    for g in f:
        removeGlyphOverlap(g)
    # log(">> Mitring sharp corners")
    # for g in f:
    #     mitreGlyph(g, 3., .7)
    # log(">> Converting curves to quadratic")
    # for g in f:
    #     glyphCurvesToQuadratic(g)
def deleteGlyphs(f, deleteList):
    # Remove each named glyph from font f, ignoring names not present.
    for name in deleteList:
        if f.has_key(name):
            f.removeGlyph(name)
def removeGlyphOverlap(glyph):
    """Remove overlaps in contours from a glyph."""
    #TODO(jamesgk) verify overlaps exist first, as per library's recommendation
    manager = BooleanOperationManager()
    contours = glyph.contours
    # clear the glyph, then write the union of the old contours back into it
    glyph.clearContours()
    manager.union(contours, glyph.getPointPen())
def saveOTF(font, destFile, glyphOrder, truetype=False):
    """Save a RoboFab font as an OTF/TTF binary using ufo2ft.

    (Docstring fixed: this uses ufo2ft's compileOTF/compileTTF, not ufo2fdk.)
    """
    if truetype:
        otf = compileTTF(font, featureCompilerClass=RobotoFeatureCompiler,
                         kernWriter=RobotoKernWriter, glyphOrder=glyphOrder,
                         convertCubics=False,
                         useProductionNames=False)
    else:
        otf = compileOTF(font, featureCompilerClass=RobotoFeatureCompiler,
                         kernWriter=RobotoKernWriter, glyphOrder=glyphOrder,
                         useProductionNames=False)
    otf.save(destFile)

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1 @@
https://github.com/google/roboto/tree/master/scripts/lib/fontbuild

View File

@ -0,0 +1,6 @@
"""
fontbuild
A collection of font production tools written for FontLab
"""
version = "0.1"

View File

@ -0,0 +1,173 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from numpy.linalg import lstsq
def alignCorners(glyph, va, subsegments):
out = va.copy()
# for i,c in enumerate(subsegments):
# segmentCount = len(glyph.contours[i].segments) - 1
# n = len(c)
# for j,s in enumerate(c):
# if j < segmentCount:
# seg = glyph.contours[i].segments[j]
# if seg.type == "line":
# subIndex = subsegmentIndex(i,j,subsegments)
# out[subIndex] = alignPoints(va[subIndex])
for i,c in enumerate(subsegments):
segmentCount = len(glyph.contours[i].segments)
n = len(c)
for j,s in enumerate(c):
if j < segmentCount - 1:
segType = glyph.contours[i].segments[j].type
segnextType = glyph.contours[i].segments[j+1].type
next = j+1
elif j == segmentCount -1 and s[1] > 3:
segType = glyph.contours[i].segments[j].type
segNextType = "line"
next = j+1
elif j == segmentCount:
segType = "line"
segnextType = glyph.contours[i].segments[1].type
if glyph.name == "J":
print s[1]
print segnextType
next = 1
else:
break
if segType == "line" and segnextType == "line":
subIndex = subsegmentIndex(i,j,subsegments)
pts = va[subIndex]
ptsnext = va[subsegmentIndex(i,next,subsegments)]
# out[subIndex[-1]] = (out[subIndex[-1]] - 500) * 3 + 500 #findCorner(pts, ptsnext)
# print subIndex[-1], subIndex, subsegmentIndex(i,next,subsegments)
try:
out[subIndex[-1]] = findCorner(pts, ptsnext)
except:
pass
# print glyph.name, "Can't find corner: parallel lines"
return out
def subsegmentIndex(contourIndex, segmentIndex, subsegments):
    """Flat point indices covered by one segment's subsegment run.

    ``subsegments`` holds, per contour, a list of ``[cumulativeIndex, count]``
    pairs. Indices wrap around the contour and are shifted by the point
    counts of all preceding contours. Returns a numpy int array.
    """
    # Points contributed by the contours before this one.
    offset = 0
    for c in subsegments[:contourIndex]:
        offset += c[-1][0]
    contour = subsegments[contourIndex]
    total = contour[-1][0]                 # point count of this contour (wrap modulus)
    first = contour[segmentIndex - 1][0]   # cumulative index where this segment starts
    count = contour[segmentIndex][1]       # number of subsegments in this segment
    return np.array([(first + k) % total + offset for k in range(count + 1)])
def alignPoints(pts, start=None, end=None):
    """Project each point in ``pts`` onto the line through start--end.

    When either endpoint is omitted, the line is estimated from the points
    themselves with ``fitLine``. Returns a corrected copy of ``pts``.
    """
    if start == None or end == None:
        start, end = fitLine(pts)
    aligned = pts.copy()
    for idx in range(len(pts)):
        aligned[idx] = nearestPoint(start, end, pts[idx])
    return aligned
def findCorner(pp, nn):
    # Recover the sharp corner between two straight point runs: fit a line
    # through ``pp`` (previous segment) and ``nn`` (next segment) and return
    # their intersection. Raises AssertionError for runs that are too short
    # or (near-)parallel; the caller (alignCorners) catches and skips those.
    if len(pp) < 4 or len(nn) < 4:
        assert 0, "line too short to fit"
    pStart,pEnd = fitLine(pp)
    nStart,nEnd = fitLine(nn)
    prev = pEnd - pStart
    next = nEnd - nStart
    # Reject (near-)parallel direction vectors: |cos(angle)| > .999999.
    if np.dot(prev / np.linalg.norm(prev),
        next / np.linalg.norm(next)) > .999999:
        assert 0, "parallel lines"
    if glyph.name is None:
        # Never happens, but here to fix a bug in Python 2.7 with -OO
        # NOTE(review): ``glyph`` is not defined in this scope; if this branch
        # were ever reached it would raise NameError, not print.
        print ''
    return lineIntersect(pStart, pEnd, nStart, nEnd)
def lineIntersect((x1,y1),(x2,y2),(x3,y3),(x4,y4)):
    # Intersection of the infinite line through (x1,y1)-(x2,y2) with the one
    # through (x3,y3)-(x4,y4), by the standard 2x2 determinant formula.
    # (Python 2 tuple-parameter syntax.)
    x12 = x1 - x2
    x34 = x3 - x4
    y12 = y1 - y2
    y34 = y3 - y4
    det = x12 * y34 - y12 * x34
    if det == 0:
        # NOTE(review): only prints, then falls through and divides by the
        # zero ``det`` below (ZeroDivisionError for Python numbers).
        print "parallel!"
    a = x1 * y2 - y1 * x2
    b = x3 * y4 - y3 * x4
    x = (a * x34 - b * x12) / det
    y = (a * y34 - b * y12) / det
    return (x,y)
def fitLineLSQ(pts):
    """Least-squares fit of y = m*x + c through ``pts``; returns (m, c).

    Uses an explicit design matrix, so it breaks down for vertical lines.
    """
    count = len(pts)
    design = np.ones((count, 2))
    design[:, 0] = pts[:, 0]
    return lstsq(design, pts[:, 1])[0]
def fitLine(pts):
    """Estimate a line through ``pts``; returns (start, start + direction).

    ``direction`` is the mean of the normalized successive-difference
    vectors and ``start`` the mean position, both computed with the first
    and last sample dropped to reduce endpoint noise. Assumes the points
    already form a roughly smooth line.
    """
    count = len(pts)
    if count < 1:
        return (0,0),(0,0)
    steps = np.zeros((count - 1, 2))
    for idx in range(count - 1):
        delta = pts[idx] - pts[idx + 1]
        steps[idx] = delta / np.linalg.norm(delta)
    direction = np.mean(steps[1:-1], axis=0)
    start = np.mean(pts[1:-1], axis=0)
    return start, start + direction
def nearestPoint(a, b, c):
    """Orthogonal projection of point ``c`` onto the line through ``a`` and ``b``."""
    span = b - a
    length = np.linalg.norm(span)
    if length == 0:
        raise Exception("Line segment cannot be 0 length")
    return a + span * np.dot((c - a) / length, span / length)
# pts = np.array([[1,1],[2,2],[3,3],[4,4]])
# pts2 = np.array([[1,0],[2,0],[3,0],[4,0]])
# print alignPoints(pts2, start = pts[0], end = pts[0]+pts[0])
# # print findCorner(pts,pts2)

View File

@ -0,0 +1,77 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getGlyph(gname, font):
    """Look up glyph ``gname`` in ``font``; None when the glyph is absent."""
    if font.has_key(gname):
        return font[gname]
    return None
def getComponentByName(f, g, componentName):
    """First component of glyph ``g`` whose base glyph is ``componentName``.

    Returns None when no component matches. ``f`` is unused but kept for
    interface compatibility with the other helpers here.
    """
    for component in g.components:
        if component.baseGlyph == componentName:
            return component
    return None
def getAnchorByName(g, anchorName):
    """First anchor on glyph ``g`` named ``anchorName``, or None."""
    matches = [anchor for anchor in g.anchors if anchor.name == anchorName]
    if matches:
        return matches[0]
    return None
def moveMarkAnchors(f, g, anchorName, accentName, dx, dy):
if "top"==anchorName:
anchors = f[accentName].anchors
for anchor in anchors:
if "mkmktop_acc" == anchor.name:
for anc in g.anchors:
if anc.name == "top":
g.removeAnchor(anc)
break
g.appendAnchor("top", (anchor.x + int(dx), anchor.y + int(dy)))
elif anchorName in ["bottom", "bottomu"]:
anchors = f[accentName].anchors
for anchor in anchors:
if "mkmkbottom_acc" == anchor.name:
for anc in g.anchors:
if anc.name == "bottom":
g.removeAnchor(anc)
break
x = anchor.x + int(dx)
for anc in anchors:
if "top" == anc.name:
x = anc.x + int(dx)
g.appendAnchor("bottom", (x, anchor.y + int(dy)))
def alignComponentToAnchor(f, glyphName, baseName, accentName, anchorName):
    """Snap the ``accentName`` component of ``glyphName`` onto an anchor.

    The component is offset so that the accent's mark anchor
    ("_" + anchorName) lands on the ``anchorName`` anchor of ``baseName``.
    Silently does nothing when any glyph or anchor is missing.
    """
    g = getGlyph(glyphName, f)
    base = getGlyph(baseName, f)
    accent = getGlyph(accentName, f)
    if g == None or base == None or accent == None:
        return
    baseAnchor = getAnchorByName(base, anchorName)
    markAnchor = getAnchorByName(accent, "_" + anchorName)
    if baseAnchor == None or markAnchor == None:
        return
    dx = baseAnchor.x - markAnchor.x
    dy = baseAnchor.y - markAnchor.y
    component = getComponentByName(f, g, accentName)
    component.offset = (dx, dy)
    moveMarkAnchors(f, g, anchorName, accentName, dx, dy)
def alignComponentsToAnchors(f, glyphName, baseName, accentNames):
    """Align each (accent, anchor) pair of ``accentNames`` on ``glyphName``.

    Entries of length 1 carry no anchor name and are skipped.
    """
    for entry in accentNames:
        if len(entry) == 1:
            continue
        accentName, anchorName = entry[0], entry[1]
        alignComponentToAnchor(f, glyphName, baseName, accentName, anchorName)

View File

@ -0,0 +1,102 @@
#! /usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts a cubic bezier curve to a quadratic spline with
exactly two off curve points.
"""
import numpy
from numpy import array,cross,dot
from fontTools.misc import bezierTools
from robofab.objects.objectsRF import RSegment
def replaceSegments(contour, segments):
    """Replace every segment of ``contour`` with the given robofab segments."""
    while len(contour):
        contour.removeSegment(0)
    for seg in segments:
        coords = [(p.x, p.y) for p in seg.points]
        contour.appendSegment(seg.type, coords, seg.smooth)
def calcIntersect(a,b,c,d):
    # Intersection of the line through points a-b with the line through c-d
    # (numpy arrays). seterr makes the parallel-line case raise a
    # FloatingPointError instead of warning, so it can be caught below.
    numpy.seterr(all='raise')
    e = b-a
    f = d-c
    p = array([-e[1], e[0]])  # normal of the a-b direction
    try:
        h = dot((a-c),p) / dot(f,p)
    except:
        # Dump the offending points before re-raising so the failing
        # geometry can be identified from the console.
        print a,b,c,d
        raise
    return c + dot(f,h)
def simpleConvertToQuadratic(p0,p1,p2,p3):
    """Sketch of a single-segment cubic-to-quadratic conversion.

    The candidate quadratic off-curve point is the intersection of the
    cubic's end tangents.
    """
    # Fix: ``array(i.x, i.y)`` passed i.y as numpy's dtype argument and
    # raised TypeError; the coordinates must be one sequence.
    p = [array((i.x, i.y)) for i in [p0,p1,p2,p3]]
    off = calcIntersect(p[0],p[1],p[2],p[3])
    # NOTE(review): ``off`` is computed but never returned -- this helper
    # looks unfinished and is not called elsewhere in this module; kept
    # returning None to avoid guessing at an intended contract.

# OFFCURVE_VECTOR_CORRECTION = -.015
OFFCURVE_VECTOR_CORRECTION = 0
def convertToQuadratic(p0,p1,p2,p3):
    """Approximate one cubic segment with two quadratic-style off-curves.

    The cubic is split at t = 0.5 and each half's off-curve point is taken
    as the intersection of that half's end tangents. Returns
    (onCurve1, offCurve1, offCurve2, onCurve2); when the tangents are
    parallel, the original four points are returned unchanged.
    """
    # TODO: test for accuracy and subdivide further if needed
    coords = [(pt.x, pt.y) for pt in [p0,p1,p2,p3]]
    firstHalf, secondHalf = bezierTools.splitCubicAtT(
        coords[0], coords[1], coords[2], coords[3], .5)
    half1 = [array([c[0], c[1]]) for c in firstHalf]
    half2 = [array([c[0], c[1]]) for c in secondHalf]
    on1 = firstHalf[0]
    on2 = secondHalf[3]
    try:
        off1 = calcIntersect(half1[0], half1[1], half1[2], half1[3])
        off2 = calcIntersect(half2[0], half2[1], half2[2], half2[3])
    except:
        # Degenerate tangents: fall back to the untouched cubic.
        return (coords[0], coords[1], coords[2], coords[3])
    off1 = (on1 - off1) * OFFCURVE_VECTOR_CORRECTION + off1
    off2 = (on2 - off2) * OFFCURVE_VECTOR_CORRECTION + off2
    return (on1, off1, off2, on2)
def cubicSegmentToQuadratic(c,sid):
    # Convert the "curve" segment at index ``sid`` of contour ``c`` into a
    # robofab "qcurve" RSegment. The previous segment's last point supplies
    # the cubic's start anchor.
    segment = c[sid]
    if (segment.type != "curve"):
        print "Segment type not curve"
        return
    #pSegment,junk = getPrevAnchor(c,sid)
    pSegment = c[sid-1] #assumes that a curve type will always be proceeded by another point on the same contour
    points = convertToQuadratic(pSegment.points[-1],segment.points[0],
        segment.points[1],segment.points[2])
    # Coordinates are truncated to ints; points[0] (the start anchor) is
    # dropped because it belongs to the previous segment.
    return RSegment(
        'qcurve', [[int(i) for i in p] for p in points[1:]], segment.smooth)
def glyphCurvesToQuadratic(g):
    # In-place conversion: rebuild every contour of glyph ``g`` with its
    # cubic "curve" segments replaced by quadratic approximations; all other
    # segment types pass through untouched.
    for c in g:
        segments = []
        for i in range(len(c)):
            s = c[i]
            if s.type == "curve":
                try:
                    segments.append(cubicSegmentToQuadratic(c, i))
                except Exception:
                    # Report which glyph/segment failed before propagating.
                    print g.name, i
                    raise
            else:
                segments.append(s)
        replaceSegments(c, segments)

View File

@ -0,0 +1,422 @@
#! /opt/local/bin/pythonw2.7
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["SubsegmentPen","SubsegmentsToCurvesPen", "segmentGlyph", "fitGlyph"]
from fontTools.pens.basePen import BasePen
import numpy as np
from numpy import array as v
from numpy.linalg import norm
from robofab.pens.adapterPens import GuessSmoothPointPen
from robofab.pens.pointPen import BasePointToSegmentPen
class SubsegmentsToCurvesPointPen(BasePointToSegmentPen):
    # Point-pen front end for SubsegmentsToCurvesPen: receives the original
    # glyph's points, tracks smooth flags, and forwards segments so curves
    # can be refit from the flattened ``subsegmentGlyph``.
    def __init__(self, glyph, subsegmentGlyph, subsegments):
        BasePointToSegmentPen.__init__(self)
        self.glyph = glyph
        self.subPen = SubsegmentsToCurvesPen(None, glyph.getPen(), subsegmentGlyph, subsegments)

    def setMatchTangents(self, b):
        # Toggle tangent-matching in the underlying fitting pen.
        self.subPen.matchTangents = b

    def _flushContour(self, segments):
        #
        # adapted from robofab.pens.adapterPens.rfUFOPointPen
        #
        assert len(segments) >= 1
        # if we only have one point and it has a name, we must have an anchor
        first = segments[0]
        segmentType, points = first
        pt, smooth, name, kwargs = points[0]
        if len(segments) == 1 and name != None:
            self.glyph.appendAnchor(name, pt)
            return
        else:
            # The last on-curve point doubles as the contour's move point.
            segmentType, points = segments[-1]
            movePt, smooth, name, kwargs = points[-1]
            if smooth:
                # last point is smooth, set pen to start smooth
                self.subPen.setLastSmooth(True)
            if segmentType == 'line':
                del segments[-1]
        self.subPen.moveTo(movePt)
        # do the rest of the segments
        for segmentType, points in segments:
            isSmooth = True in [smooth for pt, smooth, name, kwargs in points]
            pp = [pt for pt, smooth, name, kwargs in points]
            if segmentType == "line":
                assert len(pp) == 1
                if isSmooth:
                    self.subPen.smoothLineTo(pp[0])
                else:
                    self.subPen.lineTo(pp[0])
            elif segmentType == "curve":
                assert len(pp) == 3
                if isSmooth:
                    self.subPen.smoothCurveTo(*pp)
                else:
                    self.subPen.curveTo(*pp)
            elif segmentType == "qcurve":
                assert 0, "qcurve not supported"
            else:
                assert 0, "illegal segmentType: %s" % segmentType
        self.subPen.closePath()

    def addComponent(self, glyphName, transform):
        # Components are passed through untouched.
        self.subPen.addComponent(glyphName, transform)
class SubsegmentsToCurvesPen(BasePen):
    # Replays a glyph's outline while replacing each drawn segment with a
    # least-squares cubic fitted through the matching run of flattened
    # points in ``subsegmentGlyph`` (indexed by ``subsegments``).
    # Uses Python 2 tuple-parameter syntax throughout.
    def __init__(self, glyphSet, otherPen, subsegmentGlyph, subsegments):
        BasePen.__init__(self, None)
        self.otherPen = otherPen          # receives the refit outline
        self.ssglyph = subsegmentGlyph    # flattened (subsegmented) glyph
        self.subsegments = subsegments    # per-contour [cumIndex, count] pairs
        self.contourIndex = -1
        self.segmentIndex = -1
        self.lastPoint = (0,0)
        self.lastSmooth = False
        self.nextSmooth = False

    def setLastSmooth(self, b):
        self.lastSmooth = b

    def _moveTo(self, (x, y)):
        self.contourIndex += 1
        self.segmentIndex = 0
        self.startPoint = (x,y)
        # Start the output contour at the flattened glyph's first point.
        p = self.ssglyph.contours[self.contourIndex][0].points[0]
        self.otherPen.moveTo((p.x, p.y))
        self.lastPoint = (x,y)

    def _lineTo(self, (x, y)):
        self.segmentIndex += 1
        # Lines map 1:1 onto the flattened point at the segment boundary.
        index = self.subsegments[self.contourIndex][self.segmentIndex][0]
        p = self.ssglyph.contours[self.contourIndex][index].points[0]
        self.otherPen.lineTo((p.x, p.y))
        self.lastPoint = (x,y)
        self.lastSmooth = False

    def smoothLineTo(self, (x, y)):
        self.lineTo((x,y))
        self.lastSmooth = True

    def smoothCurveTo(self, (x1, y1), (x2, y2), (x3, y3)):
        self.nextSmooth = True
        self.curveTo((x1, y1), (x2, y2), (x3, y3))
        self.nextSmooth = False
        self.lastSmooth = True

    def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
        # Refit this curve segment through its run of flattened points.
        self.segmentIndex += 1
        c = self.ssglyph.contours[self.contourIndex]
        n = len(c)
        startIndex = (self.subsegments[self.contourIndex][self.segmentIndex-1][0])
        segmentCount = (self.subsegments[self.contourIndex][self.segmentIndex][1])
        endIndex = (startIndex + segmentCount + 1) % (n)
        indices = [(startIndex + i) % (n) for i in range(segmentCount + 1)]
        points = np.array([(c[i].points[0].x, c[i].points[0].y) for i in indices])
        # Neighboring points outside the run, used to smooth end tangents.
        prevPoint = (c[(startIndex - 1)].points[0].x, c[(startIndex - 1)].points[0].y)
        nextPoint = (c[(endIndex) % n].points[0].x, c[(endIndex) % n].points[0].y)
        prevTangent = prevPoint - points[0]
        nextTangent = nextPoint - points[-1]
        tangent1 = points[1] - points[0]
        tangent3 = points[-2] - points[-1]
        prevTangent /= np.linalg.norm(prevTangent)
        nextTangent /= np.linalg.norm(nextTangent)
        tangent1 /= np.linalg.norm(tangent1)
        tangent3 /= np.linalg.norm(tangent3)
        tangent1, junk = self.smoothTangents(tangent1, prevTangent, self.lastSmooth)
        tangent3, junk = self.smoothTangents(tangent3, nextTangent, self.nextSmooth)
        if self.matchTangents == True:
            # Fit with tangent constraints, then force the off-curve points
            # exactly onto the constrained tangent directions.
            cp = fitBezier(points, tangent1, tangent3)
            cp[1] = norm(cp[1] - cp[0]) * tangent1 / norm(tangent1) + cp[0]
            cp[2] = norm(cp[2] - cp[3]) * tangent3 / norm(tangent3) + cp[3]
        else:
            cp = fitBezier(points)
        self.otherPen.curveTo((cp[1,0], cp[1,1]), (cp[2,0], cp[2,1]), (cp[3,0], cp[3,1]))
        self.lastPoint = (x3, y3)
        self.lastSmooth = False

    def smoothTangents(self,t1,t2,forceSmooth = False):
        # When forced (smooth point) or nearly opposite, average the two
        # tangents into one mirrored pair; results are re-normalized.
        if forceSmooth or (abs(t1.dot(t2)) > .95 and norm(t1-t2) > 1):
            t1 = (t1 - t2) / 2
            t2 = -t1
        return t1 / norm(t1), t2 / norm(t2)

    def _closePath(self):
        self.otherPen.closePath()

    def _endPath(self):
        self.otherPen.endPath()

    def addComponent(self, glyphName, transformation):
        self.otherPen.addComponent(glyphName, transformation)
class SubsegmentPointPen(BasePointToSegmentPen):
    """Point-pen front end for SubsegmentPen.

    Collects the incoming outline as segments and forwards them to a
    SubsegmentPen that writes a flattened copy into ``glyph``.
    """
    def __init__(self, glyph, resolution):
        BasePointToSegmentPen.__init__(self)
        self.glyph = glyph
        self.resolution = resolution
        # Fix: ``resolution`` was stored but never forwarded, so SubsegmentPen
        # always ran with its default of 25 and segmentGlyph's ``resolution``
        # argument was silently ignored.
        self.subPen = SubsegmentPen(None, glyph.getPen(), resolution)

    def getSubsegments(self):
        """Return a copy of the subsegment index structure built so far."""
        return self.subPen.subsegments[:]

    def _flushContour(self, segments):
        #
        # adapted from robofab.pens.adapterPens.rfUFOPointPen
        #
        assert len(segments) >= 1
        # if we only have one point and it has a name, we must have an anchor
        first = segments[0]
        segmentType, points = first
        pt, smooth, name, kwargs = points[0]
        if len(segments) == 1 and name != None:
            self.glyph.appendAnchor(name, pt)
            return
        else:
            # The last on-curve point doubles as the contour's move point.
            segmentType, points = segments[-1]
            movePt, smooth, name, kwargs = points[-1]
            if segmentType == 'line':
                del segments[-1]
        self.subPen.moveTo(movePt)
        # do the rest of the segments
        for segmentType, points in segments:
            points = [pt for pt, smooth, name, kwargs in points]
            if segmentType == "line":
                assert len(points) == 1
                self.subPen.lineTo(points[0])
            elif segmentType == "curve":
                assert len(points) == 3
                self.subPen.curveTo(*points)
            elif segmentType == "qcurve":
                assert 0, "qcurve not supported"
            else:
                assert 0, "illegal segmentType: %s" % segmentType
        self.subPen.closePath()

    def addComponent(self, glyphName, transform):
        # Components are passed through untouched.
        self.subPen.addComponent(glyphName, transform)
class SubsegmentPen(BasePen):
    # Flattens an outline into short line subsegments roughly ``resolution``
    # font units long, recording in ``self.subsegments`` how many flattened
    # points each original segment produced (as [cumulativeIndex, count]
    # pairs per contour). Uses Python 2 tuple-parameter syntax.
    def __init__(self, glyphSet, otherPen, resolution=25):
        BasePen.__init__(self,glyphSet)
        self.resolution = resolution      # target length of one subsegment
        self.otherPen = otherPen          # receives the flattened outline
        self.subsegments = []
        self.startContour = (0,0)
        self.contourIndex = -1

    def _moveTo(self, (x, y)):
        self.contourIndex += 1
        self.segmentIndex = 0
        self.subsegments.append([])
        self.subsegmentCount = 0
        self.subsegments[self.contourIndex].append([self.subsegmentCount, 0])
        self.startContour = (x,y)
        self.lastPoint = (x,y)
        self.otherPen.moveTo((x,y))

    def _lineTo(self, (x, y)):
        # Split the line into ``count`` equal steps (at least one).
        count = self.stepsForSegment((x,y),self.lastPoint)
        if count < 1:
            count = 1
        self.subsegmentCount += count
        self.subsegments[self.contourIndex].append([self.subsegmentCount, count])
        for i in range(1,count+1):
            x1 = self.lastPoint[0] + (x - self.lastPoint[0]) * i/float(count)
            y1 = self.lastPoint[1] + (y - self.lastPoint[1]) * i/float(count)
            self.otherPen.lineTo((x1,y1))
        self.lastPoint = (x,y)

    def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
        # Sample the cubic at ``count`` parameter steps (at least two) using
        # forward differencing, then emit the samples as lines.
        count = self.stepsForSegment((x3,y3),self.lastPoint)
        if count < 2:
            count = 2
        self.subsegmentCount += count
        self.subsegments[self.contourIndex].append([self.subsegmentCount,count])
        x = self.renderCurve((self.lastPoint[0],x1,x2,x3),count)
        y = self.renderCurve((self.lastPoint[1],y1,y2,y3),count)
        assert len(x) == count
        if (x3 == self.startContour[0] and y3 == self.startContour[1]):
            # Closing curve: drop the final sample, which duplicates the
            # contour's start point.
            count -= 1
        for i in range(count):
            self.otherPen.lineTo((x[i],y[i]))
        self.lastPoint = (x3,y3)

    def _closePath(self):
        if not (self.lastPoint[0] == self.startContour[0] and self.lastPoint[1] == self.startContour[1]):
            self._lineTo(self.startContour)
        # round values used by otherPen (a RoboFab SegmentToPointPen) to decide
        # whether to delete duplicate points at start and end of contour
        #TODO(jamesgk) figure out why we have to do this hack, then remove it
        c = self.otherPen.contour
        for i in [0, -1]:
            c[i] = [[round(n, 5) for n in c[i][0]]] + list(c[i][1:])
        self.otherPen.closePath()

    def _endPath(self):
        self.otherPen.endPath()

    def addComponent(self, glyphName, transformation):
        self.otherPen.addComponent(glyphName, transformation)

    def stepsForSegment(self, p1, p2):
        # Number of subsegments for a segment spanning p1..p2, from its
        # straight-line distance divided by the resolution (may be 0).
        dist = np.linalg.norm(v(p1) - v(p2))
        out = int(dist / self.resolution)
        return out

    def renderCurve(self,p,count):
        # Forward-difference evaluation of the cubic with control values
        # ``p`` at t = 1/count, 2/count, ..., 1. Returns ``count`` samples
        # (the t=0 start point is not included).
        curvePoints = []
        t = 1.0 / float(count)
        temp = t * t
        f = p[0]
        fd = 3 * (p[1] - p[0]) * t
        fdd_per_2 = 3 * (p[0] - 2 * p[1] + p[2]) * temp
        fddd_per_2 = 3 * (3 * (p[1] - p[2]) + p[3] - p[0]) * temp * t
        fddd = fddd_per_2 + fddd_per_2
        fdd = fdd_per_2 + fdd_per_2
        fddd_per_6 = fddd_per_2 * (1.0 / 3)
        for i in range(count):
            f = f + fd + fdd_per_2 + fddd_per_6
            fd = fd + fdd + fddd_per_2
            fdd = fdd + fddd
            fdd_per_2 = fdd_per_2 + fddd_per_2
            curvePoints.append(f)
        return curvePoints
def fitBezierSimple(pts):
    """Least-squares cubic Bezier through ``pts``; returns (4, 2) control points.

    Uses chord-length parameterization. Two extra rows weight the first and
    last sample by 100 so the fitted endpoints stay pinned to the data.
    """
    T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
    tsum = np.sum(T)
    T = [0] + T
    T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
    T = [[t**3, t**2, t, 1] for t in T]
    T = np.array(T)
    # Cubic Bezier basis matrix: rows of T become Bernstein weights.
    M = np.array([[-1, 3, -3, 1],
                  [ 3, -6, 3, 0],
                  [-3, 3, 0, 0],
                  [ 1, 0, 0, 0]])
    T = T.dot(M)
    T = np.concatenate((T, np.array([[100,0,0,0], [0,0,0,100]])))
    # Fix: the matching right-hand-side rows were commented out, leaving T
    # with two more rows than pts and making lstsq fail on a shape mismatch.
    pts = np.vstack((pts, pts[0] * 100, pts[-1] * 100))
    C = np.linalg.lstsq(T, pts)
    return C[0]
def subdivideLineSegment(pts):
    """Insert the midpoint between each consecutive pair of points.

    A run of n points becomes 2n-1 points; used to give short runs enough
    samples for a stable cubic fit. Returns a new numpy array.
    """
    doubled = [pts[0]]
    for prev, cur in zip(pts[:-1], pts[1:]):
        doubled.append(prev + (cur - prev) * .5)
        doubled.append(cur)
    return np.array(doubled)
def fitBezier(pts,tangent0=None,tangent3=None):
    """Least-squares cubic Bezier through ``pts`` with pinned endpoints.

    The first and last sample are used verbatim as the Bezier endpoints and
    only the two off-curve points are solved for, under chord-length
    parameterization. When ``tangent0``/``tangent3`` are given, heavily
    weighted constraint rows force the off-curve points onto those tangent
    lines. Returns a (4, 2) array [P0, P1, P2, P3].
    """
    # Fix: this was ``len(pts < 4)``, i.e. len() of a boolean array -- truthy
    # for ANY non-empty input -- so every call subdivided. Only runs shorter
    # than 4 points actually need the extra midpoints.
    if len(pts) < 4:
        pts = subdivideLineSegment(pts)
    T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
    tsum = np.sum(T)
    T = [0] + T
    T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
    T = [[t**3, t**2, t, 1] for t in T]
    T = np.array(T)
    # Cubic Bezier basis matrix: rows of T become Bernstein weights.
    M = np.array([[-1, 3, -3, 1],
                  [ 3, -6, 3, 0],
                  [-3, 3, 0, 0],
                  [ 1, 0, 0, 0]])
    T = T.dot(M)
    n = len(pts)
    # Move the fixed-endpoint contribution to the right-hand side, leaving
    # only the two unknown off-curve points in the system.
    pout = pts.copy()
    pout[:,0] -= (T[:,0] * pts[0,0]) + (T[:,3] * pts[-1,0])
    pout[:,1] -= (T[:,0] * pts[0,1]) + (T[:,3] * pts[-1,1])
    # Interleave x/y rows so the unknown vector is [P1x, P1y, P2x, P2y].
    TT = np.zeros((n*2,4))
    for i in range(n):
        for j in range(2):
            TT[i*2,j*2] = T[i,j+1]
            TT[i*2+1,j*2+1] = T[i,j+1]
    pout = pout.reshape((n*2,1),order="C")
    # Fix: was ``tangent0 != None`` -- elementwise on numpy arrays, which
    # cannot be used as a truth value; identity comparison is intended.
    if tangent0 is not None and tangent3 is not None:
        tangentConstraintsT = np.array([
            [tangent0[1], -tangent0[0], 0, 0],
            [0, 0, tangent3[1], -tangent3[0]]
        ])
        tangentConstraintsP = np.array([
            [pts[0][1] * -tangent0[0] + pts[0][0] * tangent0[1]],
            [pts[-1][1] * -tangent3[0] + pts[-1][0] * tangent3[1]]
        ])
        TT = np.concatenate((TT, tangentConstraintsT * 1000))
        pout = np.concatenate((pout, tangentConstraintsP * 1000))
    C = np.linalg.lstsq(TT,pout)[0].reshape((2,2))
    return np.array([pts[0], C[0], C[1], pts[-1]])
def segmentGlyph(glyph, resolution=50):
    """Flatten ``glyph`` into short line subsegments.

    Returns (flattenedCopy, subsegmentIndexStructure).
    """
    flattened = glyph.copy()
    flattened.clear()
    pen = SubsegmentPointPen(flattened, resolution)
    glyph.drawPoints(pen)
    return flattened, pen.getSubsegments()
def fitGlyph(glyph, subsegmentGlyph, subsegmentIndices, matchTangents=True):
    """Refit ``glyph``'s curves through its flattened counterpart.

    Draws ``glyph`` through a SubsegmentsToCurvesPointPen backed by the
    flattened ``subsegmentGlyph``; returns the refit copy with the
    flattened glyph's advance width.
    """
    fitted = glyph.copy()
    fitted.clear()
    pen = SubsegmentsToCurvesPointPen(fitted, subsegmentGlyph, subsegmentIndices)
    pen.setMatchTangents(matchTangents)
    glyph.drawPoints(pen)
    fitted.width = subsegmentGlyph.width
    return fitted
if __name__ == '__main__':
    # Ad-hoc smoke test: forward-difference a cubic whose control points lie
    # on the diagonal (so it traces a straight line) in 10 steps and print
    # the samples scaled by 10.
    p = SubsegmentPen(None, None)
    pts = np.array([
        [0,0],
        [.5,.5],
        [.5,.5],
        [1,1]
    ])
    print np.array(p.renderCurve(pts,10)) * 10

View File

@ -0,0 +1,23 @@
def decomposeGlyph(font, glyph):
    """Replace all components of ``glyph`` with their copied outlines."""
    if not len(glyph.components):
        return
    deepCopyContours(font, glyph, glyph, (0, 0), (1, 1))
    glyph.clearComponents()
def deepCopyContours(font, parent, component, offset, scale):
    """Copy contours into ``parent`` from ``component``.

    Recurses through nested components, accumulating each level's offset
    (added) and scale (multiplied). The top-level call with
    ``component == parent`` only recurses and copies nothing itself.
    """
    for nested in component.components:
        nestedOffset = (offset[0] + nested.offset[0], offset[1] + nested.offset[1])
        nestedScale = (scale[0] * nested.scale[0], scale[1] * nested.scale[1])
        deepCopyContours(font, parent, font[nested.baseGlyph], nestedOffset, nestedScale)
    if component == parent:
        return
    for contour in component:
        outline = contour.copy()
        outline.scale(scale)
        outline.move(offset)
        parent.appendContour(outline)

189
misc/pylib/fontbuild/features.py Executable file
View File

@ -0,0 +1,189 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from feaTools import parser
from feaTools.writers.fdkSyntaxWriter import FDKSyntaxFeatureWriter
class FilterFeatureWriter(FDKSyntaxFeatureWriter):
    """Feature writer to detect invalid references and duplicate definitions."""

    def __init__(self, refs=set(), name=None, isFeature=False):
        """Initializes the set of known references, empty by default."""
        # NOTE(review): the mutable default ``refs=set()`` is shared between
        # calls that omit the argument; callers in this file always pass one.
        self.refs = refs
        self.featureNames = set()
        self.lookupNames = set()
        self.tableNames = set()
        self.languageSystems = set()
        super(FilterFeatureWriter, self).__init__(
            name=name, isFeature=isFeature)
        # error to print when undefined reference is found in glyph class
        self.classErr = ('Undefined reference "%s" removed from glyph class '
            'definition %s.')
        # error to print when undefined reference is found in sub or pos rule
        subErr = ['Substitution rule with undefined reference "%s" removed']
        if self._name:
            subErr.append(" from ")
            subErr.append("feature" if self._isFeature else "lookup")
            subErr.append(' "%s"' % self._name)
        subErr.append(".")
        self.subErr = "".join(subErr)
        self.posErr = self.subErr.replace("Substitution", "Positioning")

    def _subwriter(self, name, isFeature):
        """Use this class for nested expressions e.g. in feature definitions."""
        return FilterFeatureWriter(self.refs, name, isFeature)

    def _flattenRefs(self, refs, flatRefs):
        """Flatten a list of references."""
        for ref in refs:
            if type(ref) == list:
                self._flattenRefs(ref, flatRefs)
            elif ref != "'":  # ignore contextual class markings
                flatRefs.append(ref)

    def _checkRefs(self, refs, errorMsg):
        """Check a list of references found in a sub or pos rule."""
        flatRefs = []
        self._flattenRefs(refs, flatRefs)
        for ref in flatRefs:
            # trailing apostrophes should be ignored
            if ref[-1] == "'":
                ref = ref[:-1]
            if ref not in self.refs:
                print errorMsg % ref
                # insert an empty instruction so that we can't end up with an
                # empty block, which is illegal syntax
                super(FilterFeatureWriter, self).rawText(";")
                return False
        return True

    def classDefinition(self, name, contents):
        """Check that contents are valid, then add name to known references."""
        if name in self.refs:
            return
        newContents = []
        for ref in contents:
            if ref not in self.refs and ref != "-":
                print self.classErr % (ref, name)
            else:
                newContents.append(ref)
        self.refs.add(name)
        super(FilterFeatureWriter, self).classDefinition(name, newContents)

    def gsubType1(self, target, replacement):
        """Check a sub rule with one-to-one replacement."""
        if self._checkRefs([target, replacement], self.subErr):
            super(FilterFeatureWriter, self).gsubType1(target, replacement)

    def gsubType4(self, target, replacement):
        """Check a sub rule with many-to-one replacement."""
        if self._checkRefs([target, replacement], self.subErr):
            super(FilterFeatureWriter, self).gsubType4(target, replacement)

    def gsubType6(self, precedingContext, target, trailingContext, replacement):
        """Check a sub rule with contextual replacement."""
        refs = [precedingContext, target, trailingContext, replacement]
        if self._checkRefs(refs, self.subErr):
            super(FilterFeatureWriter, self).gsubType6(
                precedingContext, target, trailingContext, replacement)

    def gposType1(self, target, value):
        """Check a single positioning rule."""
        if self._checkRefs([target], self.posErr):
            super(FilterFeatureWriter, self).gposType1(target, value)

    def gposType2(self, target, value, needEnum=False):
        """Check a pair positioning rule."""
        # NOTE(review): unlike the other rules ``target`` is not wrapped in a
        # list here -- presumably it already is one; confirm against feaTools.
        if self._checkRefs(target, self.posErr):
            super(FilterFeatureWriter, self).gposType2(target, value, needEnum)

    # these rules may contain references, but they aren't present in Roboto
    def gsubType3(self, target, replacement):
        raise NotImplementedError

    def feature(self, name):
        """Adds a feature definition only once."""
        if name not in self.featureNames:
            self.featureNames.add(name)
            return super(FilterFeatureWriter, self).feature(name)
        # we must return a new writer even if we don't add it to this one
        return FDKSyntaxFeatureWriter(name, True)

    def lookup(self, name):
        """Adds a lookup block only once."""
        if name not in self.lookupNames:
            self.lookupNames.add(name)
            return super(FilterFeatureWriter, self).lookup(name)
        # we must return a new writer even if we don't add it to this one
        return FDKSyntaxFeatureWriter(name, False)

    def languageSystem(self, langTag, scriptTag):
        """Adds a language system instruction only once."""
        system = (langTag, scriptTag)
        if system not in self.languageSystems:
            self.languageSystems.add(system)
            super(FilterFeatureWriter, self).languageSystem(langTag, scriptTag)

    def table(self, name, data):
        """Adds a table only once."""
        if name in self.tableNames:
            return
        self.tableNames.add(name)
        # Emit the table block directly into the writer's instruction list.
        self._instructions.append("table %s {" % name)
        self._instructions.extend([" %s %s;" % line for line in data])
        self._instructions.append("} %s;" % name)
def compileFeatureRE(name):
    """Compile a regex matching the named feature block."""
    # this is the pattern used internally by feaTools:
    # https://github.com/typesupply/feaTools/blob/master/Lib/feaTools/parser.py
    pattern = list(parser.featureContentRE)
    pattern.insert(2, name)
    pattern.insert(6, name)
    return re.compile("".join(pattern))
def updateFeature(font, name, value):
    """Add a feature definition to the font, or replace an existing one."""
    featureRE = compileFeatureRE(name)
    text = font.features.text
    if featureRE.search(text):
        font.features.text = featureRE.sub(value, text)
    else:
        font.features.text = text + "\n" + value
def readFeatureFile(font, text, prepend=True):
    """Incorporate valid definitions from feature text into the font.

    ``prepend`` controls whether ``text`` goes before or after the font's
    existing feature text before filtering.
    """
    writer = FilterFeatureWriter(set(font.keys()))
    existing = font.features.text
    combined = (text + existing) if prepend else (existing + text)
    parser.parseFeatures(writer, combined)
    font.features.text = writer.write()
def writeFeatureFile(font, path):
    """Write the font's features to an external file.

    Uses a ``with`` block so the handle is closed even when the write
    raises (the original left the file open on error).
    """
    with open(path, "w") as fout:
        fout.write(font.features.text)

View File

@ -0,0 +1,97 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from string import find
from anchors import alignComponentsToAnchors, getAnchorByName
def parseComposite(composite):
    """Parse a composition rule like "base+accent:anchor=name/dx,dy".

    Returns (glyphName, baseName, accentNames, offset) where ``accentNames``
    is a list of [accent, anchor] pairs and ``offset`` is [dx, dy]
    (defaulting to [0, 0] when no "/dx,dy" suffix is present).
    """
    sides = composite.split("=")
    target = sides[1].split("/")
    glyphName = target[0]
    if len(target) > 1:
        offset = [int(v) for v in target[1].split(",")]
    else:
        offset = [0, 0]
    parts = sides[0].split("+")
    baseName = parts.pop(0)
    accentNames = [p.split(":") for p in parts]
    return (glyphName, baseName, accentNames, offset)
def copyMarkAnchors(f, g, srcname, width):
    """Copy mark-positioning anchors from glyph ``srcname`` onto ``g``.

    "top_dd"/"bottom_dd"/"top0315" anchors are shifted right by ``width``;
    "top" is mirrored as "parent_top" and "bottom" copied when not already
    present; finally a "top" anchor is synthesized from "parent_top" if the
    glyph still has none.
    """
    for anchor in f[srcname].anchors:
        if anchor.name in ("top_dd", "bottom_dd", "top0315"):
            g.appendAnchor(anchor.name, (anchor.x + width, anchor.y))
        if (anchor.name == "top" and
                not any(a.name == "parent_top" for a in g.anchors)):
            g.appendAnchor("parent_top", anchor.position)
        if (anchor.name == "bottom" and
                not any(a.name == "bottom" for a in g.anchors)):
            g.appendAnchor("bottom", anchor.position)
    if any(a.name == "top" for a in g.anchors):
        return
    parentTop = getAnchorByName(g, "parent_top")
    if parentTop is not None:
        g.appendAnchor("top", parentTop.position)
def generateGlyph(f,gname,glyphList={}):
    # Build one glyph from a composition rule string (see parseComposite for
    # the syntax). Existing glyphs are never overwritten.
    # NOTE(review): the mutable default ``glyphList={}`` is shared between
    # calls that omit it; harmless while it is only read, never written.
    glyphName, baseName, accentNames, offset = parseComposite(gname)
    if f.has_key(glyphName):
        print('Existing glyph "%s" found in font, ignoring composition rule '
              '"%s"' % (glyphName, gname))
        return
    if baseName.find("_") != -1:
        # Underscore rule: a ligature-style glyph. Place each part side by
        # side, advancing by the component's width.
        g = f.newGlyph(glyphName)
        for componentName in baseName.split("_"):
            g.appendComponent(componentName, (g.width, 0))
            g.width += f[componentName].width
        setUnicodeValue(g, glyphList)
    else:
        # Accent rule: let robofab compose base + accents on anchors, then
        # fix up unicode, mark anchors, alignment and the optional offset.
        try:
            f.compileGlyph(glyphName, baseName, accentNames)
        except KeyError as e:
            print('KeyError raised for composition rule "%s", likely "%s" '
                  'anchor not found in glyph "%s"' % (gname, e, baseName))
            return
        g = f[glyphName]
        setUnicodeValue(g, glyphList)
        copyMarkAnchors(f, g, baseName, offset[1] + offset[0])
        if len(accentNames) > 0:
            alignComponentsToAnchors(f, glyphName, baseName, accentNames)
        if offset[0] != 0 or offset[1] != 0:
            g.width += offset[1] + offset[0]
            g.move((offset[0], 0), anchors=False)
def setUnicodeValue(glyph, glyphList):
    """Try to ensure glyph has a unicode value -- used by FDK to make OTFs.

    ``glyphList`` maps glyph names to hex codepoint strings; failing that,
    names of the form "uniXXXX" are decoded directly. Glyphs matching
    neither are left untouched.
    """
    if glyph.name in glyphList:
        glyph.unicode = int(glyphList[glyph.name], 16)
    else:
        # Raw string (was a plain literal) so "\d" is not an invalid-escape
        # warning on newer Pythons; the pattern itself is unchanged.
        uvNameMatch = re.match(r"uni([\dA-F]{4})$", glyph.name)
        if uvNameMatch:
            glyph.unicode = int(uvNameMatch.group(1), 16)

View File

@ -0,0 +1,232 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date
import re
from random import randint
import string
class InstanceNames:
    "Class that allows easy setting of FontLab name fields. TODO: Add proper italic flags"

    # Class-level defaults. setNames() mutates these on the class itself, so
    # values set there apply to every InstanceNames created afterwards.
    foundry = ""
    foundryURL = ""
    copyrightHolderName = ""
    build = ""
    version = "1.0"
    year = date.today().year
    designer = ""
    designerURL = ""
    license = ""
    licenseURL = ""

    def __init__(self,names):
        # names: "Family/Long Style/shortstyle/Abbrev" string or a 4-item list.
        if type(names) == type(" "):
            names = names.split("/")
        #print names
        self.longfamily      = names[0]
        self.longstyle       = names[1]
        self.shortstyle      = names[2]
        self.subfamilyAbbrev = names[3]

        # Derived style attributes parsed out of the long style name.
        self.width   = self._getWidth()
        self.italic  = self._getItalic()
        self.weight  = self._getWeight()
        self.fullname = "%s %s" %(self.longfamily, self.longstyle)
        self.postscript = re.sub(' ','', self.longfamily) + "-" + re.sub(' ','',self.longstyle)

        # Non-"Regular" subfamilies fold the first style word into the family
        # name (classic 4-style family mapping for old environments).
        if self.subfamilyAbbrev != "" and self.subfamilyAbbrev != None and self.subfamilyAbbrev != "Rg":
            self.shortfamily = "%s %s" %(self.longfamily, self.longstyle.split()[0])
        else:
            self.shortfamily = self.longfamily

    def setRFNames(self,f, version=1, versionMinor=0):
        """Write naming/info fields into a RoboFab font's info object."""
        f.info.familyName = self.longfamily
        f.info.styleName = self.longstyle
        f.info.styleMapFamilyName = self.shortfamily
        f.info.styleMapStyleName = self.shortstyle.lower()
        f.info.versionMajor = version
        f.info.versionMinor = versionMinor
        f.info.year = self.year
        if len(self.copyrightHolderName) > 0:
            f.info.copyright = "Copyright %s %s" % (self.year, self.copyrightHolderName)
        f.info.trademark = "%s is a trademark of %s." %(self.longfamily, self.foundry.rstrip('.'))

        if len(self.designer) > 0:
            f.info.openTypeNameDesigner = self.designer
        if len(self.designerURL) > 0:
            f.info.openTypeNameDesignerURL = self.designerURL
        f.info.openTypeNameManufacturer = self.foundry
        f.info.openTypeNameManufacturerURL = self.foundryURL
        f.info.openTypeNameLicense = self.license
        f.info.openTypeNameLicenseURL = self.licenseURL
        f.info.openTypeNameVersion = "Version %i.%i" %(version, versionMinor)

        # Unique ID (name ID 3) includes the build tag when one is set.
        if self.build is not None and len(self.build):
            f.info.openTypeNameUniqueID = "%s:%s:%s" %(self.fullname, self.build, self.year)
        else:
            f.info.openTypeNameUniqueID = "%s:%s" %(self.fullname, self.year)

        # f.info.openTypeNameDescription = ""
        # f.info.openTypeNameCompatibleFullName = ""
        # f.info.openTypeNameSampleText = ""

        # Preferred (typographic) names only needed for non-Regular styles.
        if (self.subfamilyAbbrev != "Rg"):
            f.info.openTypeNamePreferredFamilyName = self.longfamily
            f.info.openTypeNamePreferredSubfamilyName = self.longstyle

        f.info.openTypeOS2WeightClass = self._getWeightCode(self.weight)
        f.info.macintoshFONDName = re.sub(' ','',self.longfamily) + " " + re.sub(' ','',self.longstyle)
        f.info.postscriptFontName = f.info.macintoshFONDName.replace(" ", "-")
        if self.italic:
            # NOTE(review): hard-coded -12 degree slant — confirm it matches
            # the italicize() angle actually used for this family.
            f.info.italicAngle = -12.0

    def setFLNames(self,flFont):
        """Write naming fields and name-table records into a FontLab font."""
        from FL import NameRecord

        flFont.family_name = self.shortfamily
        flFont.mac_compatible = self.fullname
        flFont.style_name = self.longstyle
        flFont.full_name = self.fullname
        flFont.font_name = self.postscript
        flFont.font_style = self._getStyleCode()
        flFont.menu_name = self.shortfamily
        flFont.apple_name = re.sub(' ','',self.longfamily) + " " + re.sub(' ','',self.longstyle)
        flFont.fond_id = randint(1000,9999) # arbitrary classic-Mac FOND id
        flFont.pref_family_name = self.longfamily
        flFont.pref_style_name = self.longstyle
        flFont.weight = self.weight
        flFont.weight_code = self._getWeightCode(self.weight)
        flFont.width = self.width
        if len(self.italic):
            flFont.italic_angle = -12

        # Rebuild the OpenType name table records from scratch.
        # NameRecord(nameID, platformID, encodingID, languageID, text):
        # each record is emitted for Mac (1,0,0) and Windows (3,1,1033).
        fn = flFont.fontnames
        fn.clean()
        #fn.append(NameRecord(0,1,0,0, "Font data copyright %s %s" %(self.foundry, self.year) ))
        #fn.append(NameRecord(0,3,1,1033, "Font data copyright %s %s" %(self.foundry, self.year) ))
        copyrightHolderName = self.copyrightHolderName if len(self.copyrightHolderName) > 0 else self.foundry
        fn.append(NameRecord(0,1,0,0, "Copyright %s %s" %(self.year, copyrightHolderName) ))
        fn.append(NameRecord(0,3,1,1033, "Copyright %s %s" %(self.year, copyrightHolderName) ))
        fn.append(NameRecord(1,1,0,0, self.longfamily ))
        fn.append(NameRecord(1,3,1,1033, self.shortfamily ))
        fn.append(NameRecord(2,1,0,0, self.longstyle ))
        fn.append(NameRecord(2,3,1,1033, self.longstyle ))
        #fn.append(NameRecord(3,1,0,0, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        #fn.append(NameRecord(3,3,1,1033, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        fn.append(NameRecord(3,1,0,0, "%s:%s:%s" %(self.foundry, self.fullname, self.year) ))
        fn.append(NameRecord(3,3,1,1033, "%s:%s:%s" %(self.foundry, self.fullname, self.year) ))
        fn.append(NameRecord(4,1,0,0, self.fullname ))
        fn.append(NameRecord(4,3,1,1033, self.fullname ))
        if len(self.build) > 0:
            fn.append(NameRecord(5,1,0,0, "Version %s%s; %s" %(self.version, self.build, self.year) ))
            fn.append(NameRecord(5,3,1,1033, "Version %s%s; %s" %(self.version, self.build, self.year) ))
        else:
            fn.append(NameRecord(5,1,0,0, "Version %s; %s" %(self.version, self.year) ))
            fn.append(NameRecord(5,3,1,1033, "Version %s; %s" %(self.version, self.year) ))
        fn.append(NameRecord(6,1,0,0, self.postscript ))
        fn.append(NameRecord(6,3,1,1033, self.postscript ))
        fn.append(NameRecord(7,1,0,0, "%s is a trademark of %s." %(self.longfamily, self.foundry) ))
        fn.append(NameRecord(7,3,1,1033, "%s is a trademark of %s." %(self.longfamily, self.foundry) ))
        fn.append(NameRecord(9,1,0,0, self.foundry ))
        fn.append(NameRecord(9,3,1,1033, self.foundry ))
        fn.append(NameRecord(11,1,0,0, self.foundryURL ))
        fn.append(NameRecord(11,3,1,1033, self.foundryURL ))
        fn.append(NameRecord(12,1,0,0, self.designer ))
        fn.append(NameRecord(12,3,1,1033, self.designer ))
        fn.append(NameRecord(13,1,0,0, self.license ))
        fn.append(NameRecord(13,3,1,1033, self.license ))
        fn.append(NameRecord(14,1,0,0, self.licenseURL ))
        fn.append(NameRecord(14,3,1,1033, self.licenseURL ))
        # Typographic family/subfamily (IDs 16/17) only for non-Regular styles.
        if (self.subfamilyAbbrev != "Rg"):
            fn.append(NameRecord(16,3,1,1033, self.longfamily ))
            fn.append(NameRecord(17,3,1,1033, self.longstyle))
        #else:
            #fn.append(NameRecord(17,3,1,1033,""))
        #fn.append(NameRecord(18,1,0,0, re.sub("Italic","It", self.fullname)))

    def _getSubstyle(self, regex):
        """Return the first regex match found in the long style name, or ""."""
        substyle = re.findall(regex, self.longstyle)
        if len(substyle) > 0:
            return substyle[0]
        else:
            return ""

    def _getItalic(self):
        # Returns the matched word ("Italic"/"Oblique"/"Obliq") or "".
        return self._getSubstyle(r"Italic|Oblique|Obliq")

    def _getWeight(self):
        w = self._getSubstyle(r"Extrabold|Superbold|Super|Fat|Black|Bold|Semibold|Demibold|Medium|Light|Thin")
        if w == "":
            w = "Regular"
        return w

    def _getWidth(self):
        w = self._getSubstyle(r"Condensed|Extended|Narrow|Wide")
        if w == "":
            w = "Normal"
        return w

    def _getStyleCode(self):
        """FontLab font_style bit code: 1=Italic, 32=Bold, 64=Regular."""
        #print "shortstyle:", self.shortstyle
        styleCode = 0
        if self.shortstyle == "Bold":
            styleCode = 32
        if self.shortstyle == "Italic":
            styleCode = 1
        if self.shortstyle == "Bold Italic":
            styleCode = 33
        if self.longstyle == "Regular":
            styleCode = 64
        return styleCode

    def _getWeightCode(self,weight):
        """Map a weight name to its OS/2 usWeightClass value (default 400)."""
        if weight == "Thin":
            return 250
        elif weight == "Light":
            return 300
        elif weight == "Bold":
            return 700
        elif weight == "Medium":
            return 500
        elif weight == "Semibold":
            return 600
        elif weight == "Black":
            return 900
        elif weight == "Fat":
            return 900
        return 400
def setNames(f,names,foundry="",version="1.0",build=""):
    """Populate FontLab naming fields on font f from a names spec string/list.

    NOTE: foundry/version/build are stored on the InstanceNames class itself,
    so they also affect any InstanceNames instances created afterwards.
    """
    for attr, value in (("foundry", foundry),
                        ("version", version),
                        ("build", build)):
        setattr(InstanceNames, attr, value)
    InstanceNames(names).setFLNames(f)
def setInfoRF(f, names, attrs={}):
    """Populate a RoboFab font's info from a names spec plus attr overrides.

    attrs may carry any InstanceNames attribute; a 'version' value like
    "2.1" is split into (major, minor), a plain integer string sets major only.
    NOTE: attrs default is a shared mutable dict; it is only read here.
    """
    i = InstanceNames(names)
    version, versionMinor = (1, 0)
    for k,v in attrs.iteritems():
        if k == 'version':
            if v.find('.') != -1:
                version, versionMinor = [int(num) for num in v.split(".")]
            else:
                version = int(v)
        # Override the parsed instance attribute with the supplied value.
        setattr(i, k, v)
    i.setRFNames(f, version=version, versionMinor=versionMinor)

View File

@ -0,0 +1,308 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from fontTools.misc.transform import Transform
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import cg
from scipy.ndimage.filters import gaussian_filter1d as gaussian
from scipy.cluster.vq import vq, whiten
from fontbuild.alignpoints import alignCorners
from fontbuild.curveFitPen import fitGlyph, segmentGlyph
def italicizeGlyph(f, g, angle=10, stemWidth=185, meanYCenter=-825, narrowAmount=1):
    """Slant glyph g in font f by angle degrees, preserving stem shapes.

    Outlines are italicized via italicize(); components and anchors are
    slanted with a plain affine transform (transformFLGlyphMembers).
    """
    unic = g.unicode #save unicode
    glyph = f[g.name]
    slope = np.tanh(math.pi * angle / 180)

    # determine how far on the x axis the glyph should slide
    # to compensate for the slant.
    # meanYCenter:
    #   -600 is a magic number that assumes a 2048 unit em square,
    #   and -825 for a 2816 unit em square. (UPM*0.29296875)
    m = Transform(1, 0, slope, 1, 0, 0)
    xoffset, junk = m.transformPoint((0, meanYCenter))
    m = Transform(narrowAmount, 0, slope, 1, xoffset, 0)

    # Only glyphs with outlines go through the mesh-based italicizer.
    if len(glyph) > 0:
        g2 = italicize(f[g.name], angle, xoffset=xoffset, stemWidth=stemWidth)
        f.insertGlyph(g2, g.name)

    transformFLGlyphMembers(f[g.name], m)

    # insertGlyph can clobber codepoints outside the BMP; put it back.
    if unic > 0xFFFF: #restore unicode
        g.unicode = unic
def italicize(glyph, angle=12, stemWidth=180, xoffset=-50):
    """Return an italicized copy of glyph using a mesh deformation.

    The glyph is decomposed into short segments, turned into a vertex/edge
    mesh, skewed, and then re-solved (recompose) so stems keep their weight;
    finally the original segment structure is re-fitted with fitGlyph.
    Python 2 only: uses tuple-parameter lambdas.
    """
    CURVE_CORRECTION_WEIGHT = .03
    CORNER_WEIGHT = 10

    # decompose the glyph into smaller segments
    ga, subsegments = segmentGlyph(glyph,25)
    va, e = glyphToMesh(ga)
    n = len(va)
    grad = mapEdges(lambda a,(p,n): normalize(p-a), va, e)
    # Dot product of incoming/outgoing directions ~ corner sharpness.
    cornerWeights = mapEdges(lambda a,(p,n): normalize(p-a).dot(normalize(a-n)), grad, e)[:,0].reshape((-1,1))
    smooth = np.ones((n,1)) * CURVE_CORRECTION_WEIGHT

    # Pin on-curve control points and corners harder than smooth curve points.
    controlPoints = findControlPointsInMesh(glyph, va, subsegments)
    smooth[controlPoints > 0] = 1
    smooth[cornerWeights < .6] = CORNER_WEIGHT
    # smooth[cornerWeights >= .9999] = 1

    out = va.copy()
    hascurves = False
    for c in glyph.contours:
        for s in c.segments:
            if s.type == "curve":
                hascurves = True
                break
        if hascurves:
            break
    # NOTE(review): hascurves is computed but not used below — confirm intent.
    if stemWidth > 100:
        # Pre-correct heavier weights with an exaggerated skew/unskew pass.
        outCorrected = skewMesh(recompose(skewMesh(out, angle * 1.6), grad, e, smooth=smooth), -angle * 1.6)
        # out = copyMeshDetails(va, out, e, 6)
    else:
        outCorrected = out

    # create a transform for italicizing
    normals = edgeNormals(out, e)
    center = va + normals * stemWidth * .4
    if stemWidth > 130:
        center[:, 0] = va[:, 0] * .7 + center[:,0] * .3
    centerSkew = skewMesh(center.dot(np.array([[.97,0],[0,1]])), angle * .9)

    # apply the transform
    out = outCorrected + (centerSkew - center)
    out[:,1] = outCorrected[:,1]

    # make some corrections
    smooth = np.ones((n,1)) * .1
    out = alignCorners(glyph, out, subsegments)
    out = copyMeshDetails(skewMesh(va, angle), out, e, 7, smooth=smooth)
    # grad = mapEdges(lambda a,(p,n): normalize(p-a), skewMesh(outCorrected, angle*.9), e)
    # out = recompose(out, grad, e, smooth=smooth)

    out = skewMesh(out, angle * .1)
    out[:,0] += xoffset
    # out[:,1] = outCorrected[:,1]
    # Keep points on the baseline exactly on the baseline.
    out[va[:,1] == 0, 1] = 0

    gOut = meshToGlyph(out, ga)
    # gOut.width *= .97
    # gOut.width += 10
    # return gOut

    # recompose the glyph into original segments
    return fitGlyph(glyph, gOut, subsegments)
def transformFLGlyphMembers(g, m, transformAnchors = True):
    """Apply affine transform m to glyph width, components and anchors.

    Component offsets are transformed relative to the transformed origin so
    the skew's x-translation is not applied twice.
    """
    # g.transform(m)
    g.width = g.width * m[0]
    p = m.transformPoint((0,0))
    for c in g.components:
        d = m.transformPoint(c.offset)
        c.offset = (d[0] - p[0], d[1] - p[1])
    if transformAnchors:
        for a in g.anchors:
            aa = m.transformPoint((a.x,a.y))
            # NOTE(review): only x is written back; y is deliberately(?)
            # left unchanged — confirm before "fixing".
            a.x = aa[0]
            # a.x,a.y = (aa[0] - p[0], aa[1] - p[1])
            # a.x = a.x - m[4]
def glyphToMesh(g):
    """Convert glyph contours to (points, edges).

    points: (n,2) array of on-curve coordinates (first point per segment).
    edges: {vertexIndex: array([prevIndex, nextIndex])} over all contours,
    with indices offset so every contour's ring stays self-contained.
    Contours with fewer than 2 segments are skipped.
    """
    points = []
    edges = {}
    offset = 0
    for c in g.contours:
        if len(c) < 2:
            continue
        for i,prev,next in rangePrevNext(len(c)):
            points.append((c[i].points[0].x, c[i].points[0].y))
            edges[i + offset] = np.array([prev + offset, next + offset], dtype=int)
        offset += len(c)
    return np.array(points), edges
def meshToGlyph(points, g):
    """Inverse of glyphToMesh: write mesh coordinates into a copy of g.

    Vertices are consumed in the same contour/segment order glyphToMesh
    emitted them; contours with fewer than 2 segments are skipped.
    """
    outGlyph = g.copy()
    vertexIndex = 0
    for contour in outGlyph.contours:
        if len(contour) < 2:
            continue
        for segmentIndex in range(len(contour)):
            point = contour[segmentIndex].points[0]
            point.x = points[vertexIndex][0]
            point.y = points[vertexIndex][1]
            vertexIndex += 1
    return outGlyph
def quantizeGradient(grad, book=None):
    """Snap each gradient vector to the nearest codebook direction.

    grad: (n,2) array of direction vectors. book: optional (k,2) codebook;
    defaults to the four axis directions. Returns normalized snapped vectors.
    """
    # BUGFIX: was `if book == None`, which is elementwise (and a truth-value
    # error) when a numpy array codebook is passed; identity test is correct.
    if book is None:
        book = np.array([(1,0),(0,1),(0,-1),(-1,0)])
    # vq returns, per row, the index of the nearest codebook entry.
    indexArray = vq(whiten(grad), book)[0]
    out = book[indexArray]
    for i,v in enumerate(out):
        out[i] = normalize(v)
    return out
def findControlPointsInMesh(glyph, va, subsegments):
    """Mark mesh vertices that start a straight (line) segment of glyph.

    Returns an (n,1) array with 1 at vertices whose originating segment is a
    "line", 0 elsewhere. subsegments pairs each original segment with the
    number of mesh vertices it contributed (s[1]).
    """
    controlPointIndices = np.zeros((len(va),1))
    index = 0
    for i,c in enumerate(subsegments):
        segmentCount = len(glyph.contours[i].segments) - 1
        for j,s in enumerate(c):
            if j < segmentCount:
                if glyph.contours[i].segments[j].type == "line":
                    controlPointIndices[index] = 1
            # Advance by the number of subsegment vertices for this segment.
            index += s[1]
    return controlPointIndices
def recompose(v, grad, e, smooth=1, P=None, distance=None):
n = len(v)
if distance == None:
distance = mapEdges(lambda a,(p,n): norm(p - a), v, e)
if (P == None):
P = mP(v,e)
P += np.identity(n) * smooth
f = v.copy()
for i,(prev,next) in e.iteritems():
f[i] = (grad[next] * distance[next] - grad[i] * distance[i])
out = v.copy()
f += v * smooth
for i in range(len(out[0,:])):
out[:,i] = cg(P, f[:,i])[0]
return out
def mP(v,e):
    """Build the (n,n) Laplacian-like system matrix for recompose().

    Each row i gets 2 on the diagonal and -2/len(edges) for each neighbor,
    so rows sum to zero. Python 2 only (iteritems).
    """
    n = len(v)
    M = np.zeros((n,n))
    for i, edges in e.iteritems():
        w = -2 / float(len(edges))
        for index in edges:
            M[i,index] = w
        M[i,i] = 2
    return M
def normalize(v):
    """Return v scaled to unit length; a zero vector is returned unchanged."""
    length = np.linalg.norm(v)
    return v if length == 0 else v / length
def mapEdges(func,v,e,*args):
    """Apply func(vertex, [neighborVertices], *args) per mesh vertex.

    Returns a copy of v with each row replaced by func's result; v itself is
    not modified. Python 2 only (iteritems).
    """
    b = v.copy()
    for i, edges in e.iteritems():
        b[i] = func(v[i], [v[j] for j in edges], *args)
    return b
def getNormal(a, b, c):
    """Average of the two edge perpendiculars at vertex a.

    b and c are the neighboring vertices. Assumes TT winding direction.
    """
    flip = np.array([1, -1])
    # Rotate each unit edge direction 90 degrees (roll + y sign flip).
    prevPerp = np.roll(normalize(b - a), 1) * flip
    nextPerp = -np.roll(normalize(c - a), 1) * flip
    return normalize((prevPerp + nextPerp) * .5)
def edgeNormals(v,e):
    "Assumes a mesh where each vertex has exactly least two edges"
    # Per-vertex normal from its two ring neighbors. Python 2 only
    # (tuple-parameter lambda).
    return mapEdges(lambda a,(p,n) : getNormal(a,p,n),v,e)
def rangePrevNext(count):
    """Rows of (i, prev, next) index triples for a closed ring of count items."""
    indices = np.arange(count, dtype=int)
    return np.column_stack((indices,
                            np.roll(indices, 1),
                            np.roll(indices, -1)))
def skewMesh(v, angle):
    """Skew mesh v horizontally by angle (degrees); returns a new array.

    BUGFIX: slope was computed as np.tanh([..]) — a one-element array —
    which makes np.array([[1,0],[slope,1]]) ragged and fails on modern
    numpy. Computing it as a scalar is numerically identical.
    """
    slope = np.tanh(math.pi * angle / 180)
    return v.dot(np.array([[1, 0], [slope, 1]]))
def labelConnected(e):
    """Label each vertex with the index of its connected contour ring.

    Relies on glyphToMesh's index layout: within one ring the "next" index
    only decreases at the ring's wrap-around point, which ends a label.
    Python 2 only (iteritems).
    """
    label = 0
    labels = np.zeros((len(e),1))
    for i,(prev,next) in e.iteritems():
        labels[i] = label
        if next <= i:
            label += 1
    return labels
def copyGradDetails(a,b,e,scale=15):
    """Transfer high-frequency detail of gradients b onto base gradients a.

    Per connected contour (wrap-mode gaussian so rings stay seamless):
    out = smooth(b) + (a - smooth(a)).
    """
    n = len(a)
    labels = labelConnected(e)
    out = a.astype(float).copy()
    # NOTE(review): labels[-1] is a 1-element array; range() over it works on
    # Python 2 / old numpy via implicit int conversion — confirm under upgrades.
    for i in range(labels[-1]+1):
        mask = (labels==i).flatten()
        out[mask,:] = gaussian(b[mask,:], scale, mode="wrap", axis=0) + a[mask,:] - gaussian(a[mask,:], scale, mode="wrap", axis=0)
    return out
def copyMeshDetails(va,vb,e,scale=5,smooth=.01):
    """Blend the fine detail of mesh va into mesh vb via gradient transfer.

    Computes unit edge gradients of both meshes, copies va's high-frequency
    gradient detail onto vb's, renormalizes, and re-solves vertex positions.
    Python 2 only (tuple-parameter lambdas).
    """
    gradA = mapEdges(lambda a,(p,n): normalize(p-a), va, e)
    gradB = mapEdges(lambda a,(p,n): normalize(p-a), vb, e)
    grad = copyGradDetails(gradA, gradB, e, scale)
    grad = mapEdges(lambda a,(p,n): normalize(a), grad, e)
    return recompose(vb, grad, e, smooth=smooth)
def condenseGlyph(glyph, scale=.8, stemWidth=185):
    """Return a condensed copy of glyph, narrowing counters but not stems.

    Scales x by `scale`, pushes vertices back along their normals to keep
    stem weight, then re-solves and re-fits the original segment structure.
    Python 2 only (tuple-parameter lambdas).
    """
    ga, subsegments = segmentGlyph(glyph, 25)
    va, e = glyphToMesh(ga)
    n = len(va)

    normals = edgeNormals(va,e)
    # Horizontally scaled copy of the mesh.
    cn = va.dot(np.array([[scale, 0],[0,1]]))
    grad = mapEdges(lambda a,(p,n): normalize(p-a), cn, e)
    # ograd = mapEdges(lambda a,(p,n): normalize(p-a), va, e)

    # Compensate stems: push x back along the normal by half the width lost.
    cn[:,0] -= normals[:,0] * stemWidth * .5 * (1 - scale)
    out = recompose(cn, grad, e, smooth=.5)
    # out = recompose(out, grad, e, smooth=.1)
    out = recompose(out, grad, e, smooth=.01)
    # cornerWeights = mapEdges(lambda a,(p,n): normalize(p-a).dot(normalize(a-n)), grad, e)[:,0].reshape((-1,1))
    # smooth = np.ones((n,1)) * .1
    # smooth[cornerWeights < .6] = 10
    #
    # grad2 = quantizeGradient(grad).astype(float)
    # grad2 = copyGradDetails(grad, grad2, e, scale=10)
    # grad2 = mapEdges(lambda a,e: normalize(a), grad2, e)
    # out = recompose(out, grad2, e, smooth=smooth)
    out[:,0] += 15
    # y coordinates are restored exactly from the source mesh.
    out[:,1] = va[:,1]
    # out = recompose(out, grad, e, smooth=.5)
    gOut = meshToGlyph(out, ga)
    gOut = fitGlyph(glyph, gOut, subsegments)
    # Snap each fitted segment's first point back to the original y.
    for i,seg in enumerate(gOut):
        gOut[i].points[0].y = glyph[i].points[0].y
    return gOut

View File

@ -0,0 +1,55 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ufo2ft.kernFeatureWriter import KernFeatureWriter
from ufo2ft.makeotfParts import FeatureOTFCompiler
class RobotoFeatureCompiler(FeatureOTFCompiler):
    """FeatureOTFCompiler configured with this project's anchor naming.

    Declares which (base, mark) anchor-name pairs drive the generated
    mark/mkmk positioning features.
    """

    def precompile(self):
        # Regenerate features instead of keeping any existing ones.
        self.overwriteFeatures = True

    def setupAnchorPairs(self):
        # [baseAnchor, markAnchor] pairs for the mark feature.
        self.anchorPairs = [
            ["top", "_marktop"],
            ["bottom", "_markbottom"],
            ["top_dd", "_marktop_dd"],
            ["bottom_dd", "_markbottom_dd"],
            ["rhotichook", "_markrhotichook"],
            ["top0315", "_marktop0315"],
            ["parent_top", "_markparent_top"],
            ["parenthesses.w1", "_markparenthesses.w1"],
            ["parenthesses.w2", "_markparenthesses.w2"],
            ["parenthesses.w3", "_markparenthesses.w3"]]

        self.mkmkAnchorPairs = [
            ["mkmktop", "_marktop"],
            ["mkmkbottom_acc", "_markbottom"],

            # By providing a pair with accent anchor _bottom and no base anchor,
            # we designate all glyphs with _bottom as accents (so that they will
            # be used as base glyphs for mkmk features) without generating any
            # positioning rules actually using this anchor (which is instead
            # used to generate composite glyphs). This is all for consistency
            # with older roboto versions.
            ["", "_bottom"],
        ]

        self.ligaAnchorPairs = []
class RobotoKernWriter(KernFeatureWriter):
    """KernFeatureWriter that recognizes this project's kerning class names.

    Feature classes named @_<name>_L / @_<name>_R are treated as left/right
    kerning groups respectively.
    """
    leftFeaClassRe = r"@_(.+)_L$"
    rightFeaClassRe = r"@_(.+)_R$"

View File

@ -0,0 +1,111 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mitre Glyph:
mitreSize : Length of the segment created by the mitre. The default is 4.
maxAngle : Maximum angle in radians at which segments will be mitred. The default is .9 (about 50 degrees).
Works for both inside and outside angles
"""
import math
from robofab.objects.objectsRF import RPoint, RSegment
from fontbuild.convertCurves import replaceSegments
def getTangents(contours):
    """Per segment, the two tangent vectors leaving its end point.

    For each segment's last point p, returns (pp - p, np - p) where pp/np
    are the adjacent control/on-curve points of the previous/next segments
    (off-curve handles for curves, endpoints for lines). Flat list in
    contour order, matching mitreGlyph's running segment counter.
    """
    tmap = []
    for c in contours:
        clen = len(c)
        for i in range(clen):
            s = c[i]
            p = s.points[-1]
            ns = c[(i + 1) % clen]
            ps = c[(clen + i - 1) % clen]
            np = ns.points[1] if ns.type == 'curve' else ns.points[-1]
            pp = s.points[2] if s.type == 'curve' else ps.points[-1]
            tmap.append((pp - p, np - p))
    return tmap
def normalizeVector(p):
    """Return p scaled to unit length, or RPoint(0,0) for a zero vector."""
    magnitude = getMagnitude(p)
    if magnitude == 0:
        return RPoint(0, 0)
    return p * (1 / magnitude)
def getMagnitude(p):
    """Euclidean length of vector p (any object with x/y attributes)."""
    xSquared = p.x * p.x
    ySquared = p.y * p.y
    return math.sqrt(xSquared + ySquared)
def getDistance(v1, v2):
    """Euclidean distance between points v1 and v2."""
    delta = RPoint(v1.x - v2.x, v1.y - v2.y)
    return getMagnitude(delta)
def getAngle(v1, v2):
    """Signed angle from vector v2 to vector v1, normalized to [0, 2*pi)."""
    rawAngle = math.atan2(v1.y, v1.x) - math.atan2(v2.y, v2.x)
    fullTurn = 2 * math.pi
    return (rawAngle + fullTurn) % fullTurn
def angleDiff(a, b):
    """Smallest absolute difference between angles a and b, in [0, pi]."""
    wrapped = abs(a - b) % (math.pi * 2)
    return math.pi - abs(wrapped - math.pi)
def getAngle2(v1, v2):
    """Unsigned angle between vectors v1 and v2, in [0, pi]."""
    theta1 = math.atan2(v1.y, v1.x)
    theta2 = math.atan2(v2.y, v2.x)
    return abs(angleDiff(theta1, theta2))
def getMitreOffset(n,v1,v2,mitreSize=4,maxAngle=.9):
    """Offsets for mitring the corner between tangents v1 and v2.

    Returns (offset1, offset2) — where to move the corner point and where to
    place the new point — or None when the corner should not be mitred.
    NOTE(review): parameter n (the segment at the call site) is unused here.
    """
    # dont mitre if segment is too short
    if abs(getMagnitude(v1)) < mitreSize * 2 or abs(getMagnitude(v2)) < mitreSize * 2:
        return
    angle = getAngle2(v2,v1)
    v1 = normalizeVector(v1)
    v2 = normalizeVector(v2)
    # Degenerate corner: both tangents point the same way.
    if v1.x == v2.x and v1.y == v2.y:
        return

    # only mitre corners sharper than maxAngle
    if angle > maxAngle:
        return

    # Scale along each tangent so the cut spans mitreSize units.
    radius = mitreSize / abs(getDistance(v1,v2))
    offset1 = RPoint(round(v1.x * radius), round(v1.y * radius))
    offset2 = RPoint(round(v2.x * radius), round(v2.y * radius))
    return offset1, offset2
def mitreGlyph(g,mitreSize,maxAngle):
    """Mitre (flatten) sharp corners of glyph g in place.

    mitreSize: length of the cut segment; maxAngle: only corners sharper
    than this (radians) are mitred. Each mitred corner moves the original
    end point and inserts a short line segment.
    """
    if g == None:
        return

    tangents = getTangents(g.contours)
    sid = -1 # flat segment index parallel to `tangents`
    for c in g.contours:
        segments = []
        needsMitring = False
        for s in c:
            sid += 1
            v1, v2 = tangents[sid]
            off = getMitreOffset(s,v1,v2,mitreSize,maxAngle)
            s1 = s.copy()
            if off != None:
                offset1, offset2 = off
                # New corner point along the outgoing tangent.
                p2 = s.points[-1] + offset2
                s2 = RSegment('line', [(p2.x, p2.y)])
                # Pull the original point back along the incoming tangent.
                s1.points[0] += offset1
                segments.append(s1)
                segments.append(s2)
                needsMitring = True
            else:
                segments.append(s1)
        if needsMitring:
            replaceSegments(c, segments)

360
misc/pylib/fontbuild/mix.py Normal file
View File

@ -0,0 +1,360 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, append
import copy
import json
from robofab.objects.objectsRF import RPoint, RGlyph
from robofab.world import OpenFont
from decomposeGlyph import decomposeGlyph
class FFont:
    "Font wrapper for floating point operations"

    def __init__(self,f=None):
        # f may be another FFont (deep-ish copy), a robofab font, or None.
        self.glyphs = {}   # { glyphName: FGlyph }
        self.hstems = []
        self.vstems = []
        self.kerning = {}
        if isinstance(f,FFont):
            #self.glyphs = [g.copy() for g in f.glyphs]
            for key,g in f.glyphs.iteritems():
                self.glyphs[key] = g.copy()
            self.hstems = list(f.hstems)
            self.vstems = list(f.vstems)
            self.kerning = dict(f.kerning)
        elif f != None:
            self.copyFromFont(f)

    def copyFromFont(self, f):
        """Snapshot a robofab font's glyphs, stem hints and kerning."""
        for g in f:
            self.glyphs[g.name] = FGlyph(g)
        self.hstems = [s for s in f.info.postscriptStemSnapH]
        self.vstems = [s for s in f.info.postscriptStemSnapV]
        self.kerning = f.kerning.asDict()

    def copyToFont(self, f):
        """Write stored glyph data, stems and kerning back into font f."""
        for g in f:
            try:
                gF = self.glyphs[g.name]
                gF.copyToGlyph(g)
            except:
                # Best-effort: report and keep copying remaining glyphs.
                print "Copy to glyph failed for" + g.name
        f.info.postscriptStemSnapH = self.hstems
        f.info.postscriptStemSnapV = self.vstems
        for pair in self.kerning:
            f.kerning[pair] = self.kerning[pair]

    def getGlyph(self, gname):
        """Return the FGlyph named gname, or None when absent."""
        try:
            return self.glyphs[gname]
        except:
            return None

    def setGlyph(self, gname, glyph):
        self.glyphs[gname] = glyph

    def addDiff(self,b,c):
        """Return a new FFont: self + (b - c), applied glyph by glyph."""
        newFont = FFont(self)
        for key,g in newFont.glyphs.iteritems():
            gB = b.getGlyph(key)
            gC = c.getGlyph(key)
            try:
                newFont.glyphs[key] = g.addDiff(gB,gC)
            except:
                print "Add diff failed for '%s'" %key
        return newFont
class FGlyph:
    "provides a temporary floating point compatible glyph data structure"

    # All coordinates live in two flat float arrays (dataX/dataY); width,
    # components, anchors and contour points store INDICES into those arrays
    # so glyph arithmetic is plain numpy vector math.

    def __init__(self, g=None):
        self.contours = []   # contours[i][j] = (xIndex, yIndex)
        self.width = 0.      # index of the width entry in dataX
        self.components = [] # (xIndex, yIndex) of each component's scale slot
        self.anchors = []    # (xIndex, yIndex) of each anchor
        if g != None:
            self.copyFromGlyph(g)

    def copyFromGlyph(self,g):
        """Flatten robofab glyph g into indexed dataX/dataY arrays."""
        self.name = g.name
        valuesX = []
        valuesY = []
        # width occupies slot 0 of dataX (index recorded before appending).
        self.width = len(valuesX)
        valuesX.append(g.width)
        # Each component contributes scale then offset (two slots per axis).
        for c in g.components:
            self.components.append((len(valuesX), len(valuesY)))
            valuesX.append(c.scale[0])
            valuesY.append(c.scale[1])
            valuesX.append(c.offset[0])
            valuesY.append(c.offset[1])
        for a in g.anchors:
            self.anchors.append((len(valuesX), len(valuesY)))
            valuesX.append(a.x)
            valuesY.append(a.y)
        for i in range(len(g)):
            self.contours.append([])
            for j in range (len(g[i].points)):
                self.contours[i].append((len(valuesX), len(valuesY)))
                valuesX.append(g[i].points[j].x)
                valuesY.append(g[i].points[j].y)
        self.dataX = array(valuesX, dtype=float)
        self.dataY = array(valuesY, dtype=float)

    def copyToGlyph(self,g):
        """Write stored values back into robofab glyph g (ints by default).

        Component/anchor data is only written when counts still match.
        """
        g.width = self._derefX(self.width)
        if len(g.components) == len(self.components):
            for i in range(len(self.components)):
                # scale stays float; offset is rounded to int.
                g.components[i].scale = (self._derefX(self.components[i][0] + 0, asInt=False),
                                         self._derefY(self.components[i][1] + 0, asInt=False))
                g.components[i].offset = (self._derefX(self.components[i][0] + 1),
                                          self._derefY(self.components[i][1] + 1))
        if len(g.anchors) == len(self.anchors):
            for i in range(len(self.anchors)):
                g.anchors[i].x = self._derefX( self.anchors[i][0])
                g.anchors[i].y = self._derefY( self.anchors[i][1])
        for i in range(len(g)) :
            for j in range (len(g[i].points)):
                g[i].points[j].x = self._derefX(self.contours[i][j][0])
                g[i].points[j].y = self._derefY(self.contours[i][j][1])

    def isCompatible(self, g):
        """True when g has the same data layout (interpolation-safe)."""
        return (len(self.dataX) == len(g.dataX) and
                len(self.dataY) == len(g.dataY) and
                len(g.contours) == len(self.contours))

    def __add__(self,g):
        if self.isCompatible(g):
            newGlyph = self.copy()
            newGlyph.dataX = self.dataX + g.dataX
            newGlyph.dataY = self.dataY + g.dataY
            return newGlyph
        else:
            print "Add failed for '%s'" %(self.name)
            raise Exception

    def __sub__(self,g):
        if self.isCompatible(g):
            newGlyph = self.copy()
            newGlyph.dataX = self.dataX - g.dataX
            newGlyph.dataY = self.dataY - g.dataY
            return newGlyph
        else:
            print "Subtract failed for '%s'" %(self.name)
            raise Exception

    def __mul__(self,scalar):
        newGlyph = self.copy()
        newGlyph.dataX = self.dataX * scalar
        newGlyph.dataY = self.dataY * scalar
        return newGlyph

    def scaleX(self,scalar):
        """Scale x data only, preserving component scale factors."""
        newGlyph = self.copy()
        if len(self.dataX) > 0:
            newGlyph.dataX = self.dataX * scalar
            for i in range(len(newGlyph.components)):
                # restore component x-scale slots to their original values
                newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
        return newGlyph

    def shift(self,ammount):
        """Shift x data by ammount, preserving component scale factors."""
        newGlyph = self.copy()
        newGlyph.dataX = self.dataX + ammount
        for i in range(len(newGlyph.components)):
            newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
        return newGlyph

    def interp(self, g, v):
        """Interpolate towards g by factor v (v.x for x data, v.y for y)."""
        gF = self.copy()
        if not self.isCompatible(g):
            print "Interpolate failed for '%s'; outlines incompatible" %(self.name)
            raise Exception
        gF.dataX += (g.dataX - gF.dataX) * v.x
        gF.dataY += (g.dataY - gF.dataY) * v.y
        return gF

    def copy(self):
        """Independent copy (index lists and data arrays are duplicated)."""
        ng = FGlyph()
        ng.contours = list(self.contours)
        ng.width = self.width
        ng.components = list(self.components)
        ng.anchors = list(self.anchors)
        ng.dataX = self.dataX.copy()
        ng.dataY = self.dataY.copy()
        ng.name = self.name
        return ng

    def _derefX(self,id, asInt=True):
        # Look up a value by index in dataX, rounding to int by default.
        val = self.dataX[id]
        return int(round(val)) if asInt else val

    def _derefY(self,id, asInt=True):
        # Look up a value by index in dataY, rounding to int by default.
        val = self.dataY[id]
        return int(round(val)) if asInt else val

    def addDiff(self,gB,gC):
        """Return self + (gB - gC)."""
        newGlyph = self + (gB - gC)
        return newGlyph
class Master:
    """One interpolation master: a font (or nested Mix) plus its mix factor v."""

    def __init__(self, font=None, v=0, kernlist=None, overlay=None):
        # font may be an FFont, a UFO path string, a Mix, or a robofab font.
        if isinstance(font, FFont):
            self.font = None
            self.ffont = font
        elif isinstance(font,str):
            self.openFont(font,overlay)
        elif isinstance(font,Mix):
            self.font = font
        else:
            self.font = font
            self.ffont = FFont(font)
        # Scalar v is promoted to an (x, y) mix factor.
        if isinstance(v,float) or isinstance(v,int):
            self.v = RPoint(v, v)
        else:
            self.v = v
        if kernlist != None:
            kerns = [i.strip().split() for i in open(kernlist).readlines()]

            self.kernlist = [{'left':k[0], 'right':k[1], 'value': k[2]}
                            for k in kerns
                            if not k[0].startswith("#")
                            and not k[0] == ""]
            #TODO implement class based kerning / external kerning file

    def openFont(self, path, overlayPath=None):
        """Open a UFO, decompose mixed outline+component glyphs, apply overlay."""
        self.font = OpenFont(path)
        for g in self.font:
            size = len(g)
            csize = len(g.components)
            # Glyphs with both outlines and components can't interpolate
            # reliably; decompose them first.
            if (size > 0 and csize > 0):
                decomposeGlyph(self.font, self.font[g.name])

        if overlayPath != None:
            # Overlay glyphs replace same-named glyphs in the base font.
            overlayFont = OpenFont(overlayPath)
            font = self.font
            for overlayGlyph in overlayFont:
                font.insertGlyph(overlayGlyph)

        self.ffont = FFont(self.font)
class Mix:
def __init__(self,masters,v):
self.masters = masters
if isinstance(v,float) or isinstance(v,int):
self.v = RPoint(v,v)
else:
self.v = v
def getFGlyph(self, master, gname):
if isinstance(master.font, Mix):
return font.mixGlyphs(gname)
return master.ffont.getGlyph(gname)
def getGlyphMasters(self,gname):
masters = self.masters
if len(masters) <= 2:
return self.getFGlyph(masters[0], gname), self.getFGlyph(masters[-1], gname)
def generateFFont(self):
ffont = FFont(self.masters[0].ffont)
for key,g in ffont.glyphs.iteritems():
ffont.glyphs[key] = self.mixGlyphs(key)
ffont.kerning = self.mixKerns()
return ffont
def generateFont(self, baseFont):
newFont = baseFont.copy()
#self.mixStems(newFont) todo _ fix stems code
for g in newFont:
gF = self.mixGlyphs(g.name)
if gF == None:
g.mark = True
elif isinstance(gF, RGlyph):
newFont[g.name] = gF.copy()
else:
gF.copyToGlyph(g)
newFont.kerning.clear()
newFont.kerning.update(self.mixKerns() or {})
return newFont
def mixGlyphs(self,gname):
gA,gB = self.getGlyphMasters(gname)
try:
return gA.interp(gB,self.v)
except:
print "mixglyph failed for %s" %(gname)
if gA != None:
return gA.copy()
def getKerning(self, master):
if isinstance(master.font, Mix):
return master.font.mixKerns()
return master.ffont.kerning
def mixKerns(self):
masters = self.masters
kA, kB = self.getKerning(masters[0]), self.getKerning(masters[-1])
return interpolateKerns(kA, kB, self.v)
def narrowFLGlyph(g, gThin, factor=.75):
    """Narrow glyph g in place by adding a (condensed-thin minus thin) delta.

    gThin is the corresponding thin-weight glyph (glyph or FGlyph); the
    difference between its x-scaled and original forms approximates pure
    narrowing without weight change.
    """
    gF = FGlyph(g)
    if not isinstance(gThin,FGlyph):
        gThin = FGlyph(gThin)
    gCondensed = gThin.scaleX(factor)
    try:
        gNarrow = gF + (gCondensed - gThin)
        gNarrow.copyToGlyph(g)
    except:
        # Incompatible outlines: leave g untouched and report.
        print "No dice for: " + g.name
def interpolate(a, b, v, e=0):
    """Interpolate from a to b at position v, with optional easing.

    e blends between linear (e=0) and cubic (e=1) easing.
    """
    linear = a + (b - a) * v
    if e == 0:
        return linear
    cubic = (b - a) * v ** 3 + a  # cubic easing
    return linear + (cubic - linear) * e
def interpolateKerns(kA, kB, v):
    """Interpolate two kerning dicts at factor v.x over the union of pairs.

    Pairs missing from one side are treated as 0 there. For pairs present
    in both, the forward and reverse interpolations are asserted to agree.
    """
    # to yield correct kerning for Roboto output, we must emulate the behavior
    # of old versions of this code; namely, take the kerning values of the first
    # master instead of actually interpolating.
    # old code:
    # https://github.com/google/roboto/blob/7f083ac31241cc86d019ea6227fa508b9fcf39a6/scripts/lib/fontbuild/mix.py
    # bug:
    # https://github.com/google/roboto/issues/213
    # return dict(kA)
    mixed = {}
    for pair, value in kA.items():
        mixed[pair] = interpolate(value, kB.get(pair, 0), v.x)
    for pair, value in kB.items():
        reverseLerp = interpolate(value, kA.get(pair, 0), 1 - v.x)
        if pair in mixed:
            # Sanity check: both directions must produce the same value.
            assert abs(mixed[pair] - reverseLerp) < 1e-6
        else:
            mixed[pair] = reverseLerp
    return mixed

View File

@ -0,0 +1,431 @@
#!/usr/bin/env python
# encoding: utf8
#
# This script was used specifically to re-introduce a bunch of kerning values
# that where lost in an old kerning cleanup that failed to account for
# automatically composed glyphs defined in diacritics.txt.
#
# Steps:
# 1. git diff 10e15297b 10e15297b^ > 10e15297b.diff
# 2. edit 10e15297b.diff and remove the python script add
# 3. fetch copies of kerning.plist and groups.plist from before the loss change
# bold-groups.plist
# bold-kerning.plist
# regular-groups.plist
# regular-kerning.plist
# 4. run this script
#
from __future__ import print_function
import os, sys, plistlib, json
from collections import OrderedDict
from ConfigParser import RawConfigParser
from argparse import ArgumentParser
from fontTools import ttLib
from robofab.objects.objectsRF import OpenFont
srcFontPaths = ['src/Interface-Regular.ufo', 'src/Interface-Bold.ufo']
def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
    """Map glyph names to codepoints from a compiled font's best cmap.

    font: a fontTools TTFont or a path string. Only unicode-platform
    (platformID 0) subtables are considered; the highest-format one wins.
    Returns (mapping, font).
    """
    if isinstance(font, str):
        font = ttLib.TTFont(font)
    if not 'cmap' in font:
        raise Exception('missing cmap table')
    gl = {}
    bestCodeSubTable = None
    bestCodeSubTableFormat = 0
    for st in font['cmap'].tables:
        if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
            if st.format > bestCodeSubTableFormat:
                bestCodeSubTable = st
                bestCodeSubTableFormat = st.format
    if bestCodeSubTable is not None:
        for cp, glyphname in bestCodeSubTable.cmap.items():
            if glyphname in gl:
                gl[glyphname].append(cp)
            else:
                gl[glyphname] = [cp]
    return gl, font
def parseAGL(filename):  # -> { 2126: 'Omega', ... }
    """Parse an Adobe Glyph List file into {codepoint: glyphName}.

    Lines look like "Omega;2126". Comment lines (#), blank lines, and
    multi-codepoint entries (higher-level combinations) are skipped.
    """
    mapping = {}
    with open(filename, 'r') as f:
        for rawLine in f:
            # Omega;2126
            # dalethatafpatah;05D3 05B2   # higher-level combinations; ignored
            line = rawLine.strip()
            if not line or line.startswith('#'):
                continue
            name, uc = [field.strip() for field in line.split(';')]
            if ' ' not in uc:
                # it's a 1:1 mapping
                mapping[int(uc, 16)] = name
    return mapping
def parseGlyphComposition(composite):
    """Parse one diacritics.txt composition rule.

    Format: "base+accent:anchor+...=glyphName[/dx,dy]".
    Returns (glyphName, baseName, accentNames, offset) where accentNames is
    a list of [accentName, anchorName] pairs and offset is [dx, dy].
    """
    parts = composite.split("=")
    targetParts = parts[1].split("/")
    glyphName = targetParts[0]
    if len(targetParts) > 1:
        offset = [int(value) for value in targetParts[1].split(",")]
    else:
        offset = [0, 0]
    accents = parts[0].split("+")
    baseName = accents.pop(0)
    accentNames = [accent.split(":") for accent in accents]
    return (glyphName, baseName, accentNames, offset)
def loadGlyphCompositions(filename):
compositions = OrderedDict()
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if len(line) > 0 and line[0] != '#':
glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
compositions[glyphName] = (baseName, accentNames, offset)
return compositions
def loadNamesFromDiff(diffFilename):
with open(diffFilename, 'r') as f:
diffLines = [s.strip() for s in f.read().splitlines() if s.startswith('+\t')]
diffLines = [s for s in diffLines if not s.startswith('<int')]
namesInDiff = set()
for s in diffLines:
if s.startswith('<int') or s.startswith('<arr') or s.startswith('</'):
continue
p = s.find('>')
if p != -1:
p2 = s.find('<', p+1)
if p2 != -1:
name = s[p+1:p2]
try:
int(name)
except:
if not name.startswith('@'):
namesInDiff.add(s[p+1:p2])
return namesInDiff
def loadGroups(filename):
groups = plistlib.readPlist(filename)
nameMap = {} # { glyphName => set(groupName) }
for groupName, glyphNames in groups.iteritems():
for glyphName in glyphNames:
nameMap.setdefault(glyphName, set()).add(groupName)
return groups, nameMap
def loadKerning(filename):
kerning = plistlib.readPlist(filename)
# <dict>
# <key>@KERN_LEFT_A</key>
# <dict>
# <key>@KERN_RIGHT_C</key>
# <integer>-96</integer>
leftIndex = {} # { glyph-name => <ref to plist right-hand side dict> }
rightIndex = {} # { glyph-name => [(left-hand-side-name, kernVal), ...] }
rightGroupIndex = {} # { group-name => [(left-hand-side-name, kernVal), ...] }
for leftName, right in kerning.iteritems():
if leftName[0] != '@':
leftIndex[leftName] = right
for rightName, kernVal in right.iteritems():
if rightName[0] != '@':
rightIndex.setdefault(rightName, []).append((leftName, kernVal))
else:
rightGroupIndex.setdefault(rightName, []).append((leftName, kernVal))
return kerning, leftIndex, rightIndex, rightGroupIndex
def loadAltNamesDB(agl, fontFilename):
uc2names = {} # { 2126: ['Omega', ...], ...}
name2ucs = {} # { 'Omega': [2126, ...], ...}
name2ucs, _ = getTTGlyphList(fontFilename)
# -> { 'Omega': [2126, ...], ... }
for name, ucs in name2ucs.iteritems():
for uc in ucs:
uc2names.setdefault(uc, []).append(name)
for uc, name in agl.iteritems():
name2ucs.setdefault(name, []).append(uc)
uc2names.setdefault(uc, []).append(name)
# -> { 2126: 'Omega', ... }
return uc2names, name2ucs
def loadLocalNamesDB(agl, diacriticComps): # { 2126: ['Omega', ...], ...}
uc2names = None
for fontPath in srcFontPaths:
font = OpenFont(fontPath)
if uc2names is None:
uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
else:
for uc, names in font.getCharacterMapping().iteritems():
names2 = uc2names.get(uc, [])
for name in names:
if name not in names2:
names2.append(name)
uc2names[uc] = names2
# agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
aglName2Ucs = {}
for uc, name in agl.iteritems():
aglName2Ucs.setdefault(name, []).append(uc)
for glyphName, comp in diacriticComps.iteritems():
for uc in aglName2Ucs.get(glyphName, []):
names = uc2names.get(uc, [])
if glyphName not in names:
names.append(glyphName)
uc2names[uc] = names
name2ucs = {}
for uc, names in uc2names.iteritems():
for name in names:
name2ucs.setdefault(name, set()).add(uc)
return uc2names, name2ucs
def _canonicalGlyphName(name, localName2ucs, localUc2Names, altName2ucs):
ucs = localName2ucs.get(name)
if ucs:
return name, list(ucs)[0]
ucs = altName2ucs.get(name)
if ucs:
for uc in ucs:
localNames = localUc2Names.get(uc)
if localNames and len(localNames):
return localNames[0], uc
return None, None
def main():
argparser = ArgumentParser(description='Restore lost kerning')
argparser.add_argument(
'-dry', dest='dryRun', action='store_const', const=True, default=False,
help='Do not modify anything, but instead just print what would happen.')
argparser.add_argument(
'srcFont', metavar='<fontfile>', type=str,
help='TrueType, OpenType or UFO fonts to gather glyph info from')
argparser.add_argument(
'diffFile', metavar='<diffile>', type=str, help='Diff file')
args = argparser.parse_args()
dryRun = args.dryRun
agl = parseAGL('src/glyphlist.txt')
diacriticComps = loadGlyphCompositions('src/diacritics.txt')
altUc2names, altName2ucs = loadAltNamesDB(agl, args.srcFont)
localUc2Names, localName2ucs = loadLocalNamesDB(agl, diacriticComps)
canonicalGlyphName = lambda name: _canonicalGlyphName(
name, localName2ucs, localUc2Names, altName2ucs)
deletedNames = loadNamesFromDiff(args.diffFile) # 10e15297b.diff
deletedDiacriticNames = OrderedDict()
for glyphName, comp in diacriticComps.iteritems():
if glyphName in deletedNames:
deletedDiacriticNames[glyphName] = comp
for fontPath in srcFontPaths:
addedGroupNames = set()
oldFilenamePrefix = 'regular'
if fontPath.find('Bold') != -1:
oldFilenamePrefix = 'bold'
oldGroups, oldNameToGroups = loadGroups(
oldFilenamePrefix + '-groups.plist')
oldKerning, oldLIndex, oldRIndex, oldRGroupIndex = loadKerning(
oldFilenamePrefix + '-kerning.plist')
# lIndex : { name => <ref to plist right-hand side dict> }
# rIndex : { name => [(left-hand-side-name, kernVal), ...] }
currGroupFilename = os.path.join(fontPath, 'groups.plist')
currKerningFilename = os.path.join(fontPath, 'kerning.plist')
currGroups, currNameToGroups = loadGroups(currGroupFilename)
currKerning, currLIndex, currRIndex, currRGroupIndex = loadKerning(currKerningFilename)
for glyphName, comp in deletedDiacriticNames.iteritems():
oldGroupMemberships = oldNameToGroups.get(glyphName)
localGlyphName, localUc = canonicalGlyphName(glyphName)
# if glyphName != 'dcaron':
# continue # XXX DEBUG
if localGlyphName is None:
# glyph does no longer exist -- ignore
print('[IGNORE]', glyphName)
continue
if oldGroupMemberships:
# print('group', localGlyphName,
# '=>', localUc,
# 'in old group:', oldGroupMemberships, ', curr group:', currGroupMemberships)
for oldGroupName in oldGroupMemberships:
currGroup = currGroups.get(oldGroupName) # None|[glyphname, ...]
# print('GM ', localGlyphName, oldGroupName, len(currGroup) if currGroup else 0)
if currGroup is not None:
if localGlyphName not in currGroup:
# print('[UPDATE group]', oldGroupName, 'append', localGlyphName)
currGroup.append(localGlyphName)
else:
# group does not currently exist
if currNameToGroups.get(localGlyphName):
raise Exception('TODO: case where glyph is in some current groups, but not the' +
'original-named group')
print('[ADD group]', oldGroupName, '=> [', localGlyphName, ']')
currGroups[oldGroupName] = [localGlyphName]
addedGroupNames.add(oldGroupName)
# if oldGroupName in oldKerning:
# print('TODO: effects of oldGroupName being in oldKerning:',
# oldKerning[oldGroupName])
if oldGroupName in oldRGroupIndex:
print('TODO: effects of oldGroupName being in oldRGroupIndex:',
oldRGroupIndex[oldGroupName])
else: # if not oldGroupMemberships
ucs = localName2ucs.get(glyphName)
if not ucs:
raise Exception(
'TODO non-group, non-local name ' + glyphName + ' -- lookup in alt names')
asLeft = oldLIndex.get(glyphName)
atRightOf = oldRIndex.get(glyphName)
# print('individual', glyphName,
# '=>', ', '.join([str(uc) for uc in ucs]),
# '\n as left:', asLeft is not None,
# '\n at right of:', atRightOf is not None)
if asLeft:
currKern = currKerning.get(localGlyphName)
if currKern is None:
rightValues = {}
for rightName, kernValue in asLeft.iteritems():
if rightName[0] == '@':
currGroup = currGroups.get(rightName)
if currGroup and localGlyphName not in currGroup:
rightValues[rightName] = kernValue
else:
localName, localUc = canonicalGlyphName(rightName)
if localName:
rightValues[localName] = kernValue
if len(rightValues) > 0:
print('[ADD currKerning]', localGlyphName, '=>', rightValues)
currKerning[localGlyphName] = rightValues
if atRightOf:
for parentLeftName, kernVal in atRightOf:
# print('atRightOf:', parentLeftName, kernVal)
if parentLeftName[0] == '@':
if parentLeftName in currGroups:
k = currKerning.get(parentLeftName)
if k:
if localGlyphName not in k:
print('[UPDATE currKerning g]',
parentLeftName, '+= {', localGlyphName, ':', kernVal, '}')
k[localGlyphName] = kernVal
else:
print('TODO: left-group is NOT in currKerning; left-group', parentLeftName)
else:
localParentLeftGlyphName, _ = canonicalGlyphName(parentLeftName)
if localParentLeftGlyphName:
k = currKerning.get(localParentLeftGlyphName)
if k:
if localGlyphName not in k:
print('[UPDATE currKerning i]',
localParentLeftGlyphName, '+= {', localGlyphName, ':', kernVal, '}')
k[localGlyphName] = kernVal
else:
print('[ADD currKerning i]',
localParentLeftGlyphName, '=> {', localGlyphName, ':', kernVal, '}')
currKerning[localParentLeftGlyphName] = {localGlyphName: kernVal}
for groupName in addedGroupNames:
print('————————————————————————————————————————————')
print('re-introduce group', groupName, 'to kerning')
oldRKern = oldKerning.get(groupName)
if oldRKern is not None:
newRKern = {}
for oldRightName, kernVal in oldRKern.iteritems():
if oldRightName[0] == '@':
if oldRightName in currGroups:
newRKern[oldRightName] = kernVal
else:
# Note: (oldRightName in addedGroupNames) should always be False here
# as we would have added it to currGroups already.
print('[DROP group]', oldRightName, kernVal)
if oldRightName in currGroups:
del currGroups[oldRightName]
else:
localGlyphName, _ = canonicalGlyphName(oldRightName)
if localGlyphName:
newRKern[localGlyphName] = kernVal
print('localGlyphName', localGlyphName)
if len(newRKern):
print('[ADD currKerning g]', groupName, newRKern)
currKerning[groupName] = newRKern
# oldRGroupIndex : { group-name => [(left-hand-side-name, kernVal), ...] }
oldLKern = oldRGroupIndex.get(groupName)
if oldLKern:
for oldRightName, kernVal in oldLKern:
if oldRightName[0] == '@':
if oldRightName in currGroups:
k = currKerning.get(oldRightName)
if k is not None:
print('[UPDATE kerning g]', oldRightName, '+= {', groupName, ':', kernVal, '}')
k[groupName] = kernVal
else:
currKerning[oldRightName] = {groupName: kernVal}
print('[ADD kerning g]', oldRightName, '= {', groupName, ':', kernVal, '}')
else:
localGlyphName, _ = canonicalGlyphName(oldRightName)
if localGlyphName:
k = currKerning.get(localGlyphName)
if k is not None:
print('[UPDATE kerning i]', localGlyphName, '+= {', groupName, ':', kernVal, '}')
k[groupName] = kernVal
else:
currKerning[localGlyphName] = {groupName: kernVal}
print('[ADD kerning i]', localGlyphName, '= {', groupName, ':', kernVal, '}')
print('Write', currGroupFilename)
if not dryRun:
plistlib.writePlist(currGroups, currGroupFilename)
print('Write', currKerningFilename)
if not dryRun:
plistlib.writePlist(currKerning, currKerningFilename)
# end: for fontPath
main()

305
misc/rewrite-glyphorder.py Executable file
View File

@ -0,0 +1,305 @@
#!/usr/bin/env python
# encoding: utf8
from __future__ import print_function
import os, sys, plistlib, json, re
from collections import OrderedDict
from argparse import ArgumentParser
from ConfigParser import RawConfigParser
from fontTools import ttLib
from robofab.objects.objectsRF import OpenFont
# Regex matching "default" glyph names, like "uni2043" and "u01C5"
uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
class PList:
def __init__(self, filename):
self.filename = filename
self.plist = None
def load(self):
self.plist = plistlib.readPlist(self.filename)
def save(self):
if self.plist is not None:
plistlib.writePlist(self.plist, self.filename)
def get(self, k, defaultValue=None):
if self.plist is None:
self.load()
return self.plist.get(k, defaultValue)
def __getitem__(self, k):
if self.plist is None:
self.load()
return self.plist[k]
def __setitem__(self, k, v):
if self.plist is None:
self.load()
self.plist[k] = v
def __delitem__(self, k):
if self.plist is None:
self.load()
del self.plist[k]
def parseAGL(filename): # -> { 2126: 'Omega', ... }
m = {}
with open(filename, 'r') as f:
for line in f:
# Omega;2126
# dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
line = line.strip()
if len(line) > 0 and line[0] != '#':
name, uc = tuple([c.strip() for c in line.split(';')])
if uc.find(' ') == -1:
# it's a 1:1 mapping
m[int(uc, 16)] = name
return m
def revCharMap(ucToNames):
# {2126:['Omega','Omegagr']} -> {'Omega':2126, 'Omegagr':2126}
# {2126:'Omega'} -> {'Omega':2126}
m = {}
if len(ucToNames) == 0:
return m
lists = True
for v in ucToNames.itervalues():
lists = not isinstance(v, str)
break
if lists:
for uc, names in ucToNames.iteritems():
for name in names:
m[name] = uc
else:
for uc, name in ucToNames.iteritems():
m[name] = uc
return m
def loadJSONGlyphOrder(jsonFilename):
gol = None
if jsonFilename == '-':
gol = json.load(sys.stdin)
else:
with open(jsonFilename, 'r') as f:
gol = json.load(f)
if not isinstance(gol, list):
raise Exception('expected [[string, int|null]')
if len(gol) > 0:
for v in gol:
if not isinstance(v, list):
raise Exception('expected [[string, int|null]]')
break
return gol
def loadTTGlyphOrder(font):
if isinstance(font, str):
font = ttLib.TTFont(font)
if not 'cmap' in font:
raise Exception('missing cmap table')
bestCodeSubTable = None
bestCodeSubTableFormat = 0
for st in font['cmap'].tables:
if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
if st.format > bestCodeSubTableFormat:
bestCodeSubTable = st
bestCodeSubTableFormat = st.format
ucmap = {}
if bestCodeSubTable is not None:
for cp, glyphname in bestCodeSubTable.cmap.items():
ucmap[glyphname] = cp
gol = []
for name in font.getGlyphOrder():
gol.append((name, ucmap.get(name)))
return gol
def loadSrcGlyphOrder(jsonFilename, fontFilename): # -> [ ('Omegagreek', 2126|None), ...]
if jsonFilename:
return loadJSONGlyphOrder(jsonFilename)
elif fontFilename:
return loadTTGlyphOrder(fontFilename.rstrip('/ '))
return None
def loadUFOGlyphNames(ufoPath):
font = OpenFont(ufoPath)
libPlist = PList(os.path.join(ufoPath, 'lib.plist'))
orderedNames = libPlist['public.glyphOrder'] # [ 'Omega', ...]
# append any glyphs that are missing in orderedNames
allNames = set(font.keys())
for name in orderedNames:
allNames.discard(name)
for name in allNames:
orderedNames.append(name)
ucToNames = font.getCharacterMapping() # { 2126: [ 'Omega', ...], ...}
nameToUc = revCharMap(ucToNames) # { 'Omega': 2126, ...}
gol = OrderedDict() # OrderedDict{ ('Omega', 2126|None), ...}
for name in orderedNames:
gol[name] = nameToUc.get(name)
# gol.append((name, nameToUc.get(name)))
return gol, ucToNames, nameToUc, libPlist
def saveUFOGlyphOrder(libPlist, orderedNames, dryRun):
libPlist['public.glyphOrder'] = orderedNames
roboSort = libPlist.get('com.typemytype.robofont.sort')
if roboSort is not None:
# lib['com.typemytype.robofont.sort'] has schema
# [ { type: "glyphList", ascending: [glyphname, ...] }, ...]
for i in range(len(roboSort)):
ent = roboSort[i]
if isinstance(ent, dict) and ent.get('type') == 'glyphList':
roboSort[i] = {'type':'glyphList', 'ascending':orderedNames}
break
print('Writing', libPlist.filename)
if not dryRun:
libPlist.save()
def getConfigResFile(config, basedir, name):
fn = os.path.join(basedir, config.get("res", name))
if not os.path.isfile(fn):
basedir = os.path.dirname(basedir)
fn = os.path.join(basedir, config.get("res", name))
if not os.path.isfile(fn):
fn = None
return fn
def main():
argparser = ArgumentParser(description='Rewrite glyph order of UFO fonts')
argparser.add_argument(
'-dry', dest='dryRun', action='store_const', const=True, default=False,
help='Do not modify anything, but instead just print what would happen.')
argparser.add_argument(
'-src-json', dest='srcJSONFile', metavar='<file>', type=str,
help='JSON file to read glyph order from.' +
' Should be a list e.g. [["Omega", 2126], [".notdef", null], ...]')
argparser.add_argument(
'-src-font', dest='srcFontFile', metavar='<file>', type=str,
help='TrueType or OpenType font to read glyph order from.')
argparser.add_argument(
'-out', dest='outFile', metavar='<file>', type=str,
help='Write each name per line to <file>')
argparser.add_argument(
'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
args = argparser.parse_args()
dryRun = args.dryRun
if args.srcJSONFile and args.srcFontFile:
argparser.error('Both -src-json and -src-font specified -- please provide only one.')
srcGol = loadSrcGlyphOrder(args.srcJSONFile, args.srcFontFile)
if srcGol is None:
argparser.error('No source provided (-src-* argument missing)')
# Load Adobe Glyph List database
srcDir = os.path.dirname(args.dstFontsPaths[0])
config = RawConfigParser(dict_type=OrderedDict)
config.read(os.path.join(srcDir, 'fontbuild.cfg'))
aglUcToName = parseAGL(getConfigResFile(config, srcDir, 'agl_glyphlistfile'))
aglNameToUc = revCharMap(aglUcToName)
glyphorderUnion = OrderedDict()
for dstFontPath in args.dstFontsPaths:
glyphOrder, ucToNames, nameToUc, libPlist = loadUFOGlyphNames(dstFontPath)
newGol = OrderedDict()
for name, uc in srcGol:
if uc is None:
# if there's no unicode associated, derive from name if possible
m = uniNameRe.match(name)
if m:
try:
uc = int(m.group(1), 16)
except:
pass
if uc is None:
uc = aglNameToUc.get(name)
# has same glyph mapped to same unicode
names = ucToNames.get(uc)
if names is not None:
for name in names:
# print('U %s U+%04X' % (name, uc))
newGol[name] = uc
continue
# has same name in dst?
uc2 = glyphOrder.get(name)
if uc2 is not None:
# print('N %s U+%04X' % (name, uc2))
newGol[name] = uc2
continue
# Try AGL[uc] -> name == name
if uc is not None:
name2 = aglUcToName.get(uc)
if name2 is not None:
uc2 = glyphOrder.get(name2)
if uc2 is not None:
# print('A %s U+%04X' % (name2, uc2))
newGol[name2] = uc2
continue
# else: ignore glyph name in srcGol not found in target
# if uc is None:
# print('x %s -' % name)
# else:
# print('x %s U+%04X' % (name, uc))
# add remaining glyphs from original glyph order
for name, uc in glyphOrder.iteritems():
if name not in newGol:
# print('E %s U+%04X' % (name, uc))
newGol[name] = uc
orderedNames = []
for name in newGol.iterkeys():
orderedNames.append(name)
glyphorderUnion[name] = True
saveUFOGlyphOrder(libPlist, orderedNames, dryRun)
if args.outFile:
print('Write', args.outFile)
glyphorderUnionNames = glyphorderUnion.keys()
if not dryRun:
with open(args.outFile, 'w') as f:
f.write('\n'.join(glyphorderUnionNames) + '\n')
if __name__ == '__main__':
main()

View File

@ -0,0 +1,53 @@
#
# This script changes the width of all glyphs by applying a multiplier.
# It keeps the contours centered as glyphs get wider or tighter.
#
from mojo.roboFont import version
from math import ceil, floor
if __name__ == "__main__":
font = CurrentFont()
print "Resizing glyph margins for %r" % font
# how much to add or remove from each glyph's margin
A = -16
if font is not None:
for g in font:
# skip glyphs
if g.name in ('c', 'e', 'o', 'r', 'j'):
continue
if g.width < 2:
print '"%s": ["ignore", "zero-width"],' % (g.name)
continue
if g.box is None:
print '"%s": ["ignore", "empty"],' % (g.name)
continue
if g.width % 16 != 0:
print '"%s": ["ignore", "misaligned"],' % (g.name)
continue
if g.leftMargin <= 0 or g.rightMargin <= 0:
print '"%s": ["ignore", "zero-or-negative"],' % (g.name)
continue
leftMargin = int(max(0, g.leftMargin + A))
rightMargin = int(max(0, g.rightMargin + A))
#print '"%s": ["update", %g, %g],' % (g.name, leftMargin, rightMargin)
if 'interface.spaceadjust' in g.lib:
g.lib['interface.width-adjustments'].append(A)
else:
g.lib['interface.width-adjustments'] = [A]
# order of assignment is probably important
g.rightMargin = int(rightMargin)
g.leftMargin = int(leftMargin)
font.update()
else:
print "No fonts open"
print "Done"

View File

@ -0,0 +1,107 @@
# Change upm
# Jens Kutilek 2013-01-02
from mojo.roboFont import version
def scalePoints(glyph, factor):
if version == "1.4":
# stupid workaround for bug in RoboFont 1.4
for contour in glyph:
for point in contour.points:
point.x *= factor
point.y *= factor
glyph.width *= factor
else:
glyph *= factor
def scaleGlyph(glyph, factor, scaleWidth=True, roundCoordinates=True):
if not(scaleWidth):
oldWidth = glyph.width
if len(glyph.components) == 0:
scalePoints(glyph, factor)
if roundCoordinates:
glyph.round()
else:
# save components
# this may be a tad too convoluted ...
components = []
for i in range(len(glyph.components)):
components.append(glyph.components[i])
for c in components:
glyph.removeComponent(c)
scalePoints(glyph, factor)
if roundCoordinates:
glyph.round()
# restore components
for i in range(len(components)):
newOffset = (int(round(components[i].offset[0] * factor)),
int(round(components[i].offset[1] * factor)))
glyph.appendComponent(components[i].baseGlyph, newOffset, components[i].scale)
if not(scaleWidth):
# restore width
glyph.width = oldWidth
def changeUPM(font, factor, roundCoordinates=True):
# Glyphs
for g in font:
scaleGlyph(g, factor)
for guide in g.guides:
# another thing that doesn't work in RoboFont 1.4 - 1.5.1
guide.x *= factor
guide.y *= factor
# Glyph layers
mainLayer = "foreground"
for layerName in font.layerOrder:
if layerName != mainLayer:
for g in font:
g.flipLayers(mainLayer, layerName)
scaleGlyph(g, factor, scaleWidth=False)
g.flipLayers(layerName, mainLayer)
# Kerning
if font.kerning:
font.kerning.scale(factor)
if roundCoordinates:
if not version in ["1.4", "1.5", "1.5.1"]:
font.kerning.round(1)
else:
print "WARNING: kerning values cannot be rounded to integer in this RoboFont version"
# TODO: Change positioning feature code?
# Vertical dimensions
font.info.descender = int(round(font.info.descender * factor))
font.info.xHeight = int(round(font.info.xHeight * factor))
font.info.capHeight = int(round(font.info.capHeight * factor))
font.info.ascender = int(round(font.info.ascender * factor))
# Finally set new UPM
font.info.unitsPerEm = newUpm
font.update()
if __name__ == "__main__":
from robofab.interface.all.dialogs import AskString
print "Change Units Per Em"
if CurrentFont() is not None:
oldUpm = CurrentFont().info.unitsPerEm
newUpm = CurrentFont().info.unitsPerEm
try:
newUpm = int(AskString("New units per em size?", oldUpm))
except:
pass
if newUpm == oldUpm:
print " Not changing upm size."
else:
factor = float(newUpm) / oldUpm
print " Scaling all font measurements by", factor
changeUPM(CurrentFont(), factor)
else:
print " Open a font first to change upm, please."
print " Done."

View File

@ -0,0 +1,83 @@
#
# This script changes the width of any glyph which width is not an even multiple of 256.
# For glyphs that are updated, the shape(s) inside the glyph are centered as well.
#
from mojo.roboFont import version
from math import ceil, floor
if __name__ == "__main__":
font = CurrentFont()
print "Fitting glyphs to EM grid at 256 %r" % font
# Strategy to use for centering a glyph when resizing its EM:
# "center" Ignore existing margins and center in EM at on integer units.
# "adjust-margins" Attempt to retain existing margins w/o centering inside EM.
centeringStrategy = 'center'
if font is not None:
for g in font:
# only consider adjusting the listed glyphs
# if g.unicode not in (0x212B, 0x005A, 0x0387):
# continue
if g.width < 2:
# ignore zero width glyph
# print 'ignoring %r -- zero width' % g
continue
if g.width % 256 == 0:
# ignore already aligned glyph
# print 'ignoring %r -- already aligned' % g
continue
width = g.width
if g.rightMargin < 128:
width = ceil(width / 256) * 256
else:
width = round(width / 256) * 256
# center glyph in EM
leftMargin = g.leftMargin
rightMargin = g.rightMargin
if centeringStrategy == 'adjust-margins':
# Adjust margins to place the glyph in the center while retaining original
# left/right margins.
widthDelta = width - g.width
leftMargin = g.leftMargin + int(floor(widthDelta / 2))
rightMargin = g.rightMargin + int(ceil(widthDelta / 2))
elif centeringStrategy == 'center':
# Read g.box (effective bounds of the glyph) and truly center the
# glyph, but we could run the risk of losing some intentionally-left or right
# aligned glyph, e.g. "|x |" -> "| x |"
if g.box is not None:
xMin, yMin, xMax, yMax = g.box
graphicWidth = xMax - xMin
leftMargin = round((width - graphicWidth) / 2)
else:
print 'Unexpected centeringStrategy value'
break
# log message
uniname = ''
if g.unicode is not None:
uniname = ' U+%04X' % g.unicode
print 'Adjusting "%s"%s from %g to %g' % (g.name, uniname, g.width, width)
# write changes to glyph
g.lib['interface.gridadjust.original'] = repr({
"rightMargin": g.rightMargin,
"leftMargin": g.leftMargin,
"width": g.width,
})
# order of assignment is probably important
g.rightMargin = int(rightMargin)
g.leftMargin = int(leftMargin)
g.width = int(width)
font.update()
else:
print "No fonts open"
print "Done"

View File

@ -0,0 +1,15 @@
#
# Removes local guides from all glyphs
#
if __name__ == "__main__":
font = CurrentFont()
print "Removing local guides from all glyphs of %r" % font
if font is not None:
for g in font:
if 'com.typemytype.robofont.guides' in g.lib:
del(g.lib['com.typemytype.robofont.guides'])
font.update()
else:
print "No fonts open"
print "Done"

View File

@ -0,0 +1,384 @@
#
# Removes unused glyphs
#
from mojo.roboFont import version
SC_ROMAN = [
"A.smcp",
"B.smcp",
"C.smcp",
"D.smcp",
"E.smcp",
"F.smcp",
"G.smcp",
"H.smcp",
"I.smcp",
"J.smcp",
"K.smcp",
"L.smcp",
"M.smcp",
"N.smcp",
"O.smcp",
"P.smcp",
"Q.smcp",
"R.smcp",
"S.smcp",
"T.smcp",
"U.smcp",
"V.smcp",
"W.smcp",
"X.smcp",
"Y.smcp",
"Z.smcp",
"AE.smcp",
"AEacute.smcp",
"Aacute.smcp",
"Abreve.smcp",
"Acircumflex.smcp",
"Adieresis.smcp",
"Agrave.smcp",
"Alpha.smcp",
"Alphatonos.smcp",
"Amacron.smcp",
"Aogonek.smcp",
"Aogonek.smcp.NAV",
"Aring.smcp",
"Aringacute.smcp",
"Atilde.smcp",
"Beta.smcp",
"Cacute.smcp",
"Ccaron.smcp",
"Ccedilla.smcp",
"Ccircumflex.smcp",
"Chi.smcp",
"Dcaron.smcp",
"Dcroat.smcp",
"Delta.smcp",
"Eacute.smcp",
"Ebreve.smcp",
"Ecaron.smcp",
"Ecircumflex.smcp",
"Edieresis.smcp",
"Edotaccent.smcp",
"Egrave.smcp",
"Emacron.smcp",
"Eng.smcp",
"Eogonek.smcp",
"Eogonek.smcp.NAV",
"Epsilon.smcp",
"Epsilontonos.smcp",
"Eta.smcp",
"Etatonos.smcp",
"Eth.smcp",
"Gamma.smcp",
"Gbreve.smcp",
"Gcircumflex.smcp",
"Gcommaaccent.smcp",
"Germandbls.smcp",
"Hbar.smcp",
"Hcircumflex.smcp",
"IJ.smcp",
"Iacute.smcp",
"Ibreve.smcp",
"Icircumflex.smcp",
"Idieresis.smcp",
"Igrave.smcp",
"Imacron.smcp",
"Iogonek.smcp",
"Iota.smcp",
"Iotadieresis.smcp",
"Iotatonos.smcp",
"Itilde.smcp",
"Jcircumflex.smcp",
"Kappa.smcp",
"Kcommaaccent.smcp",
"Lacute.smcp",
"Lambda.smcp",
"Lcaron.smcp",
"Lcommaaccent.smcp",
"Ldot.smcp",
"Lslash.smcp",
"Nacute.smcp",
"Ncaron.smcp",
"Ncommaaccent.smcp",
"Ntilde.smcp",
"Nu.smcp",
"OE.smcp",
"Oacute.smcp",
"Obreve.smcp",
"Ocircumflex.smcp",
"Odieresis.smcp",
"Ograve.smcp",
"Ohungarumlaut.smcp",
"Omacron.smcp",
"Omega.smcp",
"Omegatonos.smcp",
"Omicron.smcp",
"Omicrontonos.smcp",
"Oogonek.smcp",
"Oogonek.smcp.NAV",
"Oslash.smcp",
"Oslashacute.smcp",
"Otilde.smcp",
"Phi.smcp",
"Pi.smcp",
"Psi.smcp",
"Racute.smcp",
"Rcaron.smcp",
"Rcommaaccent.smcp",
"Rho.smcp",
"Sacute.smcp",
"Scaron.smcp",
"Scedilla.smcp",
"Scircumflex.smcp",
"Sigma.smcp",
"Tau.smcp",
"Tbar.smcp",
"Tcaron.smcp",
"Theta.smcp",
"Thorn.smcp",
"Uacute.smcp",
"Ubreve.smcp",
"Ucircumflex.smcp",
"Udieresis.smcp",
"Ugrave.smcp",
"Uhungarumlaut.smcp",
"Umacron.smcp",
"Uogonek.smcp",
"Upsilon.smcp",
"Upsilondieresis.smcp",
"Upsilontonos.smcp",
"Uring.smcp",
"Utilde.smcp",
"Wacute.smcp",
"Wcircumflex.smcp",
"Wdieresis.smcp",
"Wgrave.smcp",
"Xi.smcp",
"Yacute.smcp",
"Ycircumflex.smcp",
"Ydieresis.smcp",
"Ygrave.smcp",
"Zacute.smcp",
"Zcaron.smcp",
"Zdotaccent.smcp",
"Zeta.smcp",
"ampersand.smcp",
"uni010A.smcp",
"uni0120.smcp",
"uni0162.smcp",
"Scommaaccent.smcp",
"Tcommaaccent.smcp",
"uni037F.smcp"
]
SC_SET1 = [
"zero.smcp",
"one.smcp",
"two.smcp",
"three.smcp",
"four.smcp",
"five.smcp",
"six.smcp",
"seven.smcp",
"eight.smcp",
"nine.smcp",
"Euro.smcp",
"Idotaccent.smcp",
"Mu.smcp",
"dollar.smcp",
"lira.smcp",
"sterling.smcp",
"uni0401.smcp",
"uni0402.smcp",
"uni0403.smcp",
"uni0404.smcp",
"uni0405.smcp",
"uni0406.smcp",
"uni0407.smcp",
"uni0408.smcp",
"uni0409.smcp",
"uni040A.smcp",
"uni040B.smcp",
"uni040C.smcp",
"uni040E.smcp",
"uni040F.smcp",
"uni0410.smcp",
"uni0411.smcp",
"uni0412.smcp",
"uni0413.smcp",
"uni0414.smcp",
"uni0415.smcp",
"uni0416.smcp",
"uni0417.smcp",
"uni0418.smcp",
"uni0419.smcp",
"uni041A.smcp",
"uni041B.smcp",
"uni041C.smcp",
"uni041D.smcp",
"uni041E.smcp",
"uni041F.smcp",
"uni0420.smcp",
"uni0421.smcp",
"uni0422.smcp",
"uni0423.smcp",
"uni0424.smcp",
"uni0425.smcp",
"uni0426.smcp",
"uni0427.smcp",
"uni0428.smcp",
"uni0429.smcp",
"uni042A.smcp",
"uni042B.smcp",
"uni042C.smcp",
"uni042D.smcp",
"uni042E.smcp",
"uni042F.smcp",
"uni0490.smcp",
"uni0492.smcp",
"uni0496.smcp",
"uni0498.smcp",
"uni049A.smcp",
"uni049C.smcp",
"uni04A0.smcp",
"uni04A2.smcp",
"uni04A8.smcp",
"uni04AA.smcp",
"uni04AE.smcp",
"uni04B0.smcp",
"uni04B2.smcp",
"uni04B4.smcp",
"uni04B8.smcp",
"uni04BA.smcp",
"uni04BC.smcp",
"uni04BE.smcp",
"uni04D8.smcp",
"uni04E0.smcp",
"uni04E2.smcp",
"uni04E8.smcp",
"uni04EE.smcp",
"uni20B4.smcp",
"uni20B8.smcp",
"uni20BD.smcp",
"uni2116.smcp",
"yen.smcp"
]
SC_SET2 = [
"I.smcp",
"Sigma.smcp",
"Mu.smcp",
"uni0410.smcp",
"uni0411.smcp",
"uni0412.smcp",
"uni0413.smcp",
"uni0414.smcp",
"uni0415.smcp",
"uni0416.smcp",
"uni0417.smcp",
"uni0418.smcp",
"uni0419.smcp",
"uni041A.smcp",
"uni041B.smcp",
"uni041C.smcp",
"uni041D.smcp",
"uni041E.smcp",
"uni041F.smcp",
"uni0420.smcp",
"uni0421.smcp",
"uni0422.smcp",
"uni0423.smcp",
"uni0424.smcp",
"uni0425.smcp",
"uni0426.smcp",
"uni0427.smcp",
"uni0428.smcp",
"uni0429.smcp",
"uni042A.smcp",
"uni042B.smcp",
"uni042C.smcp",
"uni042D.smcp",
"uni042E.smcp",
"uni042F.smcp",
"uni0401.smcp",
"uni0402.smcp",
"uni0403.smcp",
"uni0404.smcp",
"uni0405.smcp",
"uni0406.smcp",
"uni0407.smcp",
"uni0408.smcp",
"uni0409.smcp",
"uni040A.smcp",
"uni040B.smcp",
"uni040C.smcp",
"uni040E.smcp",
"uni040F.smcp",
"uni0490.smcp",
"uni0492.smcp",
"uni0496.smcp",
"uni0498.smcp",
"uni049A.smcp",
"uni049C.smcp",
"uni04A0.smcp",
"uni04A2.smcp",
"uni04A8.smcp",
"uni04AA.smcp",
"uni04AE.smcp",
"uni04B0.smcp",
"uni04B2.smcp",
"uni04B4.smcp",
"uni04B8.smcp",
"uni04BA.smcp",
"uni04BC.smcp",
"uni04BE.smcp",
"uni04D8.smcp",
"uni04E0.smcp",
"uni04E2.smcp",
"uni04E8.smcp",
"uni04EE.smcp"
]
STRIP_NAME_SET = set(SC_ROMAN).union(SC_SET1).union(SC_SET2)
STRIP_SUFFIXES = (
'.smcp',
'.unic',
'.alt',
'.alt2',
'.ss06',
'.ss07',
'.onum',
'.pnum',
'.tnum'
)
def hasStripSuffix(g):
name = g.name
for suffix in STRIP_SUFFIXES:
if str.endswith(name, suffix):
return True
return False
if __name__ == "__main__":
font = CurrentFont()
if font is not None:
for g in font:
if g.name in STRIP_NAME_SET or hasStripSuffix(g):
if g.unicode is not None:
# glyph maps to a codepoint -- keep it
continue
print 'Removing "%s"' % g.name
font.removeGlyph(g.name)
font.update()
else:
print "No fonts open"
print "Done"

View File

@ -0,0 +1,26 @@
#
# This script changes the width of all glyphs by applying a multiplier.
# It keeps the contours centered as glyphs get wider or tighter.
#
from mojo.roboFont import version
from math import ceil, floor
if __name__ == "__main__":
font = CurrentFont()
print "Resizing glyph margins for %r" % font
if font is not None:
for g in font:
leftMargin = g.leftMargin
rightMargin = g.rightMargin
if leftMargin < 0 or rightMargin < 0:
g.rightMargin = int(max(0, rightMargin))
g.leftMargin = int(max(0, leftMargin))
print("adjust %s" % g.name)
font.update()
else:
print "No fonts open"
print "Done"

25
misc/stems.txt Normal file
View File

@ -0,0 +1,25 @@
================================================================================================
Regular
••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
Horizontal:
220 A B D E F G H L P R T Z two three(center) four five seven
200 a e f t z minus
Vertical:
248 B D E F G H I J K L N P R T U Y one four
236 a b d f g h i j k l m n p q r t u
232 M
================================================================================================
Bold
••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
Horizontal:
380 ?
Vertical:
464 ?

435
misc/svgsync.py Executable file
View File

@ -0,0 +1,435 @@
#!/usr/bin/env python
# encoding: utf8
#
# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
#
import os
import sys
import argparse
import re
from xml.dom.minidom import parseString as xmlparseString
# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
font = None # RFont
ufopath = ''
svgdir = ''
effectiveAscender = 0
def num(s):
  # Parse a numeric string: float when it contains a decimal point,
  # int otherwise.
  if '.' in s:
    return float(s)
  return int(s)
def glyphToSVGPath(g, yMul):
  # Serialize glyph g's contours as an SVG path-data string ("d" attribute).
  # yMul multiplies every y coordinate (callers pass -1 to flip the UFO
  # y-axis into SVG's downward y-axis). Reads the module-level `font`.
  # 'Y' and 'X' are placeholders rewritten below depending on curve state.
  commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
  svg = ''
  contours = []
  if len(g.components):
    # Composite glyph: decompose a scratch copy so only plain contours
    # remain; the scratch glyph is removed again at the end.
    font.newGlyph('__svgsync')
    new = font['__svgsync']
    new.width = g.width
    new.appendGlyph(g)
    new.decompose()
    g = new
  if len(g):
    for c in range(len(g)):
      contours.append(g[c])
  for i in range(len(contours)):
    c = contours[i]
    contour = end = ''
    curve = False
    points = c.points
    # Rotate leading off-curve points to the end so the contour starts on
    # an on-curve point (a cubic segment can be led by at most two).
    if points[0].type == 'offCurve':
      points.append(points.pop(0))
    if points[0].type == 'offCurve':
      points.append(points.pop(0))
    for x in range(len(points)):
      p = points[x]
      command = commands[str(p.type)]
      # First off-curve point of a segment emits 'C'; later control points
      # and the terminating on-curve point rely on SVG's implicit repeat.
      if command == 'X':
        if curve == True:
          command = ''
        else:
          command = 'C'
          curve = True
      if command == 'Y':
        command = ''
        curve = False
      if x == 0:
        command = 'M'
      if p.type == 'curve':
        # Remember the closing on-curve coordinates; appended via `end`.
        end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
      contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
    svg += ' ' + contour + end + 'z'
  if font.has_key('__svgsync'):
    font.removeGlyph('__svgsync')
  return svg.strip()
def maybeAddMove(contour, x, y, smooth):
  # Insert an initial MOVE segment when the contour has none yet.
  if not contour.segments:
    contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
# Tokenizer for SVG path data: each match captures an optional command
# letter plus one numeric literal.
svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
def drawSVGPath(g, d, tr):
  # Draw SVG path-data string `d` into glyph g via its pen.
  # tr is the (x, y) translation accumulated from the SVG node hierarchy.
  # Only absolute M, C, L commands plus 'z' are supported; y coordinates are
  # flipped back from SVG's downward axis into font coordinates.
  yMul = -1
  xOffs = tr[0]
  # Shift so the SVG origin lines up with the font's em box.
  yOffs = -(font.info.unitsPerEm - tr[1])
  for pathd in d.split('M'):
    pathd = pathd.strip()
    # print 'pathd', pathd
    if len(pathd) == 0:
      continue
    i = 0
    closePath = False
    if pathd[-1] == 'z':
      closePath = True
      pathd = pathd[0:-1]
    # Tokenize into "optional command letter + number" strings.
    pv = []
    for m in svgPathDataRegEx.finditer('M' + pathd):
      if m.group(1) is not None:
        pv.append(m.group(1) + m.group(2))
      else:
        pv.append(m.group(2))
    initX = 0
    initY = 0
    pen = g.getPen()
    while i < len(pv):
      pd = pv[i]; i += 1
      cmd = pd[0]
      x = num(pd[1:]) + xOffs
      y = (num(pv[i]) + yOffs) * yMul; i += 1
      if cmd == 'M':
        # print cmd, x, y, '/', num(pv[i-2][1:])
        initX = x
        initY = y
        pen.moveTo((x, y))
        continue
      if cmd == 'C':
        # Cubic Bezier curve: "C x1 y1, x2 y2, x y"
        x1 = x
        y1 = y
        x2 = num(pv[i]) + xOffs; i += 1
        y2 = (num(pv[i]) + yOffs) * yMul; i += 1
        x = num(pv[i]) + xOffs; i += 1
        y = (num(pv[i]) + yOffs) * yMul; i += 1
        pen.curveTo((x1, y1), (x2, y2), (x, y))
        # print cmd, x1, y1, x2, y2, x, y
      elif cmd == 'L':
        pen.lineTo((x, y))
      else:
        raise Exception('unexpected SVG path command %r' % cmd)
    if closePath:
      pen.closePath()
    else:
      pen.endPath()
    # print 'path ended. closePath:', closePath
def glyphToSVG(g):
  # Render glyph g as a standalone SVG document string: the outline is
  # translated down by the effective ascender, and a stroked rect marks the
  # glyph box. Reads module-level `font` and `effectiveAscender`.
  width = g.width
  height = font.info.unitsPerEm
  d = {
    'name': g.name,
    'width': width,
    'height': effectiveAscender - font.info.descender,
    'effectiveAscender': effectiveAscender,
    'leftMargin': g.leftMargin,
    'rightMargin': g.rightMargin,
    'glyphSVGPath': glyphToSVGPath(g, -1),
    'ascender': font.info.ascender,
    'descender': font.info.descender,
    'baselineOffset': height + font.info.descender,
    'unitsPerEm': font.info.unitsPerEm,
  }
  # for kv in d.iteritems():
  #   if kv[0] == 'glyphSVGPath':
  #     print '  %s: ...' % kv[0]
  #   else:
  #     print '  %s: %r' % kv
  svg = '''
<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d">
  <g id="%(name)s">
    <path d="%(glyphSVGPath)s" transform="translate(0 %(effectiveAscender)d)" />
    <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="" stroke="black" />
  </g>
</svg>
''' % d
  # print svg
  return svg.strip()
def _findPathNodes(n, paths, defs, uses, isDef=False):
  # Recursively walk DOM node n collecting:
  #   - drawable <path> elements into `paths`
  #   - <path> elements under <defs> into `defs`, keyed by id
  #   - <use> references into `uses`, keyed by the id they point at
  for cn in n.childNodes:
    if cn.nodeName == 'path':
      if isDef:
        defs[cn.getAttribute('id')] = cn
      else:
        paths.append(cn)
    elif cn.nodeName == 'use':
      uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
    elif cn.nodeName == 'defs':
      _findPathNodes(cn, paths, defs, uses, isDef=True)
    # basestring is Python 2 only; this module is Python 2 code.
    elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
      _findPathNodes(cn, paths, defs, uses, isDef)
  # return translate
def findPathNodes(n, isDef=False):
  # Collect all drawable <path> nodes under n, resolving Figma-style
  # <use xlink:href="#id"> indirection by splicing the referenced <defs>
  # path into the tree in place of the <use> node.
  paths = []
  defs = {}
  uses = {}
  # Figma export structure this handles:
  # <g id="Canvas" transform="translate(-3677 -24988)">
  #   <g id="six 2">
  #     <g id="six">
  #       <g id="Vector">
  #         <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
  # ...
  # <defs>
  #   <path id="path0_fill" ...
  #
  _findPathNodes(n, paths, defs, uses)
  # flatten uses & defs
  for k in uses.keys():
    dfNode = defs.get(k)
    if dfNode is not None:
      v = uses[k]
      v['targetNode'] = dfNode
      if dfNode.nodeName == 'path':
        useNode = v['useNode']
        useNode.parentNode.replaceChild(dfNode, useNode)
        # Copy the <use> node's attributes (e.g. transform) onto the path.
        # NOTE(review): attrs[k] is a minidom Attr node, not a string --
        # setAttribute normally expects the string value; verify.
        attrs = useNode.attributes
        for k in attrs.keys():
          if k != 'xlink:href':
            dfNode.setAttribute(k, attrs[k])
        paths.append(dfNode)
      else:
        # Referenced def is not a path; drop it.
        del defs[k]
  return paths
def nodeTranslation(path, x=0, y=0):
  # Sum the translate(...) transforms of `path` and all of its ancestors,
  # returning the accumulated (x, y). Raises on any other transform kind.
  tr = path.getAttribute('transform')
  if tr is not None:
    if not isinstance(tr, basestring):
      tr = tr.value  # minidom may hand back an Attr node instead of a str
    if len(tr) > 0:
      m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
      if m is not None:
        x += num(m.group('x'))
        y += num(m.group('y'))
      else:
        raise Exception('Unable to handle transform="%s"' % tr)
      # Disabled matrix() handling, kept for reference:
      # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
      # if m is not None:
      #   a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
      #   d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
      #   # matrix -1 0 0 -1 -660.719 31947
      #   print 'matrix', a, b, c, d, e, f
      #   # matrix(-1 0 -0 -1 -2553 31943)
  pn = path.parentNode
  if pn is not None and pn.nodeName != '#document':
    x, y = nodeTranslation(pn, x, y)
  return (x, y)
def glyphUpdateFromSVG(g, svgCode):
  # Replace glyph g's contours with the outline parsed from svgCode.
  # When several <path> nodes exist, prefer one whose id lacks "stroke".
  dom = xmlparseString(svgCode)
  nodes = findPathNodes(dom.documentElement)
  if not nodes:
    raise Exception('no <path> found in SVG')
  chosen = nodes[0]
  if len(nodes) != 1:
    for candidate in nodes:
      candidateId = candidate.getAttribute('id')
      if candidateId is not None and candidateId.find('stroke') == -1:
        chosen = candidate
        break
  offset = nodeTranslation(chosen)
  pathData = chosen.getAttribute('d')
  g.clearContours()
  drawSVGPath(g, pathData, offset)
def stat(path):
  """Return os.stat() for `path`, or None when it cannot be stat'ed
  (typically because it does not exist)."""
  try:
    return os.stat(path)
  except OSError:
    # The unused `as e` binding was dropped; the error itself is the signal.
    return None
def writeFile(file, s):
  # Overwrite `file` with the string s (text mode).
  f = open(file, 'w')
  try:
    f.write(s)
  finally:
    f.close()
def writeFileAndMkDirsIfNeeded(file, s):
  """Write s to `file`; if the parent directory is missing (ENOENT),
  create it and retry once.

  Fix: any IOError other than ENOENT is re-raised -- the original silently
  swallowed e.g. permission errors, making failed writes look successful.
  """
  import errno
  try:
    writeFile(file, s)
  except IOError as e:
    if e.errno != errno.ENOENT:
      raise
    os.makedirs(os.path.dirname(file))
    writeFile(file, s)
def syncGlyphUFOToSVG(glyphname, svgFile, mtime):
  # Export `glyphname` from the open UFO to svgFile, then stamp the SVG
  # with the glif's mtime so both sides compare equal on the next run.
  print glyphname + ': UFO -> SVG'
  g = font.getGlyph(glyphname)
  svg = glyphToSVG(g)
  writeFileAndMkDirsIfNeeded(svgFile, svg)
  os.utime(svgFile, (mtime, mtime))
  print 'write', svgFile
def syncGlyphSVGToUFO(glyphname, svgFile):
  # Import svgFile's outline into the UFO glyph of the same name.
  print glyphname + ': SVG -> UFO'
  svg = ''
  with open(svgFile, 'r') as f:
    svg = f.read()
  g = font.getGlyph(glyphname)
  glyphUpdateFromSVG(g, svg)
def findGlifFile(glyphname):
  # Locate the .glif file for `glyphname` inside the UFO's glyphs/ dir.
  # UFO filename mangling appends underscores to avoid name/case clashes,
  # so several candidate spellings are probed in turn.
  # Returns (path, stat_result) or ('', None) when nothing matches.
  # glyphname.glif
  # glyphname_.glif
  # glyphname__.glif
  # glyphname___.glif
  for underscoreCount in range(0, 5):
    fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
    st = stat(fn)
    if st is not None:
      return fn, st
  if glyphname.find('.') != -1:
    # Dotted names mangle only the part before the first dot:
    # glyph_.name.glif
    # glyph__.name.glif
    # glyph___.name.glif
    for underscoreCount in range(0, 5):
      nv = glyphname.split('.')
      nv[0] = nv[0] + ('_' * underscoreCount)
      ns = '.'.join(nv)
      fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
      st = stat(fn)
      if st is not None:
        return fn, st
  if glyphname.find('_') != -1:
    # Embedded underscores may themselves be doubled or tripled, with an
    # optional run of trailing underscores as well:
    # glyph_name.glif
    # glyph_name_.glif
    # glyph_name__.glif
    # glyph__name.glif
    # glyph__name_.glif
    # glyph__name__.glif
    # glyph___name.glif
    # glyph___name_.glif
    # glyph___name__.glif
    for x in range(0, 4):
      for y in range(0, 5):
        ns = glyphname.replace('_', '__' + ('_' * x))
        fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
        st = stat(fn)
        if st is not None:
          return fn, st
  return ('', None)
def syncGlyph(glyphname):
  # Sync one glyph in whichever direction is newer.
  # Returns (glifFilePath, mtime) when the UFO side was modified, else
  # (None, 0).
  glyphFile, glyphStat = findGlifFile(glyphname)
  svgFile = os.path.join(svgdir, glyphname + '.svg')
  svgStat = stat(svgFile)
  if glyphStat is None and svgStat is None:
    raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
  # Compare mtimes; a missing file counts as 0 (oldest). cmp() is Py2-only.
  c = cmp(
    0 if glyphStat is None else glyphStat.st_mtime,
    0 if svgStat is None else svgStat.st_mtime
  )
  if c < 0:
    # SVG is newer: import it into the UFO.
    syncGlyphSVGToUFO(glyphname, svgFile)
    return (glyphFile, svgStat.st_mtime) # glif file in UFO change + it's new mtime
  elif c > 0:
    # UFO is newer: export it to SVG.
    syncGlyphUFOToSVG(glyphname, svgFile, glyphStat.st_mtime)
  # else:
  #   print glyphname + ': up to date'
  return (None, 0) # UFO did not change
# ————————————————————————————————————————————————————————————————————————
# main
# Open the UFO, derive the SVG directory, sync each requested glyph in
# whichever direction is newer, then save the UFO and restore glif mtimes
# so future runs compare timestamps correctly.
argparser = argparse.ArgumentParser(description='Convert UFO glyphs to SVG')
argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
  default='',
  help='Write SVG files to <dir>. If not specified, SVG files are' +
    ' written to: {dirname(<ufopath>)/svg/<familyname>/<style>')
argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
  help='Path to UFO packages')
argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
  help='Glyphs to convert. Converts all if none specified.')
args = argparser.parse_args()
ufopath = args.ufopath.rstrip('/')
font = OpenFont(ufopath)
# Glyphs may extend above the ascender; use at least one full em.
effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
svgdir = args.svgdir
if len(svgdir) == 0:
  # Default location: <ufo parent dir>/svg/<family>/<style>
  svgdir = os.path.join(
    os.path.dirname(ufopath),
    'svg',
    font.info.familyName,
    font.info.styleName
  )
print 'sync %s (%s)' % (font.info.familyName, font.info.styleName)
glyphnames = args.glyphs if len(args.glyphs) else font.keys()
modifiedGlifFiles = []
for glyphname in glyphnames:
  glyphFile, mtime = syncGlyph(glyphname)
  if glyphFile is not None:
    modifiedGlifFiles.append((glyphFile, mtime))
if len(modifiedGlifFiles) > 0:
  print 'Saving font'
  font.save()
  # font.save() rewrites glifs; restore mtimes to match the source SVGs.
  for glyphFile, mtime in modifiedGlifFiles:
    os.utime(glyphFile, (mtime, mtime))
    print 'write', glyphFile

626
misc/svgsync2.py Executable file
View File

@ -0,0 +1,626 @@
#!/usr/bin/env python
# encoding: utf8
#
# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
#
import os
import sys
import argparse
import re
from StringIO import StringIO
from hashlib import sha256
from xml.dom.minidom import parseString as xmlparseString
from svgpathtools import svg2paths, parse_path, Path, Line, CubicBezier, QuadraticBezier
from base64 import b64encode
# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
font = None # RFont
ufopath = ''
svgdir = ''
effectiveAscender = 0
def num(s):
  # Parse a numeric string: float when it contains a decimal point,
  # int otherwise.
  if '.' in s:
    return float(s)
  return int(s)
def glyphToSVGPath(g, yMul=-1):
  # Serialize glyph g's contours as an SVG path-data string ("d" attribute).
  # yMul multiplies every y coordinate (-1 flips the UFO y-axis into SVG's
  # downward y-axis). Reads the module-level `font`.
  # 'Y' and 'X' are placeholders rewritten below depending on curve state.
  commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
  svg = ''
  contours = []
  if len(g.components):
    # Composite glyph: decompose a scratch copy so only plain contours
    # remain; the scratch glyph is removed again at the end.
    font.newGlyph('__svgsync')
    new = font['__svgsync']
    new.width = g.width
    new.appendGlyph(g)
    new.decompose()
    g = new
  if len(g):
    for c in range(len(g)):
      contours.append(g[c])
  for i in range(len(contours)):
    c = contours[i]
    contour = end = ''
    curve = False
    points = c.points
    # Rotate leading off-curve points to the end so the contour starts on
    # an on-curve point (a cubic segment can be led by at most two).
    if points[0].type == 'offCurve':
      points.append(points.pop(0))
    if points[0].type == 'offCurve':
      points.append(points.pop(0))
    for x in range(len(points)):
      p = points[x]
      command = commands[str(p.type)]
      # First off-curve point of a segment emits 'C'; later control points
      # and the terminating on-curve point rely on SVG's implicit repeat.
      if command == 'X':
        if curve == True:
          command = ''
        else:
          command = 'C'
          curve = True
      if command == 'Y':
        command = ''
        curve = False
      if x == 0:
        command = 'M'
      if p.type == 'curve':
        # Remember the closing on-curve coordinates; appended via `end`.
        end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
      contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
    svg += ' ' + contour + end + 'z'
  if font.has_key('__svgsync'):
    font.removeGlyph('__svgsync')
  return svg.strip()
def vec2(x, y):
  # Build a 2D point as a complex number (svgpathtools' representation).
  return complex(float(x), float(y))
def glyphToPaths(g, yMul=-1):
  # Convert glyph g's contours into a list of svgpathtools Path objects.
  # yMul flips the y axis (SVG y grows downward); yOffs shifts the origin
  # down by one em. Reads the module-level `font`.
  paths = []
  contours = []
  yOffs = -font.info.unitsPerEm
  # decompose components
  if len(g.components):
    font.newGlyph('__svgsync')
    ng = font['__svgsync']
    ng.width = g.width
    ng.appendGlyph(g)
    ng.decompose()
    g = ng
  for c in g:
    curve = False
    points = c.points
    path = Path()
    currentPos = 0j
    controlPoints = []
    for x in range(len(points)):
      p = points[x]
      # print 'p#' + str(x) + '.type = ' + repr(p.type)
      if p.type == 'move':
        currentPos = vec2(p.x, (p.y + yOffs) * yMul)
      elif p.type == 'offcurve':
        # Buffer control points until the terminating on-curve point.
        controlPoints.append(p)
      elif p.type == 'curve':
        pos = vec2(p.x, (p.y + yOffs) * yMul)
        if len(controlPoints) == 2:
          cp1, cp2 = controlPoints
          path.append(CubicBezier(
            currentPos,
            vec2(cp1.x, (cp1.y + yOffs) * yMul),
            vec2(cp2.x, (cp2.y + yOffs) * yMul),
            pos))
        else:
          if len(controlPoints) != 1:
            raise Exception('unexpected number of control points for curve')
          cp = controlPoints[0]
          # NOTE(review): QuadraticBezier is not among this module's
          # svgpathtools imports as written, so this branch raises
          # NameError at runtime unless the import is extended.
          path.append(QuadraticBezier(currentPos, vec2(cp.x, (cp.y + yOffs) * yMul), pos))
        currentPos = pos
        controlPoints = []
      elif p.type == 'line':
        pos = vec2(p.x, (p.y + yOffs) * yMul)
        path.append(Line(currentPos, pos))
        currentPos = pos
    paths.append(path)
  if font.has_key('__svgsync'):
    font.removeGlyph('__svgsync')
  return paths
def maybeAddMove(contour, x, y, smooth):
  # Insert an initial MOVE segment when the contour has none yet.
  if not contour.segments:
    contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
# Tokenizer for SVG path data: each match captures an optional command
# letter plus one numeric literal.
svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
def drawSVGPath(g, d, tr):
  # Draw SVG path-data string `d` into glyph g via its pen.
  # tr is the (x, y) translation accumulated from the SVG node hierarchy.
  # Only absolute M, C, L commands plus 'z' are supported; y coordinates are
  # flipped back from SVG's downward axis into font coordinates.
  yMul = -1
  xOffs = tr[0]
  # Shift so the SVG origin lines up with the font's em box.
  yOffs = -(font.info.unitsPerEm - tr[1])
  for pathd in d.split('M'):
    pathd = pathd.strip()
    # print 'pathd', pathd
    if len(pathd) == 0:
      continue
    i = 0
    closePath = False
    if pathd[-1] == 'z':
      closePath = True
      pathd = pathd[0:-1]
    # Tokenize into "optional command letter + number" strings.
    pv = []
    for m in svgPathDataRegEx.finditer('M' + pathd):
      if m.group(1) is not None:
        pv.append(m.group(1) + m.group(2))
      else:
        pv.append(m.group(2))
    initX = 0
    initY = 0
    pen = g.getPen()
    while i < len(pv):
      pd = pv[i]; i += 1
      cmd = pd[0]
      x = num(pd[1:]) + xOffs
      y = (num(pv[i]) + yOffs) * yMul; i += 1
      if cmd == 'M':
        # print cmd, x, y, '/', num(pv[i-2][1:])
        initX = x
        initY = y
        pen.moveTo((x, y))
        continue
      if cmd == 'C':
        # Cubic Bezier curve: "C x1 y1, x2 y2, x y"
        x1 = x
        y1 = y
        x2 = num(pv[i]) + xOffs; i += 1
        y2 = (num(pv[i]) + yOffs) * yMul; i += 1
        x = num(pv[i]) + xOffs; i += 1
        y = (num(pv[i]) + yOffs) * yMul; i += 1
        pen.curveTo((x1, y1), (x2, y2), (x, y))
        # print cmd, x1, y1, x2, y2, x, y
      elif cmd == 'L':
        pen.lineTo((x, y))
      else:
        raise Exception('unexpected SVG path command %r' % cmd)
    if closePath:
      pen.closePath()
    else:
      pen.endPath()
    # print 'path ended. closePath:', closePath
def glyphToSVG(g, path, hash):
  # Render glyph g as a standalone SVG document string from a precomputed
  # svgpathtools `path`, embedding `hash` as data-svgsync-hash so later runs
  # can detect whether the SVG content changed.
  width = g.width
  height = font.info.unitsPerEm
  d = {
    'name': g.name,
    'width': width,
    'height': effectiveAscender - font.info.descender,
    'effectiveAscender': effectiveAscender,
    'leftMargin': g.leftMargin,
    'rightMargin': g.rightMargin,
    'd': path.d(use_closed_attrib=True),
    'ascender': font.info.ascender,
    'descender': font.info.descender,
    'baselineOffset': height + font.info.descender,
    'unitsPerEm': font.info.unitsPerEm,
    'hash': hash,
  }
  svg = '''
<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d" data-svgsync-hash="%(hash)s">
  <g id="%(name)s">
    <path d="%(d)s" transform="translate(0 %(effectiveAscender)d)" />
    <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="" stroke="black" />
  </g>
</svg>
''' % d
  # print svg
  return svg.strip()
def _findPathNodes(n, paths, defs, uses, isDef=False):
  # Recursively walk DOM node n collecting:
  #   - drawable <path> elements into `paths`
  #   - <path> elements under <defs> into `defs`, keyed by id
  #   - <use> references into `uses`, keyed by the id they point at
  for cn in n.childNodes:
    if cn.nodeName == 'path':
      if isDef:
        defs[cn.getAttribute('id')] = cn
      else:
        paths.append(cn)
    elif cn.nodeName == 'use':
      uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
    elif cn.nodeName == 'defs':
      _findPathNodes(cn, paths, defs, uses, isDef=True)
    # basestring is Python 2 only; this module is Python 2 code.
    elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
      _findPathNodes(cn, paths, defs, uses, isDef)
  # return translate
def findPathNodes(n, isDef=False):
  # Collect all drawable <path> nodes under n, resolving Figma-style
  # <use xlink:href="#id"> indirection by splicing the referenced <defs>
  # path into the tree in place of the <use> node.
  paths = []
  defs = {}
  uses = {}
  # Figma export structure this handles:
  # <g id="Canvas" transform="translate(-3677 -24988)">
  #   <g id="six 2">
  #     <g id="six">
  #       <g id="Vector">
  #         <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
  # ...
  # <defs>
  #   <path id="path0_fill" ...
  #
  _findPathNodes(n, paths, defs, uses)
  # flatten uses & defs
  for k in uses.keys():
    dfNode = defs.get(k)
    if dfNode is not None:
      v = uses[k]
      v['targetNode'] = dfNode
      if dfNode.nodeName == 'path':
        useNode = v['useNode']
        useNode.parentNode.replaceChild(dfNode, useNode)
        # Copy the <use> node's attributes (e.g. transform) onto the path.
        # NOTE(review): attrs[k] is a minidom Attr node, not a string --
        # setAttribute normally expects the string value; verify.
        attrs = useNode.attributes
        for k in attrs.keys():
          if k != 'xlink:href':
            dfNode.setAttribute(k, attrs[k])
        paths.append(dfNode)
      else:
        # Referenced def is not a path; drop it.
        del defs[k]
  return paths
def nodeTranslation(path, x=0, y=0):
  # Sum the translate(...) transforms of `path` and all of its ancestors,
  # returning the accumulated (x, y). Raises on any other transform kind.
  tr = path.getAttribute('transform')
  if tr is not None:
    if not isinstance(tr, basestring):
      tr = tr.value  # minidom may hand back an Attr node instead of a str
    if len(tr) > 0:
      m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
      if m is not None:
        x += num(m.group('x'))
        y += num(m.group('y'))
      else:
        raise Exception('Unable to handle transform="%s"' % tr)
      # Disabled matrix() handling, kept for reference:
      # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
      # if m is not None:
      #   a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
      #   d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
      #   # matrix -1 0 0 -1 -660.719 31947
      #   print 'matrix', a, b, c, d, e, f
      #   # matrix(-1 0 -0 -1 -2553 31943)
  pn = path.parentNode
  if pn is not None and pn.nodeName != '#document':
    x, y = nodeTranslation(pn, x, y)
  return (x, y)
def glyphUpdateFromSVG(g, svgCode):
  # Replace glyph g's contours with the outline parsed from svgCode.
  # When several <path> nodes exist, prefer one whose id lacks "stroke".
  dom = xmlparseString(svgCode)
  nodes = findPathNodes(dom.documentElement)
  if not nodes:
    raise Exception('no <path> found in SVG')
  chosen = nodes[0]
  if len(nodes) != 1:
    for candidate in nodes:
      candidateId = candidate.getAttribute('id')
      if candidateId is not None and candidateId.find('stroke') == -1:
        chosen = candidate
        break
  offset = nodeTranslation(chosen)
  pathData = chosen.getAttribute('d')
  g.clearContours()
  drawSVGPath(g, pathData, offset)
def stat(path):
  """Return os.stat() for `path`, or None when it cannot be stat'ed
  (typically because it does not exist)."""
  try:
    return os.stat(path)
  except OSError:
    # The unused `as e` binding was dropped; the error itself is the signal.
    return None
def writeFile(file, s):
  # Overwrite `file` with the string s (text mode).
  f = open(file, 'w')
  try:
    f.write(s)
  finally:
    f.close()
def writeFileAndMkDirsIfNeeded(file, s):
  """Write s to `file`; if the parent directory is missing (ENOENT),
  create it and retry once.

  Fix: any IOError other than ENOENT is re-raised -- the original silently
  swallowed e.g. permission errors, making failed writes look successful.
  """
  import errno
  try:
    writeFile(file, s)
  except IOError as e:
    if e.errno != errno.ENOENT:
      raise
    os.makedirs(os.path.dirname(file))
    writeFile(file, s)
def findSvgSyncHashInSVG(svgCode):
  # Extract the value of the data-svgsync-hash attribute from the leading
  # <svg> tag of svgCode, or None when the attribute is absent.
  match = re.match(r'^\s*<svg[^>]+data-svgsync-hash="([^"]*)".+', svgCode)
  return match.group(1) if match is not None else None
def computeSVGHashFromSVG(g):
  # Placeholder: intended to compute a content hash for the glyph's SVG,
  # but currently returns a fixed dummy value.
  # h = sha256()
  return 'abc123'
def encodePath(o, path):
  # Serialize `path` as SVG path-data into the writable stream o.
  data = path.d()
  o.write(data)
def hashPaths(paths):
  # Hash the path-data of every path into a short URL-safe base64 digest.
  # NOTE: Python 2 semantics -- sha256.update() is fed str and b64encode
  # receives a str altchars argument; both would need bytes on Python 3.
  h = sha256()
  for path in paths:
    h.update(path.d()+';')
  return b64encode(h.digest(), '-_')
def svgGetPaths(svgCode):
  """Extract path definitions from an SVG document string.

  Returns ([(pathData, (translateX, translateY)), ...], isFigmaSVG).
  For Figma exports, paths whose id contains "stroke" are skipped.

  Fix: the empty-document early return previously yielded (paths, (0, 0)),
  handing callers a tuple where they expect the isFigmaSVG flag; the return
  shape is now consistent across both exits.
  """
  doc = xmlparseString(svgCode)
  svg = doc.documentElement
  paths = findPathNodes(svg)
  # Figma exports embed "<desc>...Figma</desc>" in the document.
  isFigmaSVG = svgCode.find('Figma</desc>') != -1
  if len(paths) == 0:
    return [], isFigmaSVG
  paths2 = []
  for path in paths:
    id = path.getAttribute('id')
    if not isFigmaSVG or (id is None or id.find('stroke') == -1):
      tr = nodeTranslation(path)
      d = path.getAttribute('d')
      paths2.append((d, tr))
  return paths2, isFigmaSVG
def translatePath(path, trX, trY):
  # Unimplemented stub; translation is currently applied with
  # Path.translated() inside parseSVG instead.
  pass
def parseSVG(svgFile):
  # Parse an SVG file into svgpathtools Path objects (translated into
  # absolute position) plus the svgsync hash recorded in the file, if any.
  svgCode = None
  with open(svgFile, 'r') as f:
    svgCode = f.read()
  existingSvgHash = findSvgSyncHashInSVG(svgCode)
  print 'hash in SVG file:', existingSvgHash
  svgPathDefs, isFigmaSVG = svgGetPaths(svgCode)
  paths = []
  for pathDef, tr in svgPathDefs:
    print 'pathDef:', pathDef, 'tr:', tr
    path = parse_path(pathDef)
    # Bake the node-tree translation into the path coordinates.
    if tr[0] != 0 or tr[1] != 0:
      path = path.translated(vec2(*tr))
    paths.append(path)
  return paths, existingSvgHash
def syncGlyphUFOToSVG(g, glyphFile, svgFile, mtime, hasSvgFile):
  # Work-in-progress UFO -> SVG export with hash-based change detection.
  # NOTE(review): the unconditional sys.exit(1) below aborts the process
  # after printing the hashes, and the unreachable tail references
  # `pathHash`, which is never defined in this function.
  # # Let's print out the first path object and the color it was in the SVG
  # # We'll see it is composed of two CubicBezier objects and, in the SVG file it
  # # came from, it was red
  # paths, attributes, svg_attributes = svg2paths(svgFile, return_svg_attributes=True)
  # print('svg_attributes:', repr(svg_attributes))
  # # redpath = paths[0]
  # # redpath_attribs = attributes[0]
  # print(paths)
  # print(attributes)
  # wsvg(paths, attributes=attributes, svg_attributes=svg_attributes, filename=svgFile + '-x.svg')
  # existingSVGHash = readSVGHash(svgFile)
  svgPaths = None
  existingSVGHash = None
  if hasSvgFile:
    # Hash the paths currently stored in the SVG file.
    svgPaths, existingSVGHash = parseSVG(svgFile)
    print 'existingSVGHash:', existingSVGHash
    print 'svgPaths:\n', '\n'.join([p.d() for p in svgPaths])
    svgHash = hashPaths(svgPaths)
    print 'hash(SVG-glyph) =>', svgHash
  # computedSVGHash = computeSVGHashFromSVG(svgFile)
  # print 'computeSVGHashFromSVG:', computedSVGHash
  # Hash the paths derived from the UFO glyph for comparison.
  ufoPaths = glyphToPaths(g)
  print 'ufoPaths:\n', '\n'.join([p.d() for p in ufoPaths])
  ufoGlyphHash = hashPaths(ufoPaths)
  print 'hash(UFO-glyph) =>', ufoGlyphHash
  # svg = glyphToSVG(g, ufoGlyphHash)
  # with open('/Users/rsms/src/interface/_local/svgPaths.txt', 'w') as f:
  #   f.write(svgPaths[0].d())
  # with open('/Users/rsms/src/interface/_local/ufoPaths.txt', 'w') as f:
  #   f.write(ufoPaths[0].d())
  # print svgPaths[0].d() == ufoPaths[0].d()
  # svgHash = hashPaths()
  # print 'hash(UFO-glyph) =>', pathHash
  sys.exit(1)
  # --- unreachable below this point (see NOTE above) ---
  if pathHash == existingSVGHash:
    return (None, 0) # did not change
  svg = glyphToSVG(g, pathHash)
  sys.exit(1)
  writeFileAndMkDirsIfNeeded(svgFile, svg)
  os.utime(svgFile, (mtime, mtime))
  print 'svgsync write', svgFile
  g.lib['svgsync.hash'] = pathHash
  return (glyphFile, mtime)
def syncGlyphSVGToUFO(glyphname, svgFile):
  # Import svgFile's outline into the UFO glyph of the same name.
  # NOTE(review): sys.exit(1) below disables this direction in this
  # work-in-progress version; everything after it is unreachable.
  print glyphname + ': SVG -> UFO'
  sys.exit(1)
  svg = ''
  with open(svgFile, 'r') as f:
    svg = f.read()
  g = font.getGlyph(glyphname)
  glyphUpdateFromSVG(g, svg)
def findGlifFile(glyphname):
  # Locate the .glif file for `glyphname` inside the UFO's glyphs/ dir.
  # UFO filename mangling appends underscores to avoid name/case clashes,
  # so several candidate spellings are probed in turn.
  # Returns (path, stat_result) or ('', None) when nothing matches.
  # glyphname.glif
  # glyphname_.glif
  # glyphname__.glif
  # glyphname___.glif
  for underscoreCount in range(0, 5):
    fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
    st = stat(fn)
    if st is not None:
      return fn, st
  if glyphname.find('.') != -1:
    # Dotted names mangle only the part before the first dot:
    # glyph_.name.glif
    # glyph__.name.glif
    # glyph___.name.glif
    for underscoreCount in range(0, 5):
      nv = glyphname.split('.')
      nv[0] = nv[0] + ('_' * underscoreCount)
      ns = '.'.join(nv)
      fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
      st = stat(fn)
      if st is not None:
        return fn, st
  if glyphname.find('_') != -1:
    # Embedded underscores may themselves be doubled or tripled, with an
    # optional run of trailing underscores as well:
    # glyph_name.glif
    # glyph_name_.glif
    # glyph_name__.glif
    # glyph__name.glif
    # glyph__name_.glif
    # glyph__name__.glif
    # glyph___name.glif
    # glyph___name_.glif
    # glyph___name__.glif
    for x in range(0, 4):
      for y in range(0, 5):
        ns = glyphname.replace('_', '__' + ('_' * x))
        fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
        st = stat(fn)
        if st is not None:
          return fn, st
  return ('', None)
def syncGlyph(glyphname, createSVG=False): # => (glyphname, mtime) or (None, 0) if noop
  # Sync one glyph in whichever direction is newer; with createSVG, export
  # even when no SVG file exists yet.
  glyphFile, glyphStat = findGlifFile(glyphname)
  svgFile = os.path.join(svgdir, glyphname + '.svg')
  svgStat = stat(svgFile)
  if glyphStat is None and svgStat is None:
    raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
  # Compare mtimes; a missing file counts as 0 (oldest). cmp() is Py2-only.
  c = cmp(
    0 if glyphStat is None else glyphStat.st_mtime,
    0 if svgStat is None else svgStat.st_mtime
  )
  g = font.getGlyph(glyphname)
  # Hash recorded in the glif's lib from the last successful sync, if any.
  ufoPathHash = g.lib['svgsync.hash'] if 'svgsync.hash' in g.lib else None
  print '[syncGlyph] g.lib["svgsync.hash"] =', ufoPathHash
  # NOTE(review): this override forces the UFO -> SVG branch regardless of
  # mtimes; remove once debugging is done.
  c = 1 # XXX DEBUG
  if c < 0:
    # SVG is newer: import it into the UFO.
    syncGlyphSVGToUFO(glyphname, svgFile)
    return (glyphFile, svgStat.st_mtime) # glif file in UFO change + it's new mtime
  elif c > 0 and (svgStat is not None or createSVG):
    print glyphname + ': UFO -> SVG'
    return syncGlyphUFOToSVG(
      g,
      glyphFile,
      svgFile,
      glyphStat.st_mtime,
      hasSvgFile=svgStat is not None
    )
  return (None, 0) # UFO did not change
# ————————————————————————————————————————————————————————————————————————
# main
# Open the UFO, derive the SVG directory, sync each requested glyph, then
# save the UFO and restore glif mtimes so future runs compare correctly.
argparser = argparse.ArgumentParser(description='Convert UFO glyphs to SVG')
argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
  default='',
  help='Write SVG files to <dir>. If not specified, SVG files are' +
    ' written to: {dirname(<ufopath>)/svg/<familyname>/<style>')
argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
  help='Path to UFO packages')
argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
  help='Glyphs to convert. Converts all if none specified.')
args = argparser.parse_args()
ufopath = args.ufopath.rstrip('/')
font = OpenFont(ufopath)
# Glyphs may extend above the ascender; use at least one full em.
effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
svgdir = args.svgdir
if len(svgdir) == 0:
  # Default location: <ufo parent dir>/svg/<family>/<style>
  svgdir = os.path.join(
    os.path.dirname(ufopath),
    'svg',
    font.info.familyName,
    font.info.styleName
  )
print 'svgsync sync %s (%s)' % (font.info.familyName, font.info.styleName)
# New SVG files are only created for glyphs named explicitly on the CLI.
createSVGs = len(args.glyphs) > 0
glyphnames = args.glyphs if len(args.glyphs) else font.keys()
modifiedGlifFiles = []
for glyphname in glyphnames:
  glyphFile, mtime = syncGlyph(glyphname, createSVG=createSVGs)
  if glyphFile is not None:
    modifiedGlifFiles.append((glyphFile, mtime))
if len(modifiedGlifFiles) > 0:
  font.save()
  # font.save() rewrites glifs; restore mtimes to match the source SVGs.
  for glyphFile, mtime in modifiedGlifFiles:
    os.utime(glyphFile, (mtime, mtime))
    print 'svgsync write', glyphFile

9
misc/ttf2woff/.gitignore vendored Normal file
View File

@ -0,0 +1,9 @@
*.o
*.d
*.core
*.obj
*.exe
*~
.DS_Store
ttf2woff

68
misc/ttf2woff/Makefile Normal file
View File

@ -0,0 +1,68 @@
# gmake
# Builds ttf2woff, optionally (default) compressing with bundled zopfli
# sources instead of plain zlib.

NAME = ttf2woff
VERSION = 0.14
BINDIR = /usr/local/bin

PKG=$(NAME)-$(VERSION)

# File lists used by the `dist` tarball target.
FILES_TTF2WOFF := Makefile ttf2woff.c ttf2woff.h genwoff.c genttf.c readttf.c readttc.c readwoff.c optimize.c \
	comp-zlib.c comp-zopfli.c compat.c ttf2woff.rc zopfli.diff
FILES_ZOPFLI := zopfli.h symbols.h \
	$(patsubst %,%.h,zlib_container deflate lz77 blocksplitter squeeze hash cache tree util katajainen) \
	$(patsubst %,%.c,zlib_container deflate lz77 blocksplitter squeeze hash cache tree util katajainen)
FILES += $(FILES_TTF2WOFF) $(addprefix zopfli/,$(FILES_ZOPFLI))

# Set to empty (make ZOPFLI=) to fall back to zlib-only compression.
ZOPFLI = 1

OBJ := ttf2woff.o readttf.o readttc.o readwoff.o genwoff.o genttf.o optimize.o
ifeq ($(ZOPFLI),)
OBJ += comp-zlib.o
else
OBJ += comp-zopfli.o
LDFLAGS += -lm
endif

CFLAGS ?= -O2 -g
LDFLAGS += -lz

# eg. make WIN32=1 CC=mingw32-gcc RC=mingw32-windres
ifdef WIN32
EXE = .exe
CFLAGS += -DNO_ERRWARN
OBJ += compat.o rc.o
endif

ttf2woff$(EXE): $(OBJ)
	$(CC) -o $@ $(OBJ) $(LDFLAGS)

ttf2woff.o: ttf2woff.c ttf2woff.h Makefile
	$(CC) $(CFLAGS) -DVERSION=$(VERSION) -c ttf2woff.c

comp-zopfli.o: comp-zopfli.c ttf2woff.h $(addprefix zopfli/,$(FILES_ZOPFLI))
	$(CC) $(CFLAGS) -c comp-zopfli.c

# Windows resource object; VERNUMS turns "0.14" into comma form "0,14".
rc.o: ttf2woff.rc Makefile
	$(RC) $(DEF) -DVERNUMS=`echo $(VERSION) | sed 's/\\./,/g; s/[^0-9,]//g'` -DVERSION=$(VERSION) -o $@ ttf2woff.rc

install: ttf2woff
	install -s $< $(BINDIR)

clean:
	rm -f ttf2woff $(addsuffix .o,$(basename $(filter %.c,$(FILES_TTF2WOFF))))

# Build a release tarball through a temporary self-pointing symlink so the
# archive contains a $(PKG)/ prefix.
dist:
	ln -s . $(PKG)
	tar czf $(PKG).tar.gz --group=root --owner=root $(addprefix $(PKG)/, $(FILES)); \
	rm $(PKG)

.PHONY: install clean dist zopfli zopfli.diff

# git://github.com/google/zopfli.git
ZOPFLI_SRC = zopfli-src

# Import zopfli sources from a local checkout and apply the local patch.
zopfli: $(addprefix $(ZOPFLI_SRC)/src/zopfli/,$(FILES_ZOPFLI))
	@install -d zopfli
	cp -pf $^ zopfli
	patch -p3 -dzopfli <zopfli.diff

# Regenerate the local patch from the differences between the imported
# checkout and the (possibly edited) zopfli/ directory.
zopfli.diff:
	diff -u --minimal $(ZOPFLI_SRC)/src/zopfli zopfli >$@; true

34
misc/ttf2woff/comp-zlib.c Normal file
View File

@ -0,0 +1,34 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <stdlib.h>
#include <zlib.h>
#include "ttf2woff.h"
/* Identifies the active compressor (note: historical misspelling of
 * "compression" -- presumably referenced from other translation units,
 * so the name must stay; verify before renaming). */
char *copression_by = "zlib";

/* Compress inp into a freshly allocated buffer with zlib level 9.
 * On success -- compressed AND REALLY_SMALLER than the input -- fills
 * `out` and returns 1. Otherwise frees the scratch buffer and returns 0,
 * leaving `out` untouched.
 * The destination buffer is only inp->len bytes: compress2 returns
 * Z_BUF_ERROR when the result would not fit, which is fine since such
 * output would be rejected anyway. */
int zlib_compress(struct buf *out, struct buf *inp)
{
	u8 *b;
	int v;
	uLongf len;

	len = inp->len;
	b = my_alloc(inp->len);
	v = compress2(b,&len, inp->ptr,inp->len, 9);

	if(v==Z_OK && REALLY_SMALLER(len, inp->len)) {
		out->ptr = b;
		out->len = len;
		return 1;
	} else {
		my_free(b);
		return 0;
	}
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <stdlib.h>
#include "ttf2woff.h"
#include "zopfli/zlib_container.c"
#include "zopfli/deflate.c"
#include "zopfli/lz77.c"
#include "zopfli/blocksplitter.c"
#include "zopfli/squeeze.c"
#include "zopfli/hash.c"
#include "zopfli/cache.c"
#include "zopfli/tree.c"
#include "zopfli/util.c"
#include "zopfli/katajainen.c"
#define adler32 zlib_adler32
#include <zlib.h>
/* Identifies the active compressor (note: historical misspelling of
 * "compression" -- presumably referenced from other translation units,
 * so the name must stay; verify before renaming). */
char *copression_by = "zopfli";

/* Compress inp with zopfli (zlib container, 15 iterations).
 * Returns 1 with `out` filled when the result is REALLY_SMALLER than the
 * input; otherwise frees the buffer and returns 0. The compressed stream
 * is round-tripped through zlib's uncompress() as a sanity check, and the
 * process aborts via errx() on any mismatch. */
int zlib_compress(struct buf *out, struct buf *inp)
{
	ZopfliOptions opt = {0};
	u8 *b=0;
	size_t sz=0;

	opt.numiterations = 15;
	ZopfliZlibCompress(&opt, inp->ptr, inp->len, &b, &sz);

	if(REALLY_SMALLER(sz, inp->len)) {
		/* Trust, but verify */
		uLong tmpl = inp->len;
		Bytef *tmpb = my_alloc(inp->len);
		int v = uncompress(tmpb, &tmpl, b, sz);
		if(v!=Z_OK || tmpl!=inp->len)
			errx(3,"Zopfli error");
		my_free(tmpb);
		out->ptr = b;
		out->len = sz;
		return 1;
	} else {
		/* b was allocated by zopfli via malloc, hence plain free(). */
		free(b);
		return 0;
	}
}

43
misc/ttf2woff/compat.c Normal file
View File

@ -0,0 +1,43 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
/* Shared diagnostics core for the err/warn family below. Prints the
 * formatted message (when fmt is non-NULL) to stderr, appends
 * ": <strerror(errnum)>" when errnum >= 0, terminates the line, and
 * exits with `status` when status >= 0. */
static void er(int status, int errnum, char *fmt, va_list *ap)
{
	if(fmt)
		vfprintf(stderr, fmt, *ap);
	va_end(*ap);
	if(errnum >= 0)
		fprintf(stderr, ": %s", strerror(errnum));
	putc('\n', stderr);
	if(status >= 0)
		exit(status);
}

/* Fatal: formatted message plus strerror(errno), then exit(s). */
void err(int s, char *f, ...)
{
	va_list ap;
	va_start(ap, f);
	er(s, errno, f, &ap);
}

/* Fatal: formatted message only (no errno text), then exit(s). */
void errx(int s, char *f, ...)
{
	va_list ap;
	va_start(ap, f);
	er(s, -1, f, &ap);
}

/* Non-fatal: formatted message plus strerror(errno). */
void warn(char *f, ...)
{
	va_list ap;
	va_start(ap, f);
	er(-1, errno, f, &ap);
}

/* Non-fatal: formatted message only. */
void warnx(char *f, ...)
{
	va_list ap;
	va_start(ap, f);
	er(-1, -1, f, &ap);
}

63
misc/ttf2woff/genttf.c Normal file
View File

@ -0,0 +1,63 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include "ttf2woff.h"
/* Write the 12-byte sfnt header into buf: flavor tag, table count, and the
 * binary-search helper fields derived from the largest power of two not
 * exceeding ntables. Returns the pointer just past the written bytes. */
u8 *put_ttf_header(u8 buf[12], struct ttf *ttf)
{
	u8 *p = buf;
	int n = ttf->ntables;
	p = p32(p, ttf->flavor);
	p = p16(p, n);
	/* Clear low set bits until n is the largest power of two <= ntables. */
	while(n & n-1) n &= n-1;
	p = p16(p, n<<4);               /* searchRange = maxpow2 * 16 */
	p = p16(p, ffs(n)-1);           /* entrySelector = log2(maxpow2) */
	p = p16(p, ttf->ntables-n << 4); /* rangeShift = (ntables-maxpow2)*16 */
	return p;
}
/* Serialize the font as a raw sfnt (TTF/OTF) blob into out.
 * Layout: 12-byte header, one 16-byte directory entry per table, then each
 * table's payload padded with zeros to a 4-byte boundary. Directory entries
 * follow tables[] order; payloads follow tab_pos[] (file position) order. */
void gen_ttf(struct buf *out, struct ttf *ttf)
{
	unsigned sfnt_size;
	u8 *buf, *p;
	int i;

	/* First pass: total size, and each table's offset in the output. */
	sfnt_size = 12 + 16*ttf->ntables;
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = ttf->tab_pos[i];
		t->pos = sfnt_size; // remember offset in output file
		sfnt_size += t->buf.len+3 & ~3; /* round up to 4-byte boundary */
	}

	buf = my_alloc(sfnt_size);
	p = put_ttf_header(buf, ttf);

	/* Table directory. */
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = &ttf->tables[i];
		p = p32(p, t->tag);
		p = p32(p, t->csum);
		p = p32(p, t->pos);
		p = p32(p, t->buf.len);
	}

	/* Table payloads, each zero-padded to a 4-byte boundary. */
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = ttf->tab_pos[i];
		unsigned sz = t->buf.len;
		p = append(p, t->buf.ptr, sz);
		while(sz&3) *p++=0, sz++;
	}
	assert(p == buf+sfnt_size);

	out->ptr = buf;
	out->len = sfnt_size;
}

95
misc/ttf2woff/genwoff.c Normal file
View File

@ -0,0 +1,95 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include "ttf2woff.h"
#define MIN_COMPR 16
/* Serialize the font as a WOFF blob into out.
 * Layout: 44-byte WOFF header, one 20-byte directory entry per table, the
 * (individually compressed) table payloads padded to 4 bytes, then optional
 * compressed metadata and private data blocks.
 * Tables shorter than MIN_COMPR bytes are stored uncompressed; for the rest
 * zlib_compress() only replaces zbuf when compression actually shrinks the
 * data (otherwise zbuf keeps aliasing the raw buffer). */
void gen_woff(struct buf *out, struct ttf *ttf)
{
	unsigned woff_size, sfnt_size;
	struct buf meta_comp={0};
	u32 meta_off, priv_off;
	u8 *buf, *p;
	int i;

	/* First pass: compute sizes and assign each table's file offset.
	 * sfnt_size tracks what the font would occupy as a raw sfnt (a
	 * required header field). */
	woff_size = 44 + 20*ttf->ntables;
	sfnt_size = 12 + 16*ttf->ntables;
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = ttf->tab_pos[i];
		t->pos = woff_size; // remember offset in output file
		t->zbuf = t->buf;
		if(t->buf.len >= MIN_COMPR)
			zlib_compress(&t->zbuf, &t->buf);
		sfnt_size += t->buf.len+3 & ~3;
		woff_size += t->zbuf.len+3 & ~3;
	}

	/* Optional extended metadata block (compressed). */
	meta_off = 0;
	if(ttf->woff_meta.len >= MIN_COMPR) {
		meta_comp = ttf->woff_meta;
		zlib_compress(&meta_comp, &ttf->woff_meta);
		meta_off = woff_size;
		woff_size += meta_comp.len;
	}

	/* Optional private data block (stored as-is). */
	priv_off = 0;
	if(ttf->woff_priv.len) {
		priv_off = woff_size;
		woff_size += ttf->woff_priv.len;
	}

	/* WOFF header. */
	buf = my_alloc(woff_size);
	p32(buf, 0x774F4646);            /* signature 'wOFF' */
	p32(buf+4, ttf->flavor);
	p32(buf+8, woff_size);
	p16(buf+12, ttf->ntables);
	p16(buf+14, 0);                  /* reserved */
	p32(buf+16, sfnt_size);          /* totalSfntSize */
	p32(buf+20, 0); // version ?
	p32(buf+24, meta_off);
	p32(buf+28, meta_comp.len); // meta len
	p32(buf+32, ttf->woff_meta.len); // meta orig len
	p32(buf+36, priv_off);
	p32(buf+40, ttf->woff_priv.len);

	/* Table directory (tables[] order). */
	p = buf + 44;
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = &ttf->tables[i];
		p32(p, t->tag);
		p32(p+4, t->pos);
		p32(p+8, t->zbuf.len);
		p32(p+12, t->buf.len);
		p32(p+16, t->csum);
		p += 20;
	}

	/* Table payloads in file-position order, zero-padded to 4 bytes. */
	for(i=0; i<ttf->ntables; i++) {
		struct table *t = ttf->tab_pos[i];
		u32 sz = t->zbuf.len;
		p = append(p, t->zbuf.ptr, sz);
		while(sz&3) *p++=0, sz++;
		// if(t->zbuf.ptr != t->buf.ptr)
		// 	my_free(t->zbuf.ptr);
	}

	if(meta_comp.len)
		p = append(p, meta_comp.ptr, meta_comp.len);
	if(ttf->woff_priv.len)
		p = append(p, ttf->woff_priv.ptr, ttf->woff_priv.len);
	assert(p == buf+woff_size);

	out->ptr = buf;
	out->len = woff_size;
}

319
misc/ttf2woff/optimize.c Normal file
View File

@ -0,0 +1,319 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include "ttf2woff.h"
struct table *find_table(struct ttf *ttf, char tag[4])
{
u32 tg = g32((u8*)tag);
int i;
for(i=0; i<ttf->ntables; i++)
if(ttf->tables[i].tag == tg)
return &ttf->tables[i];
return 0;
}
/* Install a new heap buffer as t's contents, releasing the previous
 * one if the table owned it; marks the table modified and owning. */
static void replace_table(struct table *t, u8 *p, int l)
{
	if(t->free_buf)
		t->buf.ptr = my_free(t->buf.ptr);
	t->modified = 1;
	t->free_buf = 1;
	t->buf.len = l;
	t->buf.ptr = p;
}
/* Report the size win (when verbose) and hand the freshly allocated
 * buffer `new` over to the table via replace_table. */
static void optimized(struct table *t, struct buf new)
{
if(g.verbose)
echo("Optimized %s table: %u > %u (%d bytes)", t->name, t->buf.len, new.len, new.len-t->buf.len);
replace_table(t, new.ptr, new.len);
}
/* Convert a 32-bit ("long") loca table to the 16-bit ("short") format
 * when the glyf data is small enough, halving loca and updating
 * head.indexToLocFormat to match. */
static void optimize_loca(struct ttf *ttf)
{
struct table *head, *loca, *glyf;
struct buf new;
int i,n;
head = find_table(ttf, "head");
loca = find_table(ttf, "loca");
glyf = find_table(ttf, "glyf");
if(!head || !loca || !glyf)
return;
/* head.indexToLocFormat (offset 50) must currently say "long" (1) */
if(head->buf.len<54 || g16(head->buf.ptr+50)!=1)
return;
if(loca->buf.len&3 || loca->buf.len<4)
return;
// we have 32-bit loca table
/* sanity: final loca entry must equal the glyf table length */
if(glyf->buf.len != g32(loca->buf.ptr+loca->buf.len-4))
return;
/* short entries store offset/2 in 16 bits; this 64KB limit is
   conservative (the format could address up to 2*0xFFFF) */
if(glyf->buf.len >= 1<<16)
return;
n = loca->buf.len>>2;
new.len = 2*n;
new.ptr = my_alloc(new.len);
for(i=0;i<n;i++) {
u32 o = g32(loca->buf.ptr+4*i);
if(o&1) {
/* odd offsets cannot be halved — leave the table alone */
echo("Bad offset in loca");
my_free(new.ptr);
return;
}
p16(new.ptr+2*i, o>>1);
}
optimized(loca, new);
p16(head->buf.ptr+50, 0); // indexToLocFormat = short
head->modified = 1;
}
/* Length of the longest suffix of a that equals a prefix of b
 * (0 when they do not overlap at all). */
static int overlap(struct buf a, struct buf b)
{
	int n = b.len < a.len ? b.len : a.len;
	for(; n > 0; n--)
		if(memcmp(a.ptr + a.len - n, b.ptr, n) == 0)
			break;
	return n;
}
/* memmem-style search: first occurrence of b inside a, or NULL. */
static u8 *bufbuf(struct buf a, struct buf b)
{
	u8 *last = a.ptr + a.len - b.len;
	u8 *s;
	for(s = a.ptr; s <= last; s++)
		if(!memcmp(s, b.ptr, b.len))
			return s;
	return 0;
}
static int name_cmp_len(const void *va, const void *vb) {
struct buf a = *(struct buf*)va;
struct buf b = *(struct buf*)vb;
int d = a.len - b.len;
if(!d) d = memcmp(a.ptr, b.ptr, a.len);
return d;
}
static void optimize_name(struct ttf *ttf)
{
struct table *name = find_table(ttf, "name");
struct buf str, new;
struct buf *ent;
u8 *p;
int count,n,i;
if(!name || name->buf.len<6+2*12+1 || g16(name->buf.ptr))
return;
n = g16(name->buf.ptr+4); // stringOffset
if(name->buf.len < n)
goto corrupted;
str.ptr = name->buf.ptr+n;
str.len = name->buf.len-n;
count = g16(name->buf.ptr+2);
if(name->buf.len < 6+12*count) {
corrupted:
echo("Name table corrupted");
return;
}
n = count;
ent = my_alloc(n * sizeof *ent);
p = name->buf.ptr+6;
for(i=0; i<n; i++) {
unsigned l = g16(p+8);
unsigned o = g16(p+10);
if(o+l > str.len) {
echo("Bad string location in name table");
my_free(ent);
return;
}
if(l) {
ent[i].ptr = str.ptr + o;
ent[i].len = l;
}
p += 12;
}
qsort(ent, n, sizeof *ent, name_cmp_len);
for(;;) {
int j,mo,mi,mj;
struct buf a, b, c;
mo = 0;
for(j=0;j<n;j++) for(i=1;i<n;i++) if(i!=j) {
int o;
a = ent[i];
b = ent[j];
if(bufbuf(a,b))
goto remove_b;
o = overlap(a,b);
if(o > mo) {
mo = o;
mi = i;
mj = j;
}
}
if(!mo)
break;
a = ent[mi];
b = ent[mj];
c.len = a.len + b.len - mo;
c.ptr = my_alloc(c.len);
p = append(c.ptr, a.ptr, a.len);
append(p, b.ptr+mo, b.len-mo);
if(a.ptr<str.ptr || a.ptr>=str.ptr+str.len)
my_free(a.ptr);
i = mi<mj ? mi : mj;
j = mi<mj ? mj : mi;
ent[i] = c;
remove_b:
if(b.ptr<str.ptr || b.ptr>=str.ptr+str.len)
my_free(b.ptr);
n--;
while(j < n) ent[j]=ent[j+1], j++;
}
{
int sz = 6 + 12*count;
for(i=0;i<n;i++)
sz += ent[i].len;
if(sz >= name->buf.len) {
my_free(ent);
return;
}
new.len = sz;
new.ptr = my_alloc(sz);
p = new.ptr + 6 + 12*count;
for(i=0;i<n;i++) {
struct buf a = ent[i];
memcpy(p,a.ptr,a.len); p+=a.len;
if(a.ptr<str.ptr || a.ptr>=str.ptr+str.len)
my_free(a.ptr);
}
assert(p == new.ptr+new.len);
}
my_free(ent);
memcpy(new.ptr, name->buf.ptr, 6+12*count);
p16(new.ptr+4,6+12*count);
{
struct buf newstr;
newstr.ptr = new.ptr + 6+12*count;
newstr.len = new.len - 6+12*count;
p = new.ptr + 6 + 10;
for(i=0;i<count;i++) {
struct buf a = {str.ptr+g16(p), g16(p-2)};
u8 *s = bufbuf(newstr, a);
assert(s);
p16(p, s-newstr.ptr);
p += 12;
}
}
#ifndef NDEBUG
for(i=0; i<count; i++) {
u8 *p0 = name->buf.ptr;
u8 *p1 = new.ptr;
p0 += g16(p0+4) + g16(p0+6+12*i+10);
p1 += g16(p1+4) + g16(p1+6+12*i+10);
assert(!memcmp(p0,p1,g16(new.ptr+6+12*i+8)));
}
#endif
optimized(name, new);
}
/* Shrink hmtx: trailing longHorMetric entries that all repeat the last
 * advance width may be stored as 16-bit left-side bearings only;
 * hhea.numberOfHMetrics (offset 34) is reduced accordingly. */
static void optimize_hmtx(struct ttf *ttf)
{
struct table *hhea, *hmtx;
struct buf buf;
u8 *p, *q;
int nlhm,adv,n;
hhea = find_table(ttf, "hhea");
hmtx = find_table(ttf, "hmtx");
/* require hhea version 1.0 and a complete header */
if(!hhea || !hmtx || hhea->buf.len < 36 || g32(hhea->buf.ptr)!=0x10000)
return;
nlhm = g16(hhea->buf.ptr + 34); // numberOfHMetrics
buf = hmtx->buf;
if(!nlhm || buf.len&1 || buf.len < 4*nlhm) {
return;
}
if(nlhm<2)
return;
/* n = number of leading full entries that must be kept: scan back
 * while the advance width repeats the last one */
p = buf.ptr + 4*(nlhm-1);
adv = g16(p);
for(n=nlhm; n>1; n--) {
p -= 4;
if(adv != g16(p))
break;
}
if(n < nlhm) {
struct buf new;
int i, nent = (buf.len>>1) - nlhm;
/* new layout: n full 4-byte entries, then 2-byte bearings */
new.len = 2*nent + 2*n;
new.ptr = my_alloc(new.len);
p = append(new.ptr, buf.ptr, n<<2); // keep first n full entries
q = buf.ptr + (n<<2);
for(i=n; i<nlhm; i++) {
p = p16(p, g16(q+2)); // drop advance, keep left side bearing
q += 4;
}
p = append(p, q, buf.ptr+buf.len-q); // existing bearing-only tail
assert(p == new.ptr+new.len);
optimized(hmtx, new);
p16(hhea->buf.ptr+34, n);
hhea->modified = 1;
}
}
/* Run all table-level size optimizations on the font. */
void optimize(struct ttf *ttf)
{
optimize_loca(ttf);
optimize_name(ttf);
optimize_hmtx(ttf);
}

29
misc/ttf2woff/readttc.c Normal file
View File

@ -0,0 +1,29 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <string.h>
#include <stdlib.h>
#include "ttf2woff.h"
/* Select font number `fontn` from a TTC collection and parse it via
 * read_ttf. Exits on malformed data or an out-of-range index. */
void read_ttc(struct ttf *ttf, u8 *data, size_t length, int fontn)
{
unsigned n, o;
/* header with one offset (12+4) plus a minimal sfnt (12+16) */
if(length < 16+12+16) BAD_FONT;
n = g32(data+8); // numFonts
if(length < 16+(4+12+16)*n) BAD_FONT;
if(fontn<0 || fontn>=n)
errx(1, "No font #%d in collection",fontn);
o = g32(data+12+4*fontn); // offset of the selected sfnt header
if(o >= length) BAD_FONT;
read_ttf(ttf, data, length, o);
}

47
misc/ttf2woff/readttf.c Normal file
View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <string.h>
#include <stdlib.h>
#include "ttf2woff.h"
/* Parse an sfnt (TTF/OTF) table directory starting at `start` and fill
 * ttf->tables with entries pointing into `data` (no copies; free_buf
 * stays 0 for every table). */
void read_ttf(struct ttf *ttf, u8 *data, size_t length, unsigned start)
{
int i;
u8 *p;
/* need the directory header plus at least one entry */
if(length-start<+12+16)
BAD_FONT;
ttf->flavor = g32(data+start);
// XXX check type 'true', or ...
ttf->ntables = g16(data+start+4);
if(!ttf->ntables || length-start<=12+16*ttf->ntables)
BAD_FONT;
alloc_tables(ttf);
p = data+start+12;
for(i=0; i<ttf->ntables; i++) {
struct table *t = &ttf->tables[i];
u32 off=g32(p+8), len=g32(p+12);
/* the or-test also rejects off+len wrapping around 32 bits */
if((off|len)>length || off+len>length)
BAD_FONT;
t->tag = g32(p);
t->csum = g32(p+4);
t->pos = off;
t->buf.ptr = data + off;
t->buf.len = len;
name_table(t);
// echo("%5X %5X %s", off, len, t->name);
p += 16;
}
}

88
misc/ttf2woff/readwoff.c Normal file
View File

@ -0,0 +1,88 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <string.h>
#include <stdlib.h>
#include <zlib.h>
#include "ttf2woff.h"
/* Return table data at p: taken verbatim (pointing into the input)
 * when len == orig_len, otherwise zlib-inflated into a freshly
 * allocated buffer of orig_len bytes. Exits on any zlib problem. */
static struct buf get_or_inflate(u8 *p, size_t len, size_t orig_len)
{
struct buf buf;
uLongf blen;
char *m;
int v;
if(len == orig_len) { /* stored uncompressed */
buf.ptr = p;
buf.len = len;
return buf;
}
buf.len = orig_len;
buf.ptr = my_alloc(orig_len);
blen = buf.len;
v = uncompress(buf.ptr, &blen, p, len);
switch(v) {
case Z_OK:
if(blen==buf.len)
return buf;
/* fall through: inflated size differs from the declared one */
case Z_MEM_ERROR: m = "BAD_FONT uncompressed length"; break;
case Z_DATA_ERROR: m = "Data corrupted"; ; break;
default: m = "Error";
}
errx(3, "zlib: %s", m);
}
/* Parse a WOFF file: header, optional metadata/private blocks, then the
 * table directory, inflating compressed tables as needed.
 * Fix vs. original: free_buf was set to 1 unconditionally, but
 * get_or_inflate returns a pointer into `data` for tables stored
 * uncompressed — freeing that interior pointer later (replace_table)
 * would corrupt the heap. Mark only freshly inflated buffers owned,
 * matching read_ttf's convention. */
void read_woff(struct ttf *ttf, u8 *data, size_t length)
{
u8 *p;
int i;
if(length<=44+20) BAD_FONT; /* header + at least one dir entry */
ttf->flavor = g32(data+4);
if(g32(data+8) != length) BAD_FONT; /* stored length must match */
ttf->ntables = g16(data+12);
if(!ttf->ntables) BAD_FONT;
{
u32 len=g32(data+28), off; // compressed metadata length
ttf->woff_meta.len = 0;
if(len) {
off = g32(data+24);
if((off|len)>length || off+len>length)
BAD_FONT;
ttf->woff_meta = get_or_inflate(data+off, len, g32(data+32));
}
}
ttf->woff_priv.len = g32(data+40);
ttf->woff_priv.ptr = ttf->woff_priv.len ? data+g32(data+36) : 0;
alloc_tables(ttf);
p = data+44;
for(i=0; i<ttf->ntables; i++) {
struct table *t = &ttf->tables[i];
u32 off=g32(p+4), len=g32(p+8);
if((off|len)>length || off+len>length)
BAD_FONT;
t->tag = g32(p);
t->csum = g32(p+16);
t->pos = off;
t->buf = get_or_inflate(data+off, len, g32(p+12));
/* own the buffer only when it was actually allocated (inflated) */
t->free_buf = t->buf.ptr != data+off;
name_table(t);
p += 20;
}
}

523
misc/ttf2woff/ttf2woff.c Normal file
View File

@ -0,0 +1,523 @@
/*
* Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <stdlib.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <getopt.h>
#include <stdio.h>
#include <stdarg.h>
#include <strings.h>
#include <errno.h>
#include "ttf2woff.h"
#ifndef O_BINARY
#define O_BINARY 0
#endif
struct flags g;
/* printf-style diagnostic with an appended newline; goes to stdout
 * unless stdout already carries font data, then to stderr. */
void echo(char *f, ...)
{
	FILE *dst = g.stdout_used ? stderr : stdout;
	va_list ap;
	va_start(ap, f);
	vfprintf(dst, f, ap);
	va_end(ap);
	fputc('\n', dst);
}
/* malloc wrapper that terminates the program instead of returning NULL. */
void *my_alloc(size_t sz)
{
	void *mem = malloc(sz);
	if(mem == 0)
		errx(1,"Out of memory");
	return mem;
}
/* free() wrapper returning NULL so callers can clear the pointer in
 * one step: p = my_free(p); */
void *my_free(void *p)
{
free(p);
return 0;
}
/* realloc wrapper that terminates the program on allocation failure. */
void *my_realloc(void *p, size_t sz)
{
	void *mem = realloc(p, sz);
	if(mem == 0)
		errx(1,"Out of memory");
	return mem;
}
/* Read the whole of `path` into a heap buffer ("-" means stdin).
 * Files with a known size (fstat) are read in one shot; size-less
 * streams (pipes, terminals) are slurped in 64KB steps, capped at
 * 64MB. Exits on any I/O error.
 * Fixes vs. original: bare malloc lacked the OOM check every other
 * allocation here has (use my_alloc); and the short-read test compared
 * signed v against unsigned file.len, so read()'s -1 converted to
 * UINT_MAX and the error went undetected. */
static struct buf read_file(char *path)
{
struct buf file = {0};
int v, fd = 0;
if(path[0]!='-' || path[1]) {
fd = open(path, O_RDONLY|O_BINARY);
if(fd<0)
err(1, "%s", path);
}
{
struct stat st;
if(fstat(fd, &st) < 0)
err(1, "fstat");
file.len = st.st_size;
}
if(file.len) {
file.ptr = my_alloc(file.len);
v = read(fd, file.ptr, file.len);
if(v < 0)
err(1, "read");
if((unsigned)v < file.len)
errx(1, "Truncated");
} else {
size_t alen = 0;
file.ptr = 0;
for(;;) {
if(file.len == alen) {
if(alen > 64<<20)
errx(1,"Too much data - aborting");
alen += 1<<16;
file.ptr = my_realloc(file.ptr, alen);
}
v = read(fd, file.ptr+file.len, alen-file.len);
if(v<=0) {
if(v) err(1, "read");
break;
}
file.len += v;
}
}
if(fd) close(fd);
return file;
}
/* Create a unique temporary file "<pt>.N" (N = 0..999) next to pt;
 * returns its fd and stores the heap-allocated name in *pnm.
 * Exits when no name can be claimed or on any other open() error.
 * Fix vs. original: bare malloc lacked the OOM check (use my_alloc). */
static int open_temporary(char *pt, char **pnm)
{
int l = strlen(pt);
char *nm = my_alloc(l+5); /* name + '.' + up to 3 digits + NUL */
char *p = nm + l;
int i, fd;
memcpy(nm, pt, l);
*p++ = '.';
for(i=0;;) {
sprintf(p, "%d", i);
/* O_EXCL guarantees we never clobber an existing file */
fd = open(nm, O_WRONLY|O_TRUNC|O_CREAT|O_BINARY|O_EXCL, 0666);
if(fd>=0)
break;
if(errno!=EEXIST)
err(1, "%s", nm);
if(++i>999)
errx(1, "Can't create temporary file");
}
*pnm = nm;
return fd;
}
/* Allocate and zero ttf->tables for ttf->ntables entries. */
void alloc_tables(struct ttf *ttf)
{
	size_t bytes = ttf->ntables * sizeof *ttf->tables;
	ttf->tables = my_alloc(bytes);
	memset(ttf->tables, 0, bytes);
}
/* Derive a printable C-string name from the table's 32-bit tag,
 * dropping spaces and non-ASCII bytes. */
void name_table(struct table *t) {
	char *out = t->name;
	int shift;
	for(shift = 24; shift >= 0; shift -= 8) {
		char c = (char)(t->tag >> shift);
		if(c > ' ' && c < 127)
			*out++ = c;
	}
	*out = 0;
}
/* OpenType table checksum: sum of the data read as big-endian 32-bit
 * words, as if zero-padded to a multiple of 4 (never reads past n). */
static u32 calc_csum(u8 *p, size_t n)
{
	u32 sum = 0;
	size_t i;
	for(i = 0; i < n; i++)
		sum += (u32)p[i] << (24 - 8*(i & 3));
	return sum;
}
/* Table tags handled specially below. */
enum {
tag_head = 0x68656164, // 'head'
tag_DSIG = 0x44534947 // 'DSIG'
};
/* Recompute every table checksum and head.checkSumAdjustment. When
 * anything changed and the font carries a non-empty digital signature,
 * the DSIG table is replaced by an empty stub — the signature could no
 * longer verify anyway. */
static void recalc_checksums(struct ttf *ttf)
{
u8 h[12];
u32 font_csum, off;
int i, modified;
struct table *head = 0;
struct table *DSIG = 0;
modified = ttf->modified;
for(i=0; i<ttf->ntables; i++) {
struct table *t = ttf->tab_pos[i];
u8 *p = t->buf.ptr;
u32 csum;
if(t->tag == tag_DSIG && t->buf.len>8)
DSIG = t;
if(t->tag != tag_head)
csum = calc_csum(p, t->buf.len);
else {
head = t;
/* head is summed with checkSumAdjustment (bytes 8-11) skipped */
csum = calc_csum(p, 8);
csum += calc_csum(p+12, t->buf.len-12);
}
modified |= t->modified;
if(csum != t->csum) {
modified = 1;
t->csum = csum;
if(!t->modified)
echo("Corrected checksum of table %s", t->name);
}
}
if(modified && DSIG) {
remove_signature:
if(DSIG->free_buf)
free(DSIG->buf.ptr);
DSIG->buf.len = 8;
DSIG->buf.ptr = (u8*)"\0\0\0\1\0\0\0"; // empty DSIG
DSIG->free_buf = 0;
DSIG->csum = calc_csum(DSIG->buf.ptr, DSIG->buf.len);
DSIG = 0;
if(g.verbose)
echo("Digital signature removed");
}
/* whole-file checksum = header + directory entries + table data */
put_ttf_header(h, ttf);
font_csum = calc_csum(h, 12);
off = 12 + 16*ttf->ntables;
for(i=0; i<ttf->ntables; i++) {
struct table *t = ttf->tab_pos[i];
/* directory entry contributes tag, csum, offset and length ... */
font_csum += t->tag + t->csum + off + t->buf.len;
/* ... and the (zero-padded) table data sums to csum once more */
font_csum += t->csum;
off += t->buf.len+3 & ~3;
}
if(!head || head->buf.len<16)
errx(1, "No head table");
{
u8 *p = head->buf.ptr + 8;
font_csum = 0xB1B0AFBA - font_csum;
if(font_csum != g32(p)) {
/* a changed adjustment also invalidates any signature */
if(DSIG)
goto remove_signature;
p32(p, font_csum);
if(!modified)
echo("Corrected checkSumAdjustment");
}
}
}
/* Print usage text to f; y selects short (0) or full (1) form.
 * Always returns 1 so callers can write `return usage(...)`. */
static int usage(FILE *f, int y)
{
if(!y) {
fprintf(f, "usage:"
"\tttf2woff [-v] font.ttf [font.woff]\n"
"\tttf2woff [-v] font.woff [font.ttf]\n"
"\tttf2woff [-v] -i font\n"
"\tttf2woff -h\n");
} else {
fprintf(f,"TTF2WOFF "STR(VERSION)" by Jan Bobrowski\n"
"usage:\n"
" ttf2woff [-v] [-O|-S] [-t type] [-X table]... [-m file] [-p file] [-u font] input [output]\n"
" ttf2woff -i [-v] [-O|-S] [-X table]... [-m file] [-p file] file\n"
" ttf2woff -l input\n"
" -v be verbose\n"
" -i in place modification\n"
" -O optimize (default unless signed)\n"
" -S don't optimize\n"
" -t fmt output format: woff, ttf\n"
" -u num font number in collection (TTC), 0-based\n"
" -m xml metadata\n"
" -p priv private data\n"
" -X tag remove table\n"
" -l list tables\n"
"Use `-' to indicate standard input/output.\n"
"Skip output for dry run.\n"
"Compressor: %s.\n",
copression_by); /* sic: extern is spelled "copression_by" in the header */
}
return 1;
}
/* Map a format name ("ttf"/"otf"/"woff", case-insensitive) to fmt_*. */
static int type_by_name(char *s)
{
	if(!strcasecmp(s,"WOFF"))
		return fmt_WOFF;
	if(!strcasecmp(s,"TTF") || !strcasecmp(s,"OTF"))
		return fmt_TTF;
	return fmt_UNKNOWN;
}
static int cmp_tab_pos(const void *a, const void *b) {
return (*(struct table**)a)->pos - (*(struct table**)b)->pos;
}
/* Entry point: parse options, load the input font (TTF/OTF, WOFF, or a
 * member of a TTC), optionally strip tables and attach WOFF metadata,
 * optimize, recompute checksums, and write the result as TTF or WOFF to
 * a file, stdout, or in place. Giving no output name is a dry run. */
int main(int argc, char *argv[])
{
struct ttf ttf = {0};
char *iname, *itype_name, *oname, *otype_name, *mname=0, *pname=0;
struct buf input, output;
struct buf xtab = {0}; /* NUL-separated names of tables to drop (-X) */
int i, v, itype, fontn;
g.otype = fmt_UNKNOWN;
g.dryrun = 1; // no output
g.mayoptim = 1;
fontn = 0;
/* option parsing */
for(;;) switch(getopt(argc, argv, "vt:u:SOX:lm:p:ihV")) {
case 'v': g.verbose = 1; break;
case 'l': g.listonly = 1; break;
case 'i': g.inplace = 1; break;
case 't':
v = type_by_name(optarg);
if(v==fmt_UNKNOWN)
errx(1, "Unsupported font type: %s", optarg);
g.otype = v;
break;
case 'u':
fontn = atoi(optarg);
break;
case 'S':
g.mayoptim = g.optimize = 0;
break;
case 'O':
g.mayoptim = g.optimize = 1;
break;
case 'X':
/* accumulate NUL-terminated names back to back */
v = strlen(optarg) + 1;
xtab.ptr = my_realloc(xtab.ptr, xtab.len+v);
strcpy(xtab.ptr+xtab.len, optarg);
xtab.len += v;
break;
case 'm': mname = optarg; break;
case 'p': pname = optarg; break;
case '?':
if(optopt!='?')
break;
/* fall through: `-?` behaves like -h */
case 'h': return usage(stdout,1);
case 'V': printf(STR(VERSION)"\n"); return 0;
case -1: goto gotopt;
}
gotopt:
if(optind==argc)
return usage(stderr,0);
iname = argv[optind++];
oname = 0;
if(g.inplace) {
if(iname[0]=='-' && !iname[1])
errx(1, "-i is not compatible with -");
g.dryrun = 0;
if(optind < argc)
warnx("Too many args");
}
if(optind < argc) {
g.dryrun = 0;
oname = argv[optind++];
if(optind < argc)
warnx("Too many args");
if(oname[0]=='-' && !oname[1]) {
oname = 0;
g.stdout_used = 1;
} else if(g.otype==fmt_UNKNOWN) {
/* guess output format from the output file extension */
char *p = strrchr(oname, '.');
if(p)
g.otype = type_by_name(p+1);
}
}
input = read_file(iname);
if(input.len < 28)
errx(1,"File too short");
/* detect input container by its 4-byte magic */
itype = fmt_UNKNOWN;
if(g32(input.ptr) == g32("wOFF")) {
read_woff(&ttf, input.ptr, input.len);
itype_name = "WOFF";
itype = fmt_WOFF;
} else if(g32(input.ptr) == g32("ttcf")) {
if(g.inplace)
errx(1, "Can't optimize collection");
read_ttc(&ttf, input.ptr, input.len, fontn);
itype_name = "TTC";
} else if(g32(input.ptr) == g32("wOF2")) {
errx(1, "WOFF2 is not supported");
} else {
read_ttf(&ttf, input.ptr, input.len, 0);
itype_name = "TTF";
itype = fmt_TTF;
}
if(g.inplace)
g.otype = itype; /* in-place keeps the container format */
if(g.otype==fmt_UNKNOWN || g.otype==fmt_WOFF) {
g.otype = fmt_WOFF;
if(mname)
ttf.woff_meta = read_file(mname);
if(pname)
ttf.woff_priv = read_file(pname);
}
// all read
/* drop tables / metadata requested with -X */
if(xtab.len) {
char *p=xtab.ptr, *e=p+xtab.len;
for(; p<e; p=strchr(p,0)+1) {
struct table *t;
struct buf *b;
if(strcmp(p,"metadata")==0) {
b = &ttf.woff_meta;
rm_meta:
if(b->len) {
b->len = 0;
ttf.modified_meta = 1;
}
continue;
}
if(strcmp(p,"private")==0) {
b = &ttf.woff_priv;
goto rm_meta;
}
for(i=0; i<ttf.ntables; i++) {
t = &ttf.tables[i];
if(strcmp(t->name, p)==0)
goto rm_tab;
}
echo("Table %s not found", p);
if(0) {
rm_tab:
/* close the gap left by the removed entry */
memmove(t, t+1, (char*)(ttf.tables+ttf.ntables) - (char*)(t+1));
ttf.ntables--;
ttf.modified = 1;
if(g.verbose)
echo("Table %s removed", p);
}
}
free(xtab.ptr);
}
/* build the by-file-position view of the tables */
ttf.tab_pos = malloc(ttf.ntables * sizeof *ttf.tab_pos);
for(i=0; i<ttf.ntables; i++)
ttf.tab_pos[i] = &ttf.tables[i];
qsort(ttf.tab_pos, ttf.ntables, sizeof *ttf.tab_pos, cmp_tab_pos);
if(g.listonly) {
unsigned size = 12 + 16*ttf.ntables;
for(i=0; i<ttf.ntables; i++) {
struct table *t = ttf.tab_pos[i];
size += t->buf.len;
echo("%-4s %6u", t->name, t->buf.len);
}
echo("%-4s %6u", "", size);
return 0;
}
/* keep a valid signature intact unless -O/-S said otherwise */
if(!ttf.modified) {
struct table *t = find_table(&ttf, "DSIG");
if(t && t->buf.len>8)
g.mayoptim = g.optimize;
}
if(g.mayoptim)
optimize(&ttf);
recalc_checksums(&ttf);
switch(g.otype) {
case fmt_TTF:
gen_ttf(&output, &ttf);
otype_name = "TTF";
break;
case fmt_WOFF:
gen_woff(&output, &ttf);
otype_name = "WOFF";
break;
}
if(g.verbose || g.dryrun)
echo("input: %s %u bytes, output: %s %u bytes (%.1f%%)",
itype_name, input.len, otype_name, output.len, 100.*output.len/input.len);
if(g.dryrun)
return 0;
/* in-place: skip rewriting when nothing improved */
if(g.inplace && !ttf.modified && !ttf.modified_meta) {
if(output.len >= input.len) {
if(g.verbose)
echo("Not modified");
return 0;
}
}
/* write the result; in-place goes via a temporary file */
{
u8 *p=output.ptr, *e=p+output.len;
int fd = 1;
if(g.inplace)
fd = open_temporary(iname, &oname);
else if(oname) {
fd = open(oname, O_WRONLY|O_TRUNC|O_CREAT|O_BINARY, 0666);
if(fd<0) err(1, "%s", oname);
}
do {
v = write(fd, p, e-p);
if(v<=0) {
if(v) err(1, "write");
errx(1, "Short write");
}
p += v;
} while(p < e);
close(fd);
}
if(g.inplace) {
#ifdef WIN32
unlink(iname); /* Windows rename() won't replace an existing file */
#endif
v = rename(oname, iname);
if(v<0) {
warn("Rename %s to %s", oname, iname);
unlink(oname);
return 1;
}
// free(oname);
}
return 0;
}

94
misc/ttf2woff/ttf2woff.h Normal file
View File

@ -0,0 +1,94 @@
#include <sys/types.h>
#include <string.h>
#pragma clang diagnostic ignored "-Wshift-op-parentheses"
#pragma clang diagnostic ignored "-Wpointer-sign"
/* err.h is BSD/glibc; declare substitutes when it is unavailable */
#ifndef NO_ERRWARN
#include <err.h>
#else
void err(int,char*,...);
void errx(int,char*,...);
void warn(char*,...);
void warnx(char*,...);
#endif
/* font container formats */
enum {
fmt_UNKNOWN=0,
fmt_TTF,
fmt_WOFF
};
/* global command-line flags (defined in ttf2woff.c) */
extern struct flags {
unsigned otype:8; // output format, one of fmt_*
unsigned stdout_used:1; // output on stdout: diagnostics go to stderr
unsigned verbose:1;
unsigned mayoptim:1; // optimization currently allowed
unsigned optimize:1; // optimization explicitly requested (-O/-S)
unsigned dryrun:1;
unsigned inplace:1;
unsigned listonly:1;
} g;
void echo(char *, ...);
typedef unsigned char u8;
typedef unsigned int u32;
/* big-endian get/put helpers; p16/p32/append return the advanced pointer */
static inline int g16(u8 *p) {return p[0]<<8 | p[1];}
static inline u32 g32(u8 *p) {return (u32)p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];}
static inline u8 *p16(u8 *p, int v) {p[0]=v>>8; p[1]=v; return p+2;}
static inline u8 *p32(u8 *p, u32 v) {p[0]=v>>24; p[1]=v>>16; p[2]=v>>8; p[3]=v; return p+4;}
static inline u8 *append(u8 *d, u8 *s, size_t n) {u8 *p=d+n; memcpy(d,s,n); return p;}
/* a sized byte buffer */
struct buf {
u8 *ptr;
unsigned len;
};
struct table {
u32 tag;
unsigned modified:1;
unsigned free_buf:1; // buf.ptr is heap-owned by this table
struct buf buf; // uncompressed table data
u32 csum;
u32 pos; // offset in the output file
char name[8]; // printable tag, see name_table()
struct buf zbuf; // compressed data (gen_woff scratch)
};
struct ttf {
u32 flavor;
int ntables;
unsigned modified:1;
unsigned modified_meta:1; // WOFF meta & priv
struct table *tables; // sorted by name
struct table **tab_pos; // sorted by file pos
struct buf woff_meta, woff_priv;
};
void alloc_tables(struct ttf *ttf);
void name_table(struct table *t);
u8 *put_ttf_header(u8 buf[12], struct ttf *ttf);
struct table *find_table(struct ttf *ttf, char tag[4]);
void optimize(struct ttf *ttf);
void read_ttf(struct ttf *ttf, u8 *data, size_t length, unsigned offset);
void read_ttc(struct ttf *ttf, u8 *data, size_t length, int fontn);
void read_woff(struct ttf *ttf, u8 *data, size_t length);
void gen_woff(struct buf *out, struct ttf *ttf);
void gen_ttf(struct buf *out, struct ttf *ttf);
#define BAD_FONT errx(2, "Bad font (%s:%d)",__FILE__,__LINE__)
int zlib_compress(struct buf *out, struct buf *inp);
/* compressor description string (sic: historical misspelling, kept
 * because it is referenced by name elsewhere) */
extern char *copression_by;
#define _STR(X) #X
#define STR(X) _STR(X)
/* compare sizes after rounding both up to 4-byte alignment */
#define REALLY_SMALLER(A,B) (((A)+3&~3)<((B)+3&~3))
void *my_alloc(size_t sz);
void *my_free(void *p);
void *my_realloc(void *p, size_t sz);

39
misc/ttf2woff/ttf2woff.rc Normal file
View File

@ -0,0 +1,39 @@
/* Win32 VERSIONINFO resource for ttf2woff.exe.
 * VERSION and VERNUMS (comma-separated numbers) come from the build. */
#include <winver.h>
#define _STR(S) #S
#define STR(S) _STR(S)
#define Z "\0"
/* pad VERNUMS out to exactly four comma-separated numbers */
#define _FOUR(A,B,C,D,E...) A,B,C,D
#define FOUR(A...) _FOUR(A,0,0,0)
#ifdef __GNUC__
VS_VERSION_INFO VERSIONINFO
#else
VS_VERSION_INFO VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE
#endif
FILEVERSION FOUR(VERNUMS)
PRODUCTVERSION FOUR(VERNUMS)
FILEFLAGS 0
FILEOS VOS__WINDOWS32
FILETYPE VFT_APP
FILESUBTYPE 0
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "00000000"
BEGIN
VALUE "FileDescription", "WOFF font converter" Z
VALUE "FileVersion", STR(VERSION) Z
VALUE "InternalName", "ttf2woff" Z
VALUE "LegalCopyright", "GPL" Z
VALUE "OriginalFilename", "ttf2woff.exe" Z
VALUE "ProductName", "TTF2WOFF" Z
VALUE "ProductVersion", STR(VERSION) Z
VALUE "URL", "http://wizard.ae.krakow.pl/~jb/ttf2woff/" Z
VALUE "Author", "Jan Bobrowski" Z
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0,0
END
END

View File

@ -0,0 +1,332 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#include "blocksplitter.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "deflate.h"
#include "squeeze.h"
#include "tree.h"
#include "util.h"
/*
The "f" for the FindMinimum function below.
i: the current parameter of f(i)
context: for your implementation
*/
typedef double FindMinimumFun(size_t i, void* context);
/*
Finds minimum of function f(i) where i is of type size_t, f(i) is of type
double, i is in range start-end (excluding end).
Outputs the minimum value in *smallest and returns the index of this value.
*/
static size_t FindMinimum(FindMinimumFun f, void* context,
size_t start, size_t end, double* smallest) {
if (end - start < 1024) {
/* small range: plain linear scan */
double best = ZOPFLI_LARGE_FLOAT;
size_t result = start;
size_t i;
for (i = start; i < end; i++) {
double v = f(i, context);
if (v < best) {
best = v;
result = i;
}
}
*smallest = best;
return result;
} else {
/* Try to find minimum faster by recursively checking multiple points. */
#define NUM 9 /* Good value: 9. */
size_t i;
size_t p[NUM];
double vp[NUM];
size_t besti;
double best;
double lastbest = ZOPFLI_LARGE_FLOAT;
size_t pos = start;
for (;;) {
if (end - start <= NUM) break;
/* sample NUM evenly spaced points inside (start, end) */
for (i = 0; i < NUM; i++) {
p[i] = start + (i + 1) * ((end - start) / (NUM + 1));
vp[i] = f(p[i], context);
}
besti = 0;
best = vp[0];
for (i = 1; i < NUM; i++) {
if (vp[i] < best) {
best = vp[i];
besti = i;
}
}
/* stop once narrowing no longer improves the best value */
if (best > lastbest) break;
/* narrow the range around the best sample and repeat */
start = besti == 0 ? start : p[besti - 1];
end = besti == NUM - 1 ? end : p[besti + 1];
pos = p[besti];
lastbest = best;
}
*smallest = lastbest;
return pos;
#undef NUM
}
}
/*
Returns estimated cost of a block in bits. It includes the size to encode the
tree and the size to encode all literal, length and distance symbols and their
extra bits.
litlens: lz77 lit/lengths
dists: ll77 distances
lstart: start of block
lend: end of block (not inclusive)
*/
static double EstimateCost(const ZopfliLZ77Store* lz77,
size_t lstart, size_t lend) {
/* delegate: picks the cheapest block type for this range */
return ZopfliCalculateBlockSizeAutoType(lz77, lstart, lend);
}
/* Context handed to SplitCost through FindMinimum: the LZ77 data and
   the bounds of the block being considered for splitting. */
typedef struct SplitCostContext {
const ZopfliLZ77Store* lz77;
size_t start;
size_t end;
} SplitCostContext;
/*
Gets the cost which is the sum of the cost of the left and the right section
of the data.
type: FindMinimumFun
*/
static double SplitCost(size_t i, void* context) {
SplitCostContext* c = (SplitCostContext*)context;
return EstimateCost(c->lz77, c->start, i) + EstimateCost(c->lz77, i, c->end);
}
/* Insert value into the sorted dynamic array *out (length *outsize),
   keeping ascending order. The array is grown first; if an existing
   element exceeds value, everything from there shifts up one slot. */
static void AddSorted(size_t value, size_t** out, size_t* outsize) {
  size_t pos = 0;
  ZOPFLI_APPEND_DATA(value, out, outsize);
  /* find the first old element greater than value */
  while (pos + 1 < *outsize && (*out)[pos] <= value) pos++;
  if (pos + 1 < *outsize) {
    size_t k = *outsize - 1;
    while (k > pos) {
      (*out)[k] = (*out)[k - 1];
      k--;
    }
    (*out)[pos] = value;
  }
}
/*
Prints the block split points as decimal and hex values in the terminal.
*/
static void PrintBlockSplitPoints(const ZopfliLZ77Store* lz77,
const size_t* lz77splitpoints,
size_t nlz77points) {
size_t* splitpoints = 0;
size_t npoints = 0;
size_t i;
/* The input is given as lz77 indices, but we want to see the uncompressed
index values. */
size_t pos = 0;
if (nlz77points > 0) {
for (i = 0; i < lz77->size; i++) {
/* a literal covers 1 byte, a match covers its length */
size_t length = lz77->dists[i] == 0 ? 1 : lz77->litlens[i];
if (lz77splitpoints[npoints] == i) {
ZOPFLI_APPEND_DATA(pos, &splitpoints, &npoints);
if (npoints == nlz77points) break;
}
pos += length;
}
}
assert(npoints == nlz77points);
fprintf(stderr, "block split points: ");
for (i = 0; i < npoints; i++) {
fprintf(stderr, "%d ", (int)splitpoints[i]);
}
fprintf(stderr, "(hex:");
for (i = 0; i < npoints; i++) {
fprintf(stderr, " %x", (int)splitpoints[i]);
}
fprintf(stderr, ")\n");
free(splitpoints);
}
/*
Finds next block to try to split, the largest of the available ones.
The largest is chosen to make sure that if only a limited amount of blocks is
requested, their sizes are spread evenly.
lz77size: the size of the LL77 data, which is the size of the done array here.
done: array indicating which blocks starting at that position are no longer
splittable (splitting them increases rather than decreases cost).
splitpoints: the splitpoints found so far.
npoints: the amount of splitpoints found so far.
lstart: output variable, giving start of block.
lend: output variable, giving end of block.
returns 1 if a block was found, 0 if no block found (all are done).
*/
/* Choose the largest block not yet marked done among those delimited by
   splitpoints (plus the implicit bounds 0 and lz77size-1). A block is
   identified by its start index in `done`. Writes the chosen bounds to
   *lstart/*lend and returns 1, or returns 0 when every block is done. */
static int FindLargestSplittableBlock(
    size_t lz77size, const unsigned char* done,
    const size_t* splitpoints, size_t npoints,
    size_t* lstart, size_t* lend) {
  size_t best_len = 0;
  int have = 0;
  size_t k;
  for (k = 0; k <= npoints; k++) {
    size_t from = k == 0 ? 0 : splitpoints[k - 1];
    size_t to = k == npoints ? lz77size - 1 : splitpoints[k];
    if (!done[from] && to - from > best_len) {
      *lstart = from;
      *lend = to;
      have = 1;
      best_len = to - from;
    }
  }
  return have;
}
/* Recursively split the LZ77 stream wherever a split lowers the
 * estimated deflate cost, always re-splitting the largest remaining
 * splittable block, until no split pays off or maxblocks is reached. */
void ZopfliBlockSplitLZ77(const ZopfliOptions* options,
const ZopfliLZ77Store* lz77, size_t maxblocks,
size_t** splitpoints, size_t* npoints) {
size_t lstart, lend;
size_t i;
size_t llpos = 0;
size_t numblocks = 1;
unsigned char* done;
double splitcost, origcost;
if (lz77->size < 10) return; /* This code fails on tiny files. */
/* done[start] = 1 means the block beginning at `start` stays whole */
done = (unsigned char*)malloc(lz77->size);
if (!done) exit(-1); /* Allocation failed. */
for (i = 0; i < lz77->size; i++) done[i] = 0;
lstart = 0;
lend = lz77->size;
for (;;) {
SplitCostContext c;
if (maxblocks > 0 && numblocks >= maxblocks) {
break;
}
c.lz77 = lz77;
c.start = lstart;
c.end = lend;
assert(lstart < lend);
/* cheapest split position strictly inside (lstart, lend) */
llpos = FindMinimum(SplitCost, &c, lstart + 1, lend, &splitcost);
assert(llpos > lstart);
assert(llpos < lend);
origcost = EstimateCost(lz77, lstart, lend);
if (splitcost > origcost || llpos == lstart + 1 || llpos == lend) {
done[lstart] = 1; /* splitting this block doesn't pay off */
} else {
AddSorted(llpos, splitpoints, npoints);
numblocks++;
}
if (!FindLargestSplittableBlock(
lz77->size, done, *splitpoints, *npoints, &lstart, &lend)) {
break; /* No further split will probably reduce compression. */
}
if (lend - lstart < 10) {
break;
}
}
if (options->verbose) {
PrintBlockSplitPoints(lz77, *splitpoints, *npoints);
}
free(done);
}
/* Split uncompressed input [instart, inend): run a greedy LZ77 pass,
 * split that stream with ZopfliBlockSplitLZ77, then map the resulting
 * LZ77 indices back to byte positions in the input. */
void ZopfliBlockSplit(const ZopfliOptions* options,
const unsigned char* in, size_t instart, size_t inend,
size_t maxblocks, size_t** splitpoints, size_t* npoints) {
size_t pos = 0;
size_t i;
ZopfliBlockState s;
size_t* lz77splitpoints = 0;
size_t nlz77points = 0;
ZopfliLZ77Store store;
ZopfliHash hash;
ZopfliHash* h = &hash;
ZopfliInitLZ77Store(in, &store);
ZopfliInitBlockState(options, instart, inend, 0, &s);
ZopfliAllocHash(ZOPFLI_WINDOW_SIZE, h);
*npoints = 0;
*splitpoints = 0;
/* Unintuitively, Using a simple LZ77 method here instead of ZopfliLZ77Optimal
results in better blocks. */
ZopfliLZ77Greedy(&s, in, instart, inend, &store, h);
ZopfliBlockSplitLZ77(options,
&store, maxblocks,
&lz77splitpoints, &nlz77points);
/* Convert LZ77 positions to positions in the uncompressed input. */
pos = instart;
if (nlz77points > 0) {
for (i = 0; i < store.size; i++) {
/* a literal covers 1 byte, a match covers its length */
size_t length = store.dists[i] == 0 ? 1 : store.litlens[i];
if (lz77splitpoints[*npoints] == i) {
ZOPFLI_APPEND_DATA(pos, splitpoints, npoints);
if (*npoints == nlz77points) break;
}
pos += length;
}
}
assert(*npoints == nlz77points);
free(lz77splitpoints);
ZopfliCleanBlockState(&s);
ZopfliCleanLZ77Store(&store);
ZopfliCleanHash(h);
}
/* Emit a splitpoint at every blocksize step starting from instart,
   i.e. fixed-size blocks over [instart, inend). `in` is unused.
   NOTE(review): blocksize == 0 would loop forever — callers are
   presumably expected to pass a positive value; confirm. */
void ZopfliBlockSplitSimple(const unsigned char* in,
                            size_t instart, size_t inend,
                            size_t blocksize,
                            size_t** splitpoints, size_t* npoints) {
  size_t pos;
  for (pos = instart; pos < inend; pos += blocksize) {
    ZOPFLI_APPEND_DATA(pos, splitpoints, npoints);
  }
  (void)in;
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
/*
Functions to choose good boundaries for block splitting. Deflate allows encoding
the data in multiple blocks, with a separate Huffman tree for each block. The
Huffman tree itself requires some bytes to encode, so by choosing certain
blocks, you can either hurt, or enhance compression. These functions choose good
ones that enhance it.
*/
#ifndef ZOPFLI_BLOCKSPLITTER_H_
#define ZOPFLI_BLOCKSPLITTER_H_
#include <stdlib.h>
#include "lz77.h"
#include "zopfli.h"
/*
Does blocksplitting on LZ77 data.
The output splitpoints are indices in the LZ77 data.
maxblocks: set a limit to the amount of blocks. Set to 0 to mean no limit.
*/
void ZopfliBlockSplitLZ77(const ZopfliOptions* options,
const ZopfliLZ77Store* lz77, size_t maxblocks,
size_t** splitpoints, size_t* npoints);
/*
Does blocksplitting on uncompressed data.
The output splitpoints are indices in the uncompressed bytes.
options: general program options.
in: uncompressed input data
instart: where to start splitting
inend: where to end splitting (not inclusive)
maxblocks: maximum amount of blocks to split into, or 0 for no limit
splitpoints: dynamic array to put the resulting split point coordinates into.
The coordinates are indices in the input array.
npoints: pointer to amount of splitpoints, for the dynamic array. The amount of
blocks is the amount of splitpoints + 1.
*/
void ZopfliBlockSplit(const ZopfliOptions* options,
const unsigned char* in, size_t instart, size_t inend,
size_t maxblocks, size_t** splitpoints, size_t* npoints);
/*
Divides the input into equal blocks, does not even take LZ77 lengths into
account.
*/
void ZopfliBlockSplitSimple(const unsigned char* in,
size_t instart, size_t inend,
size_t blocksize,
size_t** splitpoints, size_t* npoints);
#endif /* ZOPFLI_BLOCKSPLITTER_H_ */

View File

@ -0,0 +1,125 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#include "cache.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef ZOPFLI_LONGEST_MATCH_CACHE
/*
Initializes the longest-match cache for a block of blocksize input bytes.
Prints an error and exits the process if any allocation fails.
*/
void ZopfliInitCache(size_t blocksize, ZopfliLongestMatchCache* lmc) {
  size_t i;
  lmc->length = (unsigned short*)malloc(sizeof(unsigned short) * blocksize);
  lmc->dist = (unsigned short*)malloc(sizeof(unsigned short) * blocksize);
  /* Rather large amount of memory. */
  lmc->sublen = (unsigned char*)malloc(ZOPFLI_CACHE_LENGTH * 3 * blocksize);
  /* Check every allocation, not only the largest one: a failed length/dist
     malloc would otherwise be dereferenced in the init loops below. */
  if (lmc->length == NULL || lmc->dist == NULL || lmc->sublen == NULL) {
    fprintf(stderr,
        "Error: Out of memory. Tried allocating %lu bytes of memory.\n",
        /* Cast: size_t is not unsigned long on all platforms (e.g. Win64). */
        (unsigned long)(ZOPFLI_CACHE_LENGTH * 3 * blocksize));
    exit(EXIT_FAILURE);
  }
  /* length > 0 and dist 0 is invalid combination, which indicates on purpose
     that this cache value is not filled in yet. */
  for (i = 0; i < blocksize; i++) lmc->length[i] = 1;
  for (i = 0; i < blocksize; i++) lmc->dist[i] = 0;
  for (i = 0; i < ZOPFLI_CACHE_LENGTH * blocksize * 3; i++) lmc->sublen[i] = 0;
}
/* Releases all memory owned by the cache (not lmc itself, which belongs to
   the caller). */
void ZopfliCleanCache(ZopfliLongestMatchCache* lmc) {
  free(lmc->sublen);
  free(lmc->dist);
  free(lmc->length);
}
/*
Compresses the sublen array (best distance for each match length) into the
cache entry for position pos, as up to ZOPFLI_CACHE_LENGTH triplets of
(length - 3, dist low byte, dist high byte).
*/
void ZopfliSublenToCache(const unsigned short* sublen,
                         size_t pos, size_t length,
                         ZopfliLongestMatchCache* lmc) {
  size_t i;
  size_t j = 0;  /* Next free triplet slot in the cache entry. */
  unsigned bestlength = 0;  /* Longest length written to the cache so far. */
  unsigned char* cache;
#if ZOPFLI_CACHE_LENGTH == 0
  return;
#endif
  cache = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
  if (length < 3) return;
  for (i = 3; i <= length; i++) {
    /* Only the last length of each run of equal distances gets a triplet;
       ZopfliCacheToSublen re-expands the runs. */
    if (i == length || sublen[i] != sublen[i + 1]) {
      cache[j * 3] = i - 3;
      cache[j * 3 + 1] = sublen[i] % 256;
      cache[j * 3 + 2] = (sublen[i] >> 8) % 256;
      bestlength = i;
      j++;
      if (j >= ZOPFLI_CACHE_LENGTH) break;
    }
  }
  if (j < ZOPFLI_CACHE_LENGTH) {
    assert(bestlength == length);
    /* The last triplet's length slot always holds the best length, so
       ZopfliMaxCachedSublen can read it without scanning. */
    cache[(ZOPFLI_CACHE_LENGTH - 1) * 3] = bestlength - 3;
  } else {
    assert(bestlength <= length);
  }
  assert(bestlength == ZopfliMaxCachedSublen(lmc, pos, length));
}
/*
Reconstructs the sublen array (best distance for each match length up to the
cached maximum) from the compact triplet cache entry at position pos.
*/
void ZopfliCacheToSublen(const ZopfliLongestMatchCache* lmc,
                         size_t pos, size_t length,
                         unsigned short* sublen) {
  size_t i, j;
  unsigned maxlength = ZopfliMaxCachedSublen(lmc, pos, length);
  unsigned prevlength = 0;
  unsigned char* cache;
#if ZOPFLI_CACHE_LENGTH == 0
  return;
#endif
  if (length < 3) return;
  cache = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
  for (j = 0; j < ZOPFLI_CACHE_LENGTH; j++) {
    /* Renamed from "length" to stop shadowing the function parameter. */
    unsigned runlength = cache[j * 3] + 3;
    unsigned dist = cache[j * 3 + 1] + 256 * cache[j * 3 + 2];
    /* Every length in (prevlength..runlength] shares this distance. */
    for (i = prevlength; i <= runlength; i++) {
      sublen[i] = dist;
    }
    if (runlength == maxlength) break;
    prevlength = runlength + 1;
  }
}
/*
Returns the length up to which could be stored in the cache.
*/
unsigned ZopfliMaxCachedSublen(const ZopfliLongestMatchCache* lmc,
                               size_t pos, size_t length) {
  unsigned char* entry;
#if ZOPFLI_CACHE_LENGTH == 0
  return 0;
#endif
  (void)length;
  entry = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
  /* A zero distance in the first triplet means nothing is cached here. */
  if (entry[1] == 0 && entry[2] == 0) return 0;
  /* The best length is kept in the final triplet's length slot. */
  return entry[(ZOPFLI_CACHE_LENGTH - 1) * 3] + 3;
}
#endif /* ZOPFLI_LONGEST_MATCH_CACHE */

View File

@ -0,0 +1,66 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
/*
The cache that speeds up ZopfliFindLongestMatch of lz77.c.
*/
#ifndef ZOPFLI_CACHE_H_
#define ZOPFLI_CACHE_H_
#include "util.h"
#ifdef ZOPFLI_LONGEST_MATCH_CACHE
/*
Cache used by ZopfliFindLongestMatch to remember previously found length/dist
values.
This is needed because the squeeze runs will ask these values multiple times for
the same position.
Uses large amounts of memory, since it has to remember the distance belonging
to every possible shorter-than-the-best length (the so called "sublen" array).
*/
typedef struct ZopfliLongestMatchCache {
unsigned short* length;
unsigned short* dist;
unsigned char* sublen;
} ZopfliLongestMatchCache;
/* Initializes the ZopfliLongestMatchCache. */
void ZopfliInitCache(size_t blocksize, ZopfliLongestMatchCache* lmc);
/* Frees up the memory of the ZopfliLongestMatchCache. */
void ZopfliCleanCache(ZopfliLongestMatchCache* lmc);
/* Stores sublen array in the cache. */
void ZopfliSublenToCache(const unsigned short* sublen,
size_t pos, size_t length,
ZopfliLongestMatchCache* lmc);
/* Extracts sublen array from the cache. */
void ZopfliCacheToSublen(const ZopfliLongestMatchCache* lmc,
size_t pos, size_t length,
unsigned short* sublen);
/* Returns the length up to which could be stored in the cache. */
unsigned ZopfliMaxCachedSublen(const ZopfliLongestMatchCache* lmc,
size_t pos, size_t length);
#endif /* ZOPFLI_LONGEST_MATCH_CACHE */
#endif /* ZOPFLI_CACHE_H_ */

View File

@ -0,0 +1,933 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#include "deflate.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  /* memcpy, used by TryOptimizeHuffmanForRle */
#include "blocksplitter.h"
#include "squeeze.h"
#include "symbols.h"
#include "tree.h"
/*
bp = bitpointer, always in range [0, 7].
The outsize is number of necessary bytes to encode the bits.
Given the value of bp and the amount of bytes, the amount of bits represented
is not simply bytesize * 8 + bp because even representing one bit requires a
whole byte. It is: (bp == 0) ? (bytesize * 8) : ((bytesize - 1) * 8 + bp)
*/
/* Appends one bit to the output stream, growing it by a zero byte whenever
   the bit pointer wraps around to 0. */
static void AddBit(int bit,
                   unsigned char* bp, unsigned char** out, size_t* outsize) {
  if (*bp == 0) ZOPFLI_APPEND_DATA(0, out, outsize);
  (*out)[*outsize - 1] |= (unsigned char)(bit << *bp);
  *bp = (*bp + 1) % 8;
}
/* Appends the low `length` bits of symbol, least-significant bit first (the
   bit order deflate uses for non-Huffman fields). */
static void AddBits(unsigned symbol, unsigned length,
                    unsigned char* bp, unsigned char** out, size_t* outsize) {
  /* TODO(lode): make more efficient (add more bits at once). */
  unsigned k;
  for (k = 0; k < length; k++) {
    AddBit((symbol >> k) & 1, bp, out, outsize);
  }
}
/*
Adds bits, like AddBits, but the order is inverted. The deflate specification
uses both orders in one standard.
*/
static void AddHuffmanBits(unsigned symbol, unsigned length,
                           unsigned char* bp, unsigned char** out,
                           size_t* outsize) {
  /* Like AddBits but most-significant bit first, as deflate requires for
     Huffman codes. TODO(lode): make more efficient (add more bits at once). */
  unsigned k = length;
  while (k > 0) {
    k--;
    AddBit((symbol >> k) & 1, bp, out, outsize);
  }
}
/*
Ensures there are at least 2 distance codes to support buggy decoders.
Zlib 1.2.1 and below have a bug where it fails if there isn't at least 1
distance code (with length > 0), even though it's valid according to the
deflate spec to have 0 distance codes. On top of that, some mobile phones
require at least two distance codes. To support these decoders too (but
potentially at the cost of a few bytes), add dummy code lengths of 1.
References to this bug can be found in the changelog of
Zlib 1.2.2 and here: http://www.jonof.id.au/forum/index.php?topic=515.0.
d_lengths: the 32 lengths of the distance codes.
*/
static void PatchDistanceCodesForBuggyDecoders(unsigned* d_lengths) {
  /* NOTE(review): the workaround is compiled out with #if 0, so this function
     is currently a no-op and d_lengths is left untouched. */
#if 0
  int num_dist_codes = 0; /* Amount of non-zero distance codes */
  int i;
  for (i = 0; i < 30 /* Ignore the two unused codes from the spec */; i++) {
    if (d_lengths[i]) num_dist_codes++;
    if (num_dist_codes >= 2) return; /* Two or more codes is fine. */
  }
  if (num_dist_codes == 0) {
    d_lengths[0] = d_lengths[1] = 1;
  } else if (num_dist_codes == 1) {
    d_lengths[d_lengths[0] ? 1 : 0] = 1;
  }
#endif
}
/*
Encodes the Huffman tree and returns how many bits its encoding takes. If out
is a null pointer, only returns the size and runs faster.
*/
static size_t EncodeTree(const unsigned* ll_lengths,
                         const unsigned* d_lengths,
                         int use_16, int use_17, int use_18,
                         unsigned char* bp,
                         unsigned char** out, size_t* outsize) {
  unsigned lld_total; /* Total amount of literal, length, distance codes. */
  /* Runlength encoded version of lengths of litlen and dist trees. */
  unsigned* rle = 0;
  unsigned* rle_bits = 0; /* Extra bits for rle values 16, 17 and 18. */
  size_t rle_size = 0; /* Size of rle array. */
  size_t rle_bits_size = 0; /* Should have same value as rle_size. */
  unsigned hlit = 29; /* 286 - 257 */
  unsigned hdist = 29; /* 32 - 1, but gzip does not like hdist > 29.*/
  unsigned hclen;
  unsigned hlit2;
  size_t i, j;
  size_t clcounts[19];
  unsigned clcl[19]; /* Code length code lengths. */
  unsigned clsymbols[19];
  /* The order in which code length code lengths are encoded as per deflate. */
  static const unsigned order[19] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
  };
  int size_only = !out;  /* Null out means: compute the bit size only. */
  size_t result_size = 0;
  for(i = 0; i < 19; i++) clcounts[i] = 0;
  /* Trim zeros. */
  while (hlit > 0 && ll_lengths[257 + hlit - 1] == 0) hlit--;
  while (hdist > 0 && d_lengths[1 + hdist - 1] == 0) hdist--;
  hlit2 = hlit + 257;
  lld_total = hlit2 + hdist + 1;
  /* Run-length encode the concatenated litlen + dist code lengths, using the
     special symbols 16/17/18 only as allowed by the use_* flags. */
  for (i = 0; i < lld_total; i++) {
    /* This is an encoding of a huffman tree, so now the length is a symbol */
    unsigned char symbol = i < hlit2 ? ll_lengths[i] : d_lengths[i - hlit2];
    unsigned count = 1;
    if(use_16 || (symbol == 0 && (use_17 || use_18))) {
      /* Measure the run of equal symbols starting at i. */
      for (j = i + 1; j < lld_total && symbol ==
          (j < hlit2 ? ll_lengths[j] : d_lengths[j - hlit2]); j++) {
        count++;
      }
    }
    i += count - 1;
    /* Repetitions of zeroes */
    if (symbol == 0 && count >= 3) {
      if (use_18) {
        /* Symbol 18: repeat zero 11-138 times (7 extra bits). */
        while (count >= 11) {
          unsigned count2 = count > 138 ? 138 : count;
          if (!size_only) {
            ZOPFLI_APPEND_DATA(18, &rle, &rle_size);
            ZOPFLI_APPEND_DATA(count2 - 11, &rle_bits, &rle_bits_size);
          }
          clcounts[18]++;
          count -= count2;
        }
      }
      if (use_17) {
        /* Symbol 17: repeat zero 3-10 times (3 extra bits). */
        while (count >= 3) {
          unsigned count2 = count > 10 ? 10 : count;
          if (!size_only) {
            ZOPFLI_APPEND_DATA(17, &rle, &rle_size);
            ZOPFLI_APPEND_DATA(count2 - 3, &rle_bits, &rle_bits_size);
          }
          clcounts[17]++;
          count -= count2;
        }
      }
    }
    /* Repetitions of any symbol */
    if (use_16 && count >= 4) {
      count--; /* Since the first one is hardcoded. */
      clcounts[symbol]++;
      if (!size_only) {
        ZOPFLI_APPEND_DATA(symbol, &rle, &rle_size);
        ZOPFLI_APPEND_DATA(0, &rle_bits, &rle_bits_size);
      }
      /* Symbol 16: repeat previous symbol 3-6 times (2 extra bits). */
      while (count >= 3) {
        unsigned count2 = count > 6 ? 6 : count;
        if (!size_only) {
          ZOPFLI_APPEND_DATA(16, &rle, &rle_size);
          ZOPFLI_APPEND_DATA(count2 - 3, &rle_bits, &rle_bits_size);
        }
        clcounts[16]++;
        count -= count2;
      }
    }
    /* No or insufficient repetition */
    clcounts[symbol] += count;
    while (count > 0) {
      if (!size_only) {
        ZOPFLI_APPEND_DATA(symbol, &rle, &rle_size);
        ZOPFLI_APPEND_DATA(0, &rle_bits, &rle_bits_size);
      }
      count--;
    }
  }
  /* Build the code-length code from the rle symbol histogram. */
  ZopfliCalculateBitLengths(clcounts, 19, 7, clcl);
  if (!size_only) ZopfliLengthsToSymbols(clcl, 19, 7, clsymbols);
  hclen = 15;
  /* Trim zeros. */
  while (hclen > 0 && clcounts[order[hclen + 4 - 1]] == 0) hclen--;
  if (!size_only) {
    AddBits(hlit, 5, bp, out, outsize);
    AddBits(hdist, 5, bp, out, outsize);
    AddBits(hclen, 4, bp, out, outsize);
    /* Code length code lengths, in the deflate-specified order. */
    for (i = 0; i < hclen + 4; i++) {
      AddBits(clcl[order[i]], 3, bp, out, outsize);
    }
    for (i = 0; i < rle_size; i++) {
      unsigned symbol = clsymbols[rle[i]];
      AddHuffmanBits(symbol, clcl[rle[i]], bp, out, outsize);
      /* Extra bits. */
      if (rle[i] == 16) AddBits(rle_bits[i], 2, bp, out, outsize);
      else if (rle[i] == 17) AddBits(rle_bits[i], 3, bp, out, outsize);
      else if (rle[i] == 18) AddBits(rle_bits[i], 7, bp, out, outsize);
    }
  }
  /* Total the size in bits, mirroring what was (or would be) written above. */
  result_size += 14; /* hlit, hdist, hclen bits */
  result_size += (hclen + 4) * 3; /* clcl bits */
  for(i = 0; i < 19; i++) {
    result_size += clcl[i] * clcounts[i];
  }
  /* Extra bits. */
  result_size += clcounts[16] * 2;
  result_size += clcounts[17] * 3;
  result_size += clcounts[18] * 7;
  /* Note: in case of "size_only" these are null pointers so no effect. */
  free(rle);
  free(rle_bits);
  return result_size;
}
/* Writes the dynamic Huffman tree header, trying all 8 combinations of the
   rle symbols 16/17/18 and emitting the combination that encodes smallest. */
static void AddDynamicTree(const unsigned* ll_lengths,
                           const unsigned* d_lengths,
                           unsigned char* bp,
                           unsigned char** out, size_t* outsize) {
  int flags;
  int bestflags = 0;
  size_t smallest = 0;
  /* Size-only passes (null output) to find the cheapest combination. */
  for (flags = 0; flags < 8; flags++) {
    size_t candidate = EncodeTree(ll_lengths, d_lengths,
                                  flags & 1, flags & 2, flags & 4,
                                  0, 0, 0);
    if (smallest == 0 || candidate < smallest) {
      smallest = candidate;
      bestflags = flags;
    }
  }
  /* Real pass: actually emit the winning encoding. */
  EncodeTree(ll_lengths, d_lengths,
             bestflags & 1, bestflags & 2, bestflags & 4,
             bp, out, outsize);
}
/*
Gives the exact size of the tree, in bits, as it will be encoded in DEFLATE.
*/
static size_t CalculateTreeSize(const unsigned* ll_lengths,
                                const unsigned* d_lengths) {
  size_t best = 0;
  int flags;
  /* Try all 8 combinations of the rle symbols 16/17/18; the encoder will
     pick the cheapest, so report that size. */
  for (flags = 0; flags < 8; flags++) {
    size_t candidate = EncodeTree(ll_lengths, d_lengths,
                                  flags & 1, flags & 2, flags & 4,
                                  0, 0, 0);
    if (best == 0 || candidate < best) best = candidate;
  }
  return best;
}
/*
Adds all lit/len and dist codes from the lists as huffman symbols. Does not add
end code 256. expected_data_size is the uncompressed block size, used for
assert, but you can set it to 0 to not do the assertion.
*/
static void AddLZ77Data(const ZopfliLZ77Store* lz77,
                        size_t lstart, size_t lend,
                        size_t expected_data_size,
                        const unsigned* ll_symbols, const unsigned* ll_lengths,
                        const unsigned* d_symbols, const unsigned* d_lengths,
                        unsigned char* bp,
                        unsigned char** out, size_t* outsize) {
  size_t testlength = 0;  /* Uncompressed bytes represented so far (assert). */
  size_t i;
  for (i = lstart; i < lend; i++) {
    unsigned dist = lz77->dists[i];
    unsigned litlen = lz77->litlens[i];
    if (dist == 0) {
      /* dist 0 marks a literal; litlen is then the byte value itself. */
      assert(litlen < 256);
      assert(ll_lengths[litlen] > 0);
      AddHuffmanBits(ll_symbols[litlen], ll_lengths[litlen], bp, out, outsize);
      testlength++;
    } else {
      /* Length/distance pair: length code + extra bits, then distance code
         + extra bits, in that order. */
      unsigned lls = ZopfliGetLengthSymbol(litlen);
      unsigned ds = ZopfliGetDistSymbol(dist);
      assert(litlen >= 3 && litlen <= 288);
      assert(ll_lengths[lls] > 0);
      assert(d_lengths[ds] > 0);
      AddHuffmanBits(ll_symbols[lls], ll_lengths[lls], bp, out, outsize);
      AddBits(ZopfliGetLengthExtraBitsValue(litlen),
              ZopfliGetLengthExtraBits(litlen),
              bp, out, outsize);
      AddHuffmanBits(d_symbols[ds], d_lengths[ds], bp, out, outsize);
      AddBits(ZopfliGetDistExtraBitsValue(dist),
              ZopfliGetDistExtraBits(dist),
              bp, out, outsize);
      testlength += litlen;
    }
  }
  assert(expected_data_size == 0 || testlength == expected_data_size);
}
/* Fills in the code lengths of the fixed litlen and distance trees. */
static void GetFixedTree(unsigned* ll_lengths, unsigned* d_lengths) {
  size_t i = 0;
  while (i < 144) ll_lengths[i++] = 8;
  while (i < 256) ll_lengths[i++] = 9;
  while (i < 280) ll_lengths[i++] = 7;
  while (i < 288) ll_lengths[i++] = 8;
  for (i = 0; i < 32; i++) d_lengths[i] = 5;
}
/*
Same as CalculateBlockSymbolSize, but for block size smaller than histogram
size.
*/
static size_t CalculateBlockSymbolSizeSmall(const unsigned* ll_lengths,
                                            const unsigned* d_lengths,
                                            const ZopfliLZ77Store* lz77,
                                            size_t lstart, size_t lend) {
  size_t result = 0;
  size_t i;
  /* Walk every LZ77 symbol directly instead of building a histogram. */
  for (i = lstart; i < lend; i++) {
    assert(i < lz77->size);
    assert(lz77->litlens[i] < 259);
    if (lz77->dists[i] == 0) {
      /* Literal: costs just its litlen code. */
      result += ll_lengths[lz77->litlens[i]];
    } else {
      /* Length/distance pair: both codes plus their extra bits. */
      int ll_symbol = ZopfliGetLengthSymbol(lz77->litlens[i]);
      int d_symbol = ZopfliGetDistSymbol(lz77->dists[i]);
      result += ll_lengths[ll_symbol];
      result += d_lengths[d_symbol];
      result += ZopfliGetLengthSymbolExtraBits(ll_symbol);
      result += ZopfliGetDistSymbolExtraBits(d_symbol);
    }
  }
  result += ll_lengths[256]; /*end symbol*/
  return result;
}
/*
Same as CalculateBlockSymbolSize, but with the histogram provided by the caller.
*/
static size_t CalculateBlockSymbolSizeGivenCounts(const size_t* ll_counts,
                                                  const size_t* d_counts,
                                                  const unsigned* ll_lengths,
                                                  const unsigned* d_lengths,
                                                  const ZopfliLZ77Store* lz77,
                                                  size_t lstart, size_t lend) {
  size_t result = 0;
  size_t i;
  /* For short ranges, ignore the given counts and walk symbols directly. */
  if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
    return CalculateBlockSymbolSizeSmall(
        ll_lengths, d_lengths, lz77, lstart, lend);
  } else {
    /* Literals 0..255. */
    for (i = 0; i < 256; i++) {
      result += ll_lengths[i] * ll_counts[i];
    }
    /* Length symbols 257..285 plus their extra bits; symbol 256 (end) is
       skipped here and added once at the bottom. */
    for (i = 257; i < 286; i++) {
      result += ll_lengths[i] * ll_counts[i];
      result += ZopfliGetLengthSymbolExtraBits(i) * ll_counts[i];
    }
    /* Distance symbols plus their extra bits. */
    for (i = 0; i < 30; i++) {
      result += d_lengths[i] * d_counts[i];
      result += ZopfliGetDistSymbolExtraBits(i) * d_counts[i];
    }
    result += ll_lengths[256]; /*end symbol*/
    return result;
  }
}
/*
Calculates size of the part after the header and tree of an LZ77 block, in bits.
*/
static size_t CalculateBlockSymbolSize(const unsigned* ll_lengths,
                                       const unsigned* d_lengths,
                                       const ZopfliLZ77Store* lz77,
                                       size_t lstart, size_t lend) {
  size_t ll_counts[ZOPFLI_NUM_LL];
  size_t d_counts[ZOPFLI_NUM_D];
  /* Short range: a direct per-symbol walk is cheaper than a histogram. */
  if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
    return CalculateBlockSymbolSizeSmall(ll_lengths, d_lengths, lz77,
                                         lstart, lend);
  }
  ZopfliLZ77GetHistogram(lz77, lstart, lend, ll_counts, d_counts);
  return CalculateBlockSymbolSizeGivenCounts(ll_counts, d_counts, ll_lengths,
                                             d_lengths, lz77, lstart, lend);
}
/* Absolute difference of two size_t values, without underflow. */
static size_t AbsDiff(size_t x, size_t y) {
  return x > y ? x - y : y - x;
}
/*
Changes the population counts in a way that the consequent Huffman tree
compression, especially its rle-part, will be more likely to compress this data
more efficiently. length contains the size of the histogram.
*/
void OptimizeHuffmanForRle(int length, size_t* counts) {
  int i, k, stride;
  size_t symbol, sum, limit;
  int* good_for_rle;
  /* 1) We don't want to touch the trailing zeros. We may break the
  rules of the format by adding more data in the distance codes. */
  for (; length >= 0; --length) {
    if (length == 0) {
      return;
    }
    if (counts[length - 1] != 0) {
      /* Now counts[0..length - 1] does not have trailing zeros. */
      break;
    }
  }
  /* 2) Let's mark all population counts that already can be encoded
  with an rle code.*/
  /* NOTE(review): the malloc result is not checked; a failed allocation
     would be dereferenced in the loop below. */
  good_for_rle = (int*)malloc(length * sizeof(int));
  for (i = 0; i < length; ++i) good_for_rle[i] = 0;
  /* Let's not spoil any of the existing good rle codes.
  Mark any seq of 0's that is longer than 5 as a good_for_rle.
  Mark any seq of non-0's that is longer than 7 as a good_for_rle.*/
  symbol = counts[0];
  stride = 0;
  /* Loop runs to length inclusive so the final run is flushed at i==length
     (the short-circuit keeps counts[i] from being read out of bounds). */
  for (i = 0; i < length + 1; ++i) {
    if (i == length || counts[i] != symbol) {
      if ((symbol == 0 && stride >= 5) || (symbol != 0 && stride >= 7)) {
        for (k = 0; k < stride; ++k) {
          good_for_rle[i - k - 1] = 1;
        }
      }
      stride = 1;
      if (i != length) {
        symbol = counts[i];
      }
    } else {
      ++stride;
    }
  }
  /* 3) Let's replace those population counts that lead to more rle codes. */
  stride = 0;
  limit = counts[0];
  sum = 0;
  for (i = 0; i < length + 1; ++i) {
    if (i == length || good_for_rle[i]
        /* Heuristic for selecting the stride ranges to collapse. */
        || AbsDiff(counts[i], limit) >= 4) {
      if (stride >= 4 || (stride >= 3 && sum == 0)) {
        /* The stride must end, collapse what we have, if we have enough (4). */
        int count = (sum + stride / 2) / stride;  /* Rounded average. */
        if (count < 1) count = 1;
        if (sum == 0) {
          /* Don't make an all zeros stride to be upgraded to ones. */
          count = 0;
        }
        for (k = 0; k < stride; ++k) {
          /* We don't want to change value at counts[i],
          that is already belonging to the next stride. Thus - 1. */
          counts[i - k - 1] = count;
        }
      }
      stride = 0;
      sum = 0;
      if (i < length - 3) {
        /* All interesting strides have a count of at least 4,
        at least when non-zeros. */
        limit = (counts[i] + counts[i + 1] +
                 counts[i + 2] + counts[i + 3] + 2) / 4;
      } else if (i < length) {
        limit = counts[i];
      } else {
        limit = 0;
      }
    }
    ++stride;
    if (i != length) {
      sum += counts[i];
    }
  }
  free(good_for_rle);
}
/*
Tries out OptimizeHuffmanForRle for this block, if the result is smaller,
uses it, otherwise keeps the original. Returns size of encoded tree and data in
bits, not including the 3-bit block header.
*/
static double TryOptimizeHuffmanForRle(
    const ZopfliLZ77Store* lz77, size_t lstart, size_t lend,
    const size_t* ll_counts, const size_t* d_counts,
    unsigned* ll_lengths, unsigned* d_lengths) {
  size_t ll_counts2[ZOPFLI_NUM_LL];
  size_t d_counts2[ZOPFLI_NUM_D];
  unsigned ll_lengths2[ZOPFLI_NUM_LL];
  unsigned d_lengths2[ZOPFLI_NUM_D];
  double treesize;
  double datasize;
  double treesize2;
  double datasize2;
  /* Cost (tree + data, in bits) of the unmodified code lengths. */
  treesize = CalculateTreeSize(ll_lengths, d_lengths);
  datasize = CalculateBlockSymbolSizeGivenCounts(ll_counts, d_counts,
      ll_lengths, d_lengths, lz77, lstart, lend);
  /* Work on copies so the caller's histograms stay intact. */
  memcpy(ll_counts2, ll_counts, sizeof(ll_counts2));
  memcpy(d_counts2, d_counts, sizeof(d_counts2));
  OptimizeHuffmanForRle(ZOPFLI_NUM_LL, ll_counts2);
  OptimizeHuffmanForRle(ZOPFLI_NUM_D, d_counts2);
  ZopfliCalculateBitLengths(ll_counts2, ZOPFLI_NUM_LL, 15, ll_lengths2);
  ZopfliCalculateBitLengths(d_counts2, ZOPFLI_NUM_D, 15, d_lengths2);
  PatchDistanceCodesForBuggyDecoders(d_lengths2);
  /* The data size is evaluated against the ORIGINAL counts: the symbol
     stream itself is unchanged, only the code lengths differ. */
  treesize2 = CalculateTreeSize(ll_lengths2, d_lengths2);
  datasize2 = CalculateBlockSymbolSizeGivenCounts(ll_counts, d_counts,
      ll_lengths2, d_lengths2, lz77, lstart, lend);
  if (treesize2 + datasize2 < treesize + datasize) {
    memcpy(ll_lengths, ll_lengths2, sizeof(ll_lengths2));
    memcpy(d_lengths, d_lengths2, sizeof(d_lengths2));
    return treesize2 + datasize2;
  }
  return treesize + datasize;
}
/*
Calculates the bit lengths for the symbols for dynamic blocks. Chooses bit
lengths that give the smallest size of tree encoding + encoding of all the
symbols to have smallest output size. This are not necessarily the ideal Huffman
bit lengths. Returns size of encoded tree and data in bits, not including the
3-bit block header.
*/
static double GetDynamicLengths(const ZopfliLZ77Store* lz77,
                                size_t lstart, size_t lend,
                                unsigned* ll_lengths, unsigned* d_lengths) {
  size_t ll_counts[ZOPFLI_NUM_LL];
  size_t d_counts[ZOPFLI_NUM_D];
  ZopfliLZ77GetHistogram(lz77, lstart, lend, ll_counts, d_counts);
  ll_counts[256] = 1;  /* End symbol: always emitted exactly once. */
  ZopfliCalculateBitLengths(ll_counts, ZOPFLI_NUM_LL, 15, ll_lengths);
  ZopfliCalculateBitLengths(d_counts, ZOPFLI_NUM_D, 15, d_lengths);
  PatchDistanceCodesForBuggyDecoders(d_lengths);
  /* May overwrite ll_lengths/d_lengths with an rle-friendlier variant if
     that lowers the total tree + data cost. */
  return TryOptimizeHuffmanForRle(
      lz77, lstart, lend, ll_counts, d_counts, ll_lengths, d_lengths);
}
/*
Calculates the output size in bits of one block over the LZ77 range
[lstart, lend), for the given block type: 0 stored, 1 fixed tree, 2 dynamic.
*/
double ZopfliCalculateBlockSize(const ZopfliLZ77Store* lz77,
                                size_t lstart, size_t lend, int btype) {
  unsigned ll_lengths[ZOPFLI_NUM_LL];
  unsigned d_lengths[ZOPFLI_NUM_D];
  double result = 3; /* bfinal and btype bits */
  if (btype == 0) {
    size_t length = ZopfliLZ77GetByteRange(lz77, lstart, lend);
    size_t rem = length % 65535;
    size_t blocks = length / 65535 + (rem ? 1 : 0);
    /* An uncompressed block must actually be split into multiple blocks if it's
    larger than 65535 bytes long. Each block header is 5 bytes: 3 bits,
    padding, LEN and NLEN (potential less padding for first one ignored). */
    return blocks * 5 * 8 + length * 8;
  } else if (btype == 1) {
    /* Was "} if (btype == 1)" — behavior was saved only by the early return
       above; the explicit "else if" makes the three-way choice clear. */
    GetFixedTree(ll_lengths, d_lengths);
    result += CalculateBlockSymbolSize(
        ll_lengths, d_lengths, lz77, lstart, lend);
  } else {
    result += GetDynamicLengths(lz77, lstart, lend, ll_lengths, d_lengths);
  }
  return result;
}
/* Returns the cost in bits of the cheapest of the three block types for the
   given LZ77 range. */
double ZopfliCalculateBlockSizeAutoType(const ZopfliLZ77Store* lz77,
                                        size_t lstart, size_t lend) {
  double best;
  double uncompressedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 0);
  /* Don't do the expensive fixed cost calculation for larger blocks that are
     unlikely to use it; reuse the uncompressed cost as a stand-in. */
  double fixedcost = (lz77->size > 1000) ?
      uncompressedcost : ZopfliCalculateBlockSize(lz77, lstart, lend, 1);
  double dyncost = ZopfliCalculateBlockSize(lz77, lstart, lend, 2);
  best = uncompressedcost;
  if (fixedcost < best) best = fixedcost;
  if (dyncost < best) best = dyncost;
  return best;
}
/* Since an uncompressed block can be max 65535 in size, it actually adds
multiple blocks if needed. */
static void AddNonCompressedBlock(const ZopfliOptions* options, int final,
                                  const unsigned char* in, size_t instart,
                                  size_t inend,
                                  unsigned char* bp,
                                  unsigned char** out, size_t* outsize) {
  size_t pos = instart;
  (void)options;  /* Unused; kept for a uniform Add*Block signature. */
  for (;;) {
    size_t i;
    unsigned short blocksize = 65535;  /* Max size of one stored block. */
    unsigned short nlen;
    int currentfinal;
    if (pos + blocksize > inend) blocksize = inend - pos;
    currentfinal = pos + blocksize >= inend;
    nlen = ~blocksize;  /* NLEN is the one's complement of LEN. */
    /* bfinal is set only on the last emitted sub-block, and only if the
       caller asked for a final block. */
    AddBit(final && currentfinal, bp, out, outsize);
    /* BTYPE 00 */
    AddBit(0, bp, out, outsize);
    AddBit(0, bp, out, outsize);
    /* Any bits of input up to the next byte boundary are ignored. */
    *bp = 0;
    /* LEN and NLEN, little-endian. */
    ZOPFLI_APPEND_DATA(blocksize % 256, out, outsize);
    ZOPFLI_APPEND_DATA((blocksize / 256) % 256, out, outsize);
    ZOPFLI_APPEND_DATA(nlen % 256, out, outsize);
    ZOPFLI_APPEND_DATA((nlen / 256) % 256, out, outsize);
    /* The raw bytes. */
    for (i = 0; i < blocksize; i++) {
      ZOPFLI_APPEND_DATA(in[pos + i], out, outsize);
    }
    if (currentfinal) break;
    pos += blocksize;
  }
}
/*
Adds a deflate block with the given LZ77 data to the output.
options: global program options
btype: the block type, must be 1 or 2
final: whether to set the "final" bit on this block, must be the last block
litlens: literal/length array of the LZ77 data, in the same format as in
ZopfliLZ77Store.
dists: distance array of the LZ77 data, in the same format as in
ZopfliLZ77Store.
lstart: where to start in the LZ77 data
lend: where to end in the LZ77 data (not inclusive)
expected_data_size: the uncompressed block size, used for assert, but you can
set it to 0 to not do the assertion.
bp: output bit pointer
out: dynamic output array to append to
outsize: dynamic output array size
*/
static void AddLZ77Block(const ZopfliOptions* options, int btype, int final,
                         const ZopfliLZ77Store* lz77,
                         size_t lstart, size_t lend,
                         size_t expected_data_size,
                         unsigned char* bp,
                         unsigned char** out, size_t* outsize) {
  unsigned ll_lengths[ZOPFLI_NUM_LL];
  unsigned d_lengths[ZOPFLI_NUM_D];
  unsigned ll_symbols[ZOPFLI_NUM_LL];
  unsigned d_symbols[ZOPFLI_NUM_D];
  /* NOTE(review): this initial value is dead; it is overwritten again just
     before AddLZ77Data below. */
  size_t detect_block_size = *outsize;
  size_t compressed_size;
  size_t uncompressed_size = 0;
  size_t i;
  if (btype == 0) {
    /* Stored block: emit the raw bytes covered by this LZ77 range. */
    size_t length = ZopfliLZ77GetByteRange(lz77, lstart, lend);
    size_t pos = lstart == lend ? 0 : lz77->pos[lstart];
    size_t end = pos + length;
    AddNonCompressedBlock(options, final,
                          lz77->data, pos, end, bp, out, outsize);
    return;
  }
  /* Block header: bfinal bit, then the two btype bits (LSB first). */
  AddBit(final, bp, out, outsize);
  AddBit(btype & 1, bp, out, outsize);
  AddBit((btype & 2) >> 1, bp, out, outsize);
  if (btype == 1) {
    /* Fixed block. */
    GetFixedTree(ll_lengths, d_lengths);
  } else {
    /* Dynamic block. */
    unsigned detect_tree_size;
    assert(btype == 2);
    GetDynamicLengths(lz77, lstart, lend, ll_lengths, d_lengths);
    detect_tree_size = *outsize;
    AddDynamicTree(ll_lengths, d_lengths, bp, out, outsize);
    if (options->verbose) {
      fprintf(stderr, "treesize: %d\n", (int)(*outsize - detect_tree_size));
    }
  }
  ZopfliLengthsToSymbols(ll_lengths, ZOPFLI_NUM_LL, 15, ll_symbols);
  ZopfliLengthsToSymbols(d_lengths, ZOPFLI_NUM_D, 15, d_symbols);
  detect_block_size = *outsize;
  AddLZ77Data(lz77, lstart, lend, expected_data_size,
              ll_symbols, ll_lengths, d_symbols, d_lengths,
              bp, out, outsize);
  /* End symbol. */
  AddHuffmanBits(ll_symbols[256], ll_lengths[256], bp, out, outsize);
  /* Sum the uncompressed bytes this range represents (literal = 1 byte,
     match = litlen bytes), for the verbose report only. */
  for (i = lstart; i < lend; i++) {
    uncompressed_size += lz77->dists[i] == 0 ? 1 : lz77->litlens[i];
  }
  compressed_size = *outsize - detect_block_size;
  if (options->verbose) {
    fprintf(stderr, "compressed block size: %d (%dk) (unc: %d)\n",
            (int)compressed_size, (int)(compressed_size / 1024),
            (int)(uncompressed_size));
  }
}
static void AddLZ77BlockAutoType(const ZopfliOptions* options, int final,
                                 const ZopfliLZ77Store* lz77,
                                 size_t lstart, size_t lend,
                                 size_t expected_data_size,
                                 unsigned char* bp,
                                 unsigned char** out, size_t* outsize) {
  /* Estimated cost in bits for each of the three block types. */
  double uncompressedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 0);
  double fixedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 1);
  double dyncost = ZopfliCalculateBlockSize(lz77, lstart, lend, 2);
  /* Whether to perform the expensive calculation of creating an optimal block
  with fixed huffman tree to check if smaller. Only do this for small blocks or
  blocks which already are pretty good with fixed huffman tree. */
  int expensivefixed = (lz77->size < 1000) || fixedcost <= dyncost * 1.1;
  ZopfliLZ77Store fixedstore;
  if (lstart == lend) {
    /* Smallest empty block is represented by fixed block */
    AddBits(final, 1, bp, out, outsize);
    AddBits(1, 2, bp, out, outsize);  /* btype 01 */
    AddBits(0, 7, bp, out, outsize);  /* end symbol has code 0000000 */
    return;
  }
  ZopfliInitLZ77Store(lz77->data, &fixedstore);
  if (expensivefixed) {
    /* Recalculate the LZ77 with ZopfliLZ77OptimalFixed */
    size_t instart = lz77->pos[lstart];
    size_t inend = instart + ZopfliLZ77GetByteRange(lz77, lstart, lend);
    ZopfliBlockState s;
    ZopfliInitBlockState(options, instart, inend, 1, &s);
    ZopfliLZ77OptimalFixed(&s, lz77->data, instart, inend, &fixedstore);
    /* Re-estimate with the recomputed LZ77 data before choosing. */
    fixedcost = ZopfliCalculateBlockSize(&fixedstore, 0, fixedstore.size, 1);
    ZopfliCleanBlockState(&s);
  }
  /* Emit whichever block type is estimated cheapest. */
  if (uncompressedcost < fixedcost && uncompressedcost < dyncost) {
    AddLZ77Block(options, 0, final, lz77, lstart, lend,
                 expected_data_size, bp, out, outsize);
  } else if (fixedcost < dyncost) {
    if (expensivefixed) {
      AddLZ77Block(options, 1, final, &fixedstore, 0, fixedstore.size,
                   expected_data_size, bp, out, outsize);
    } else {
      AddLZ77Block(options, 1, final, lz77, lstart, lend,
                   expected_data_size, bp, out, outsize);
    }
  } else {
    AddLZ77Block(options, 2, final, lz77, lstart, lend,
                 expected_data_size, bp, out, outsize);
  }
  ZopfliCleanLZ77Store(&fixedstore);
}
/*
Deflate a part, to allow ZopfliDeflate() to use multiple master blocks if
needed.
It is possible to call this function multiple times in a row, shifting
instart and inend to next bytes of the data. If instart is larger than 0, then
previous bytes are used as the initial dictionary for LZ77.
This function will usually output multiple deflate blocks. If final is 1, then
the final bit will be set on the last block.
*/
void ZopfliDeflatePart(const ZopfliOptions* options, int btype, int final,
                       const unsigned char* in, size_t instart, size_t inend,
                       unsigned char* bp, unsigned char** out,
                       size_t* outsize) {
  size_t i;
  /* byte coordinates rather than lz77 index */
  size_t* splitpoints_uncompressed = 0;
  size_t npoints = 0;
  size_t* splitpoints = 0;  /* split points as indices into the lz77 store */
  double totalcost = 0;
  ZopfliLZ77Store lz77;

  /* If btype=2 is specified, it tries all block types. If a lesser btype is
  given, then however it forces that one. Neither of the lesser types needs
  block splitting as they have no dynamic huffman trees. */
  if (btype == 0) {
    AddNonCompressedBlock(options, final, in, instart, inend, bp, out, outsize);
    return;
  } else if (btype == 1) {
    /* Fixed-tree only: one optimal-fixed LZ77 pass over the whole range,
       emitted as a single block. */
    ZopfliLZ77Store store;
    ZopfliBlockState s;
    ZopfliInitLZ77Store(in, &store);
    ZopfliInitBlockState(options, instart, inend, 1, &s);
    ZopfliLZ77OptimalFixed(&s, in, instart, inend, &store);
    AddLZ77Block(options, btype, final, &store, 0, store.size, 0,
                 bp, out, outsize);
    ZopfliCleanBlockState(&s);
    ZopfliCleanLZ77Store(&store);
    return;
  }

  /* First block splitting attempt: on the uncompressed byte stream. */
  if (options->blocksplitting) {
    ZopfliBlockSplit(options, in, instart, inend,
                     options->blocksplittingmax,
                     &splitpoints_uncompressed, &npoints);
    splitpoints = (size_t*)malloc(sizeof(*splitpoints) * npoints);
  }

  /* Run optimal LZ77 per sub-range and concatenate the results, translating
     each byte split point into an index into the combined lz77 store. */
  ZopfliInitLZ77Store(in, &lz77);
  for (i = 0; i <= npoints; i++) {
    size_t start = i == 0 ? instart : splitpoints_uncompressed[i - 1];
    size_t end = i == npoints ? inend : splitpoints_uncompressed[i];
    ZopfliBlockState s;
    ZopfliLZ77Store store;
    ZopfliInitLZ77Store(in, &store);
    ZopfliInitBlockState(options, start, end, 1, &s);
    ZopfliLZ77Optimal(&s, in, start, end, options->numiterations, &store);
    totalcost += ZopfliCalculateBlockSizeAutoType(&store, 0, store.size);
    ZopfliAppendLZ77Store(&store, &lz77);
    if (i < npoints) splitpoints[i] = lz77.size;
    ZopfliCleanBlockState(&s);
    ZopfliCleanLZ77Store(&store);
  }

  /* Second block splitting attempt, this time on the lz77 symbols; keep
     whichever split set has the lower estimated total cost. */
  if (options->blocksplitting && npoints > 1) {
    size_t* splitpoints2 = 0;
    size_t npoints2 = 0;
    double totalcost2 = 0;
    ZopfliBlockSplitLZ77(options, &lz77,
                         options->blocksplittingmax, &splitpoints2, &npoints2);
    for (i = 0; i <= npoints2; i++) {
      size_t start = i == 0 ? 0 : splitpoints2[i - 1];
      size_t end = i == npoints2 ? lz77.size : splitpoints2[i];
      totalcost2 += ZopfliCalculateBlockSizeAutoType(&lz77, start, end);
    }
    if (totalcost2 < totalcost) {
      free(splitpoints);
      splitpoints = splitpoints2;
      npoints = npoints2;
    } else {
      free(splitpoints2);
    }
  }

  /* Emit one deflate block per sub-range, auto-choosing each block's btype;
     only the very last block may carry the final bit. */
  for (i = 0; i <= npoints; i++) {
    size_t start = i == 0 ? 0 : splitpoints[i - 1];
    size_t end = i == npoints ? lz77.size : splitpoints[i];
    AddLZ77BlockAutoType(options, i == npoints && final,
                         &lz77, start, end, 0,
                         bp, out, outsize);
  }

  ZopfliCleanLZ77Store(&lz77);
  free(splitpoints);
  free(splitpoints_uncompressed);
}
/*
Splits the input into master blocks of at most ZOPFLI_MASTER_BLOCK_SIZE bytes
and deflates each with ZopfliDeflatePart; optionally reports the achieved
compression ratio on stderr.
*/
void ZopfliDeflate(const ZopfliOptions* options, int btype, int final,
                   const unsigned char* in, size_t insize,
                   unsigned char* bp, unsigned char** out, size_t* outsize) {
  /* Remember the output size so the verbose report measures only what this
     call appended. */
  size_t outsize_before = *outsize;
#if ZOPFLI_MASTER_BLOCK_SIZE == 0
  /* Master blocks disabled: hand the entire input over in one call. */
  ZopfliDeflatePart(options, btype, final, in, 0, insize, bp, out, outsize);
#else
  size_t start = 0;
  do {
    int is_last_master = (start + ZOPFLI_MASTER_BLOCK_SIZE >= insize);
    size_t chunk = is_last_master ? insize - start : ZOPFLI_MASTER_BLOCK_SIZE;
    /* Only the last master block of the last input section is final. */
    ZopfliDeflatePart(options, btype, final && is_last_master,
                      in, start, start + chunk, bp, out, outsize);
    start += chunk;
  } while (start < insize);
#endif
  if (options->verbose) {
    fprintf(stderr,
            "Original Size: %lu, Deflate: %lu, Compression: %f%% Removed\n",
            (unsigned long)insize, (unsigned long)(*outsize - outsize_before),
            100.0 * (double)(insize - (*outsize - outsize_before)) /
                (double)insize);
  }
}

View File

@ -0,0 +1,92 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#ifndef ZOPFLI_DEFLATE_H_
#define ZOPFLI_DEFLATE_H_
/*
Functions to compress according to the DEFLATE specification, using the
"squeeze" LZ77 compression backend.
*/
#include "lz77.h"
#include "zopfli.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
Compresses according to the deflate specification and append the compressed
result to the output.
This function will usually output multiple deflate blocks. If final is 1, then
the final bit will be set on the last block.
options: global program options
btype: the deflate block type. Use 2 for best compression.
-0: non compressed blocks (00)
-1: blocks with fixed tree (01)
-2: blocks with dynamic tree (10)
final: whether this is the last section of the input, sets the final bit to the
last deflate block.
in: the input bytes
insize: number of input bytes
bp: bit pointer for the output array. This must initially be 0, and for
consecutive calls must be reused (it can have values from 0-7). This is
because deflate appends blocks as bit-based data, rather than on byte
boundaries.
out: pointer to the dynamic output array to which the result is appended. Must
be freed after use.
outsize: pointer to the dynamic output array size.
*/
void ZopfliDeflate(const ZopfliOptions* options, int btype, int final,
const unsigned char* in, size_t insize,
unsigned char* bp, unsigned char** out, size_t* outsize);
/*
Like ZopfliDeflate, but allows to specify start and end byte with instart and
inend. Only that part is compressed, but earlier bytes are still used for the
back window.
*/
void ZopfliDeflatePart(const ZopfliOptions* options, int btype, int final,
const unsigned char* in, size_t instart, size_t inend,
unsigned char* bp, unsigned char** out,
size_t* outsize);
/*
Calculates block size in bits.
litlens: lz77 lit/lengths
dists: lz77 distances
lstart: start of block
lend: end of block (not inclusive)
*/
double ZopfliCalculateBlockSize(const ZopfliLZ77Store* lz77,
size_t lstart, size_t lend, int btype);
/*
Calculates block size in bits, automatically using the best btype.
*/
double ZopfliCalculateBlockSizeAutoType(const ZopfliLZ77Store* lz77,
size_t lstart, size_t lend);
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* ZOPFLI_DEFLATE_H_ */

143
misc/ttf2woff/zopfli/hash.c Normal file
View File

@ -0,0 +1,143 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#include "hash.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define HASH_SHIFT 5
#define HASH_MASK 32767
/*
Allocates (but does not initialize) all hash tables; call ZopfliResetHash
before use and ZopfliCleanHash to release.
*/
void ZopfliAllocHash(size_t window_size, ZopfliHash* h) {
  /* 65536 head slots; actual hash values are masked to HASH_MASK. */
  h->head = (int*)malloc(sizeof(*h->head) * 65536);
  h->prev = (unsigned short*)malloc(sizeof(*h->prev) * window_size);
  h->hashval = (int*)malloc(sizeof(*h->hashval) * window_size);

#ifdef ZOPFLI_HASH_SAME_HASH
  /* Tables for the secondary hash. */
  h->head2 = (int*)malloc(sizeof(*h->head2) * 65536);
  h->prev2 = (unsigned short*)malloc(sizeof(*h->prev2) * window_size);
  h->hashval2 = (int*)malloc(sizeof(*h->hashval2) * window_size);
#endif

#ifdef ZOPFLI_HASH_SAME
  /* Per-position repeated-byte run lengths. */
  h->same = (unsigned short*)malloc(sizeof(*h->same) * window_size);
#endif
}
/*
Puts every table allocated by ZopfliAllocHash back into its pristine
"nothing seen yet" state.
*/
void ZopfliResetHash(size_t window_size, ZopfliHash* h) {
  size_t j;

  h->val = 0;
  /* -1 marks a head slot with no occurrence recorded yet. */
  for (j = 0; j < 65536; j++) h->head[j] = -1;
  for (j = 0; j < window_size; j++) {
    h->prev[j] = j;  /* prev[j] == j doubles as "uninitialized". */
    h->hashval[j] = -1;
  }

#ifdef ZOPFLI_HASH_SAME
  for (j = 0; j < window_size; j++) h->same[j] = 0;
#endif

#ifdef ZOPFLI_HASH_SAME_HASH
  h->val2 = 0;
  for (j = 0; j < 65536; j++) h->head2[j] = -1;
  for (j = 0; j < window_size; j++) {
    h->prev2[j] = j;
    h->hashval2[j] = -1;
  }
#endif
}
/*
Releases every table allocated by ZopfliAllocHash. Free order is irrelevant.
*/
void ZopfliCleanHash(ZopfliHash* h) {
#ifdef ZOPFLI_HASH_SAME
  free(h->same);
#endif
#ifdef ZOPFLI_HASH_SAME_HASH
  free(h->hashval2);
  free(h->prev2);
  free(h->head2);
#endif
  free(h->hashval);
  free(h->prev);
  free(h->head);
}
/*
Update the sliding hash value with the given byte. All calls to this function
must be made on consecutive input characters. Since the hash value exists out
of multiple input bytes, a few warmups with this function are needed initially.
*/
static void UpdateHashValue(ZopfliHash* h, unsigned char c) {
  /* Rolling hash: shift the previous value and mix in the new byte,
     keeping only HASH_MASK bits. */
  h->val = ((h->val << HASH_SHIFT) ^ c) & HASH_MASK;
}
void ZopfliUpdateHash(const unsigned char* array, size_t pos, size_t end,
    ZopfliHash* h) {
  /* Slot of pos within the sliding window. */
  unsigned short hpos = pos & ZOPFLI_WINDOW_MASK;
#ifdef ZOPFLI_HASH_SAME
  size_t amount = 0;
#endif

  /* Roll the hash forward over the byte ZOPFLI_MIN_MATCH - 1 ahead of pos;
     feed 0 when that byte would lie past the end of the input. */
  UpdateHashValue(h, pos + ZOPFLI_MIN_MATCH <= end ?
      array[pos + ZOPFLI_MIN_MATCH - 1] : 0);
  h->hashval[hpos] = h->val;

  /* Link this position into the chain for h->val if the current head entry
     is valid (still stores the same hash value); otherwise self-link, which
     marks the end of a chain. */
  if (h->head[h->val] != -1 && h->hashval[h->head[h->val]] == h->val) {
    h->prev[hpos] = h->head[h->val];
  }
  else h->prev[hpos] = hpos;
  h->head[h->val] = hpos;

#ifdef ZOPFLI_HASH_SAME
  /* Update "same": number of following bytes equal to array[pos]. Seed the
     scan from the previous position's run length to avoid rescanning. */
  if (h->same[(pos - 1) & ZOPFLI_WINDOW_MASK] > 1) {
    amount = h->same[(pos - 1) & ZOPFLI_WINDOW_MASK] - 1;
  }
  while (pos + amount + 1 < end &&
      array[pos] == array[pos + amount + 1] && amount < (unsigned short)(-1)) {
    amount++;
  }
  h->same[hpos] = amount;
#endif

#ifdef ZOPFLI_HASH_SAME_HASH
  /* Secondary hash that also mixes in the same-byte run length, kept in
     its own chain tables in parallel with the primary one. */
  h->val2 = ((h->same[hpos] - ZOPFLI_MIN_MATCH) & 255) ^ h->val;
  h->hashval2[hpos] = h->val2;
  if (h->head2[h->val2] != -1 && h->hashval2[h->head2[h->val2]] == h->val2) {
    h->prev2[hpos] = h->head2[h->val2];
  }
  else h->prev2[hpos] = hpos;
  h->head2[h->val2] = hpos;
#endif
}
/*
Feeds the first input byte (and the second, when available) into the rolling
hash so that h->val carries enough context before ZopfliUpdateHash is called.
*/
void ZopfliWarmupHash(const unsigned char* array, size_t pos, size_t end,
    ZopfliHash* h) {
  UpdateHashValue(h, array[pos]);
  if (pos + 1 < end) {
    UpdateHashValue(h, array[pos + 1]);
  }
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
/*
The hash for ZopfliFindLongestMatch of lz77.c.
*/
#ifndef ZOPFLI_HASH_H_
#define ZOPFLI_HASH_H_
#include "util.h"
/* Hash chain state used by ZopfliFindLongestMatch to locate earlier
   occurrences of the bytes at the current position. */
typedef struct ZopfliHash {
  int* head;  /* Hash value to index of its most recent occurrence. */
  unsigned short* prev;  /* Index to index of prev. occurrence of same hash. */
  int* hashval;  /* Index to hash value at this index. */
  int val;  /* Current hash value. */

#ifdef ZOPFLI_HASH_SAME_HASH
  /* Fields with similar purpose as the above hash, but for the second hash with
  a value that is calculated differently. */
  int* head2;  /* Hash value to index of its most recent occurrence. */
  unsigned short* prev2;  /* Index to index of prev. occurrence of same hash. */
  int* hashval2;  /* Index to hash value at this index. */
  int val2;  /* Current hash value. */
#endif

#ifdef ZOPFLI_HASH_SAME
  unsigned short* same;  /* Amount of repetitions of the same byte after this position. */
#endif
} ZopfliHash;
/* Allocates ZopfliHash memory. */
void ZopfliAllocHash(size_t window_size, ZopfliHash* h);
/* Resets all fields of ZopfliHash. */
void ZopfliResetHash(size_t window_size, ZopfliHash* h);
/* Frees ZopfliHash memory. */
void ZopfliCleanHash(ZopfliHash* h);
/*
Updates the hash values based on the current position in the array. All calls
to this must be made for consecutive bytes.
*/
void ZopfliUpdateHash(const unsigned char* array, size_t pos, size_t end,
ZopfliHash* h);
/*
Prepopulates hash:
Fills in the initial values in the hash, before ZopfliUpdateHash can be used
correctly.
*/
void ZopfliWarmupHash(const unsigned char* array, size_t pos, size_t end,
ZopfliHash* h);
#endif /* ZOPFLI_HASH_H_ */

View File

@ -0,0 +1,262 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
/*
Bounded package merge algorithm, based on the paper
"A Fast and Space-Economical Algorithm for Length-Limited Coding
Jyrki Katajainen, Alistair Moffat, Andrew Turpin".
*/
#include "katajainen.h"
#include <assert.h>
#include <stdlib.h>
#include <limits.h>
typedef struct Node Node;

/*
Nodes forming chains. Also used to represent leaves.
*/
struct Node {
  size_t weight;  /* Total weight (symbol count) of this chain. */
  Node* tail;  /* Previous node(s) of this chain, or 0 if none. */
  int count;  /* Leaf symbol index, or number of leaves before this chain. */
};

/*
Memory pool for nodes: all nodes live in one pre-allocated array and `next`
bump-allocates from it (see pool->next++ in BoundaryPM/InitLists).
*/
typedef struct NodePool {
  Node* next;  /* Pointer to a free node in the pool. */
} NodePool;
/*
Initializes a chain node with the given values and marks it as in use.
*/
/*
Fills in the fields of a freshly taken pool node.
*/
static void InitNode(size_t weight, int count, Node* tail, Node* node) {
  /* Plain field-by-field initialization; assignment order is irrelevant. */
  node->tail = tail;
  node->count = count;
  node->weight = weight;
}
/*
Performs a Boundary Package-Merge step. Puts a new chain in the given list. The
new chain is, depending on the weights, a leaf or a combination of two chains
from the previous list.
lists: The lists of chains.
maxbits: Number of lists.
leaves: The leaves, one per symbol.
numsymbols: Number of leaves.
pool: the node memory pool.
index: The index of the list in which a new chain or leaf is required.
*/
static void BoundaryPM(Node* (*lists)[2], Node* leaves, int numsymbols,
    NodePool* pool, int index) {
  Node* newchain;
  Node* oldchain;
  int lastcount = lists[index][1]->count;  /* Count of last chain of list. */

  /* List 0 contains only leaves; once every symbol is used there is
     nothing further to add. */
  if (index == 0 && lastcount >= numsymbols) return;

  newchain = pool->next++;
  oldchain = lists[index][1];

  /* These are set up before the recursive calls below, so that there is a list
  pointing to the new node, to let the garbage collection know it's in use. */
  lists[index][0] = oldchain;
  lists[index][1] = newchain;

  if (index == 0) {
    /* New leaf node in list 0. */
    InitNode(leaves[lastcount].weight, lastcount + 1, 0, newchain);
  } else {
    /* Weight of the package formed by the previous list's two lookahead
       chains. */
    size_t sum = lists[index - 1][0]->weight + lists[index - 1][1]->weight;
    if (lastcount < numsymbols && sum > leaves[lastcount].weight) {
      /* New leaf inserted in list, so count is incremented. */
      InitNode(leaves[lastcount].weight, lastcount + 1, oldchain->tail,
          newchain);
    } else {
      /* Take the package instead of a leaf: same count, tail points into
         the previous list. */
      InitNode(sum, lastcount, lists[index - 1][1], newchain);
      /* Two lookahead chains of previous list used up, create new ones. */
      BoundaryPM(lists, leaves, numsymbols, pool, index - 1);
      BoundaryPM(lists, leaves, numsymbols, pool, index - 1);
    }
  }
}
/*
Final boundary package-merge step: only the count and tail of the last chain
of the last list are still needed, so no recursion into earlier lists and no
full node initialization is required.
*/
static void BoundaryPMFinal(Node* (*lists)[2],
    Node* leaves, int numsymbols, NodePool* pool, int index) {
  int lastcount = lists[index][1]->count;  /* Count of last chain of list. */
  size_t sum = lists[index - 1][0]->weight + lists[index - 1][1]->weight;

  if (lastcount < numsymbols && sum > leaves[lastcount].weight) {
    /* One more leaf is cheaper: bump the count, keep the old tail.
       (pool->next is deliberately not advanced; this node is the last one
       ever taken.) */
    Node* newchain = pool->next;
    Node* oldchain = lists[index][1]->tail;

    lists[index][1] = newchain;
    newchain->count = lastcount + 1;
    newchain->tail = oldchain;
  } else {
    /* Package: chain onto the previous list's last chain. */
    lists[index][1]->tail = lists[index - 1][1];
  }
}
/*
Initializes each list with as lookahead chains the two leaves with lowest
weights.
*/
/*
Initializes each list with as lookahead chains the two leaves with lowest
weights.
*/
static void InitLists(
    NodePool* pool, const Node* leaves, int maxbits, Node* (*lists)[2]) {
  Node* first;
  Node* second;
  int list;

  /* Take two pool nodes for the two lightest leaves (leaves are sorted). */
  first = pool->next++;
  second = pool->next++;
  InitNode(leaves[0].weight, 1, 0, first);
  InitNode(leaves[1].weight, 2, 0, second);

  /* Every list starts out sharing the same two lookahead chains. */
  for (list = 0; list < maxbits; list++) {
    lists[list][0] = first;
    lists[list][1] = second;
  }
}
/*
Converts result of boundary package-merge to the bitlengths. The result in the
last chain of the last list contains the amount of active leaves in each list.
chain: Chain to extract the bit length from (last chain from last list).
*/
static void ExtractBitLengths(Node* chain, Node* leaves, unsigned* bitlengths) {
  int counts[16] = {0};
  unsigned end = 16;
  unsigned ptr = 15;
  unsigned value = 1;
  Node* node;
  int val;

  /* Walking the chain from the last list backwards, node->count is the
     number of active leaves at that depth; fill counts[] from the back. */
  for (node = chain; node; node = node->tail) {
    counts[--end] = node->count;
  }

  /* counts[ptr] - counts[ptr - 1] leaves receive bit length `value`;
     leaves[] is sorted lightest-first, so the lightest symbols end up with
     the longest codes. */
  val = counts[15];
  while (ptr >= end) {
    for (; val > counts[ptr - 1]; val--) {
      bitlengths[leaves[val - 1].count] = value;
    }
    ptr--;
    value++;
  }
}
/*
Comparator for sorting the leaves by weight. Has the function signature for
qsort.
*/
static int LeafComparator(const void* a, const void* b) {
  size_t wa = ((const Node*)a)->weight;
  size_t wb = ((const Node*)b)->weight;
  /* Compare instead of subtracting: weight is size_t, so wa - wb is an
     unsigned difference whose conversion to int can wrap for large weights
     (weights here include the symbol index in the low 9 bits), producing an
     inconsistent ordering for qsort. */
  return (wa > wb) - (wa < wb);
}
int ZopfliLengthLimitedCodeLengths(
    const size_t* frequencies, int n, int maxbits, unsigned* bitlengths) {
  NodePool pool;
  int i;
  int numsymbols = 0;  /* Amount of symbols with frequency > 0. */
  int numBoundaryPMRuns;
  Node* nodes;

  /* Array of lists of chains. Each list requires only two lookahead chains at
  a time, so each list is a array of two Node*'s. */
  Node* (*lists)[2];

  /* One leaf per symbol. Only numsymbols leaves will be used. */
  Node* leaves = (Node*)malloc(n * sizeof(*leaves));

  /* Initialize all bitlengths at 0. */
  for (i = 0; i < n; i++) {
    bitlengths[i] = 0;
  }

  /* Count used symbols and place them in the leaves. */
  for (i = 0; i < n; i++) {
    if (frequencies[i]) {
      leaves[numsymbols].weight = frequencies[i];
      leaves[numsymbols].count = i;  /* Index of symbol this leaf represents. */
      numsymbols++;
    }
  }

  /* Check special cases and error conditions. */
  if ((1 << maxbits) < numsymbols) {
    free(leaves);
    return 1;  /* Error, too few maxbits to represent symbols. */
  }
  if (numsymbols == 0) {
    free(leaves);
    return 0;  /* No symbols at all. OK. */
  }
  if (numsymbols == 1) {
    bitlengths[leaves[0].count] = 1;
    free(leaves);
    return 0;  /* Only one symbol, give it bitlength 1, not 0. OK. */
  }
  if (numsymbols == 2) {
    bitlengths[leaves[0].count]++;
    bitlengths[leaves[1].count]++;
    free(leaves);
    return 0;
  }

  /* Sort the leaves from lightest to heaviest. Add count into the same
  variable for stable sorting. */
  for (i = 0; i < numsymbols; i++) {
    if (leaves[i].weight >=
        ((size_t)1 << (sizeof(leaves[0].weight) * CHAR_BIT - 9))) {
      free(leaves);
      return 1;  /* Error, we need 9 bits for the count. */
    }
    leaves[i].weight = (leaves[i].weight << 9) | leaves[i].count;
  }
  qsort(leaves, numsymbols, sizeof(Node), LeafComparator);
  for (i = 0; i < numsymbols; i++) {
    /* Drop the tie-breaking count bits again. */
    leaves[i].weight >>= 9;
  }

  /* A tree with numsymbols leaves never needs codes longer than
     numsymbols - 1 bits. */
  if (numsymbols - 1 < maxbits) {
    maxbits = numsymbols - 1;
  }

  /* Initialize node memory pool. */
  nodes = (Node*)malloc(maxbits * 2 * numsymbols * sizeof(Node));
  pool.next = nodes;

  lists = (Node* (*)[2])malloc(maxbits * sizeof(*lists));
  InitLists(&pool, leaves, maxbits, lists);

  /* In the last list, 2 * numsymbols - 2 active chains need to be created. Two
  are already created in the initialization. Each BoundaryPM run creates one. */
  numBoundaryPMRuns = 2 * numsymbols - 4;
  for (i = 0; i < numBoundaryPMRuns - 1; i++) {
    BoundaryPM(lists, leaves, numsymbols, &pool, maxbits - 1);
  }
  BoundaryPMFinal(lists, leaves, numsymbols, &pool, maxbits - 1);

  ExtractBitLengths(lists[maxbits - 1][1], leaves, bitlengths);

  free(lists);
  free(leaves);
  free(nodes);
  return 0;  /* OK. */
}

View File

@ -0,0 +1,42 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#ifndef ZOPFLI_KATAJAINEN_H_
#define ZOPFLI_KATAJAINEN_H_
#include <string.h>
/*
Outputs minimum-redundancy length-limited code bitlengths for symbols with the
given counts. The bitlengths are limited by maxbits.
The output is tailored for DEFLATE: symbols that never occur, get a bit length
of 0, and if only a single symbol occurs at least once, its bitlength will be 1,
and not 0 as would theoretically be needed for a single symbol.
frequencies: The amount of occurrences of each symbol.
n: The amount of symbols.
maxbits: Maximum bit length, inclusive.
bitlengths: Output, the bitlengths for the symbol prefix codes.
return: 0 for OK, non-0 for error.
*/
int ZopfliLengthLimitedCodeLengths(
const size_t* frequencies, int n, int maxbits, unsigned* bitlengths);
#endif /* ZOPFLI_KATAJAINEN_H_ */

630
misc/ttf2woff/zopfli/lz77.c Normal file
View File

@ -0,0 +1,630 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: lode.vandevenne@gmail.com (Lode Vandevenne)
Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
*/
#include "lz77.h"
#include "symbols.h"
#include "util.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
/*
Puts the store into its empty state: no symbols, all arrays unallocated
(null), only the pointer to the source data set.
*/
void ZopfliInitLZ77Store(const unsigned char* data, ZopfliLZ77Store* store) {
  store->size = 0;
  store->data = data;
  store->litlens = 0;
  store->dists = 0;
  store->pos = 0;
  store->ll_symbol = 0;
  store->d_symbol = 0;
  store->ll_counts = 0;
  store->d_counts = 0;
}
/*
Frees every array owned by the store. store->data is the caller's input
buffer and is not freed. Free order is irrelevant.
*/
void ZopfliCleanLZ77Store(ZopfliLZ77Store* store) {
  free(store->d_counts);
  free(store->ll_counts);
  free(store->d_symbol);
  free(store->ll_symbol);
  free(store->pos);
  free(store->dists);
  free(store->litlens);
}
/*
Integer ceiling division: the smallest integer >= a / b.
Written as a / b + (a % b != 0) rather than (a + b - 1) / b so the
intermediate sum cannot wrap around when a is close to SIZE_MAX.
*/
static size_t CeilDiv(size_t a, size_t b) {
  return a / b + (a % b != 0);
}
/* Deep-copies source into dest, replacing dest's previous contents. */
void ZopfliCopyLZ77Store(
    const ZopfliLZ77Store* source, ZopfliLZ77Store* dest) {
  size_t i;
  /* The cumulative histograms are stored in whole chunks of ZOPFLI_NUM_LL /
     ZOPFLI_NUM_D entries; round their sizes up accordingly. */
  size_t llsize = ZOPFLI_NUM_LL * CeilDiv(source->size, ZOPFLI_NUM_LL);
  size_t dsize = ZOPFLI_NUM_D * CeilDiv(source->size, ZOPFLI_NUM_D);

  /* Release whatever dest held before reusing it. */
  ZopfliCleanLZ77Store(dest);
  ZopfliInitLZ77Store(source->data, dest);

  dest->litlens =
      (unsigned short*)malloc(sizeof(*dest->litlens) * source->size);
  dest->dists = (unsigned short*)malloc(sizeof(*dest->dists) * source->size);
  dest->pos = (size_t*)malloc(sizeof(*dest->pos) * source->size);
  dest->ll_symbol =
      (unsigned short*)malloc(sizeof(*dest->ll_symbol) * source->size);
  dest->d_symbol =
      (unsigned short*)malloc(sizeof(*dest->d_symbol) * source->size);
  dest->ll_counts = (size_t*)malloc(sizeof(*dest->ll_counts) * llsize);
  dest->d_counts = (size_t*)malloc(sizeof(*dest->d_counts) * dsize);

  /* Allocation failed. */
  if (!dest->litlens || !dest->dists) exit(-1);
  if (!dest->pos) exit(-1);
  if (!dest->ll_symbol || !dest->d_symbol) exit(-1);
  if (!dest->ll_counts || !dest->d_counts) exit(-1);

  dest->size = source->size;
  for (i = 0; i < source->size; i++) {
    dest->litlens[i] = source->litlens[i];
    dest->dists[i] = source->dists[i];
    dest->pos[i] = source->pos[i];
    dest->ll_symbol[i] = source->ll_symbol[i];
    dest->d_symbol[i] = source->d_symbol[i];
  }
  for (i = 0; i < llsize; i++) {
    dest->ll_counts[i] = source->ll_counts[i];
  }
  for (i = 0; i < dsize; i++) {
    dest->d_counts[i] = source->d_counts[i];
  }
}
/*
Appends the length and distance to the LZ77 arrays of the ZopfliLZ77Store.
context must be a ZopfliLZ77Store*.
*/
void ZopfliStoreLitLenDist(unsigned short length, unsigned short dist,
    size_t pos, ZopfliLZ77Store* store) {
  size_t i;
  /* Needed for using ZOPFLI_APPEND_DATA multiple times. */
  size_t origsize = store->size;
  /* Start of the current cumulative-histogram chunk for each alphabet. */
  size_t llstart = ZOPFLI_NUM_LL * (origsize / ZOPFLI_NUM_LL);
  size_t dstart = ZOPFLI_NUM_D * (origsize / ZOPFLI_NUM_D);

  /* Everytime the index wraps around, a new cumulative histogram is made: we're
  keeping one histogram value per LZ77 symbol rather than a full histogram for
  each to save memory. The new chunk starts as a copy of the previous one. */
  if (origsize % ZOPFLI_NUM_LL == 0) {
    size_t llsize = origsize;
    for (i = 0; i < ZOPFLI_NUM_LL; i++) {
      ZOPFLI_APPEND_DATA(
          origsize == 0 ? 0 : store->ll_counts[origsize - ZOPFLI_NUM_LL + i],
          &store->ll_counts, &llsize);
    }
  }
  if (origsize % ZOPFLI_NUM_D == 0) {
    size_t dsize = origsize;
    for (i = 0; i < ZOPFLI_NUM_D; i++) {
      ZOPFLI_APPEND_DATA(
          origsize == 0 ? 0 : store->d_counts[origsize - ZOPFLI_NUM_D + i],
          &store->d_counts, &dsize);
    }
  }

  /* ZOPFLI_APPEND_DATA increments its size argument, so store->size is reset
     to origsize before each parallel-array append below. */
  ZOPFLI_APPEND_DATA(length, &store->litlens, &store->size);
  store->size = origsize;
  ZOPFLI_APPEND_DATA(dist, &store->dists, &store->size);
  store->size = origsize;
  ZOPFLI_APPEND_DATA(pos, &store->pos, &store->size);
  assert(length < 259);

  if (dist == 0) {
    /* Literal: the ll symbol is the byte value itself, distance symbol 0. */
    store->size = origsize;
    ZOPFLI_APPEND_DATA(length, &store->ll_symbol, &store->size);
    store->size = origsize;
    ZOPFLI_APPEND_DATA(0, &store->d_symbol, &store->size);
    store->ll_counts[llstart + length]++;
  } else {
    /* Length/distance pair: store the deflate symbols and bump both
       cumulative histograms. */
    store->size = origsize;
    ZOPFLI_APPEND_DATA(ZopfliGetLengthSymbol(length),
                       &store->ll_symbol, &store->size);
    store->size = origsize;
    ZOPFLI_APPEND_DATA(ZopfliGetDistSymbol(dist),
                       &store->d_symbol, &store->size);
    store->ll_counts[llstart + ZopfliGetLengthSymbol(length)]++;
    store->d_counts[dstart + ZopfliGetDistSymbol(dist)]++;
  }
}
/*
Appends all symbols of store to target. Re-inserts them one at a time via
ZopfliStoreLitLenDist so target's cumulative histograms stay consistent.
*/
void ZopfliAppendLZ77Store(const ZopfliLZ77Store* store,
                           ZopfliLZ77Store* target) {
  size_t j;
  for (j = 0; j < store->size; j++) {
    ZopfliStoreLitLenDist(store->litlens[j], store->dists[j],
                          store->pos[j], target);
  }
}
/*
Number of input bytes covered by the lz77 symbols [lstart, lend).
*/
size_t ZopfliLZ77GetByteRange(const ZopfliLZ77Store* lz77,
                              size_t lstart, size_t lend) {
  size_t last;
  if (lstart == lend) return 0;
  last = lend - 1;
  /* A literal (dist 0) covers one byte; a match covers litlens bytes. */
  if (lz77->dists[last] == 0) {
    return lz77->pos[last] + 1 - lz77->pos[lstart];
  }
  return lz77->pos[last] + lz77->litlens[last] - lz77->pos[lstart];
}
/* Computes the symbol histograms of the range [0, lpos] from the store's
   cumulative per-chunk histograms. */
static void ZopfliLZ77GetHistogramAt(const ZopfliLZ77Store* lz77, size_t lpos,
                                     size_t* ll_counts, size_t* d_counts) {
  /* The real histogram is created by using the histogram for this chunk, but
  all superfluous values of this chunk subtracted. */
  size_t llpos = ZOPFLI_NUM_LL * (lpos / ZOPFLI_NUM_LL);
  size_t dpos = ZOPFLI_NUM_D * (lpos / ZOPFLI_NUM_D);
  size_t i;

  /* Start from the cumulative counts of the chunk containing lpos... */
  for (i = 0; i < ZOPFLI_NUM_LL; i++) {
    ll_counts[i] = lz77->ll_counts[llpos + i];
  }
  /* ...then subtract the symbols after lpos within the same chunk. */
  for (i = lpos + 1; i < llpos + ZOPFLI_NUM_LL && i < lz77->size; i++) {
    ll_counts[lz77->ll_symbol[i]]--;
  }

  for (i = 0; i < ZOPFLI_NUM_D; i++) {
    d_counts[i] = lz77->d_counts[dpos + i];
  }
  for (i = lpos + 1; i < dpos + ZOPFLI_NUM_D && i < lz77->size; i++) {
    /* Literals (dist 0) contribute no distance symbol. */
    if (lz77->dists[i] != 0) d_counts[lz77->d_symbol[i]]--;
  }
}
/* Computes the ll/d symbol histograms of the lz77 range [lstart, lend). */
void ZopfliLZ77GetHistogram(const ZopfliLZ77Store* lz77,
                            size_t lstart, size_t lend,
                            size_t* ll_counts, size_t* d_counts) {
  size_t i;
  if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
    /* Short range: counting directly is cheaper than the cumulative trick. */
    memset(ll_counts, 0, sizeof(*ll_counts) * ZOPFLI_NUM_LL);
    memset(d_counts, 0, sizeof(*d_counts) * ZOPFLI_NUM_D);
    for (i = lstart; i < lend; i++) {
      ll_counts[lz77->ll_symbol[i]]++;
      if (lz77->dists[i] != 0) d_counts[lz77->d_symbol[i]]++;
    }
  } else {
    /* Subtract the cumulative histograms at the end and the start to get the
    histogram for this range. */
    ZopfliLZ77GetHistogramAt(lz77, lend - 1, ll_counts, d_counts);
    if (lstart > 0) {
      size_t ll_counts2[ZOPFLI_NUM_LL];
      size_t d_counts2[ZOPFLI_NUM_D];
      ZopfliLZ77GetHistogramAt(lz77, lstart - 1, ll_counts2, d_counts2);

      for (i = 0; i < ZOPFLI_NUM_LL; i++) {
        ll_counts[i] -= ll_counts2[i];
      }
      for (i = 0; i < ZOPFLI_NUM_D; i++) {
        d_counts[i] -= d_counts2[i];
      }
    }
  }
}
/*
Sets up the per-block state for [blockstart, blockend); when add_lmc is
nonzero (and the feature is compiled in), attaches a longest-match cache
sized for exactly this block.
*/
void ZopfliInitBlockState(const ZopfliOptions* options,
                          size_t blockstart, size_t blockend, int add_lmc,
                          ZopfliBlockState* s) {
  s->options = options;
  s->blockstart = blockstart;
  s->blockend = blockend;
#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  s->lmc = 0;
  if (add_lmc) {
    s->lmc = (ZopfliLongestMatchCache*)malloc(sizeof(ZopfliLongestMatchCache));
    ZopfliInitCache(blockend - blockstart, s->lmc);
  }
#endif
}
/*
Releases the block state's longest-match cache, if one was attached.
*/
void ZopfliCleanBlockState(ZopfliBlockState* s) {
#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  if (s->lmc == 0) return;
  ZopfliCleanCache(s->lmc);
  free(s->lmc);
#endif
}
/*
Gets a score of the length given the distance. Typically, the score of the
length is the length itself, but if the distance is very long, decrease the
score of the length a bit to make up for the fact that long distances use large
amounts of extra bits.
This is not an accurate score, it is a heuristic only for the greedy LZ77
implementation. More accurate cost models are employed later. Making this
heuristic more accurate may hurt rather than improve compression.
The two direct uses of this heuristic are:
-avoid using a length of 3 in combination with a long distance. This only has
an effect if length == 3.
-make a slightly better choice between the two options of the lazy matching.
Indirectly, this affects:
-the block split points if the default of block splitting first is used, in a
rather unpredictable way
-the first zopfli run, so it affects the chance of the first run being closer
to the optimal output
*/
static int GetLengthScore(int length, int distance) {
  /*
  At 1024, the distance uses 9+ extra bits and this seems to be the sweet spot
  on tested files: penalize the length by one so short matches at long
  distances become less attractive to the greedy matcher.
  */
  if (distance > 1024) {
    return length - 1;
  }
  return length;
}
/*
Asserts that copying `length` bytes from `dist` back actually reproduces the
bytes at `pos`.
*/
void ZopfliVerifyLenDist(const unsigned char* data, size_t datasize, size_t pos,
                         unsigned short dist, unsigned short length) {
  /* TODO(lode): make this only run in a debug compile, it's for assert only. */
  size_t j;

  assert(pos + length <= datasize);
  for (j = 0; j < length; j++) {
    unsigned char back = data[pos - dist + j];
    unsigned char here = data[pos + j];
    if (back != here) {
      assert(back == here);  /* Fails loudly on the first mismatch. */
      break;
    }
  }
}
/*
Finds how long the match of scan and match is. Can be used to find how many
bytes starting from scan, and from match, are equal. Returns the last byte
after scan, which is still equal to the correspondinb byte after match.
scan is the position to compare
match is the earlier position to compare.
end is the last possible byte, beyond which to stop looking.
safe_end is a few (8) bytes before end, for comparing multiple bytes at once.
*/
static const unsigned char* GetMatch(const unsigned char* scan,
                                     const unsigned char* match,
                                     const unsigned char* end,
                                     const unsigned char* safe_end) {
  /* NOTE(review): the word-sized fast paths below read through size_t* /
     unsigned int* casts, which assumes a platform tolerant of unaligned,
     type-punned loads (strict aliasing is technically violated) — confirm
     on new target platforms. */
  if (sizeof(size_t) == 8) {
    /* 8 checks at once per array bounds check (size_t is 64-bit). */
    while (scan < safe_end && *((size_t*)scan) == *((size_t*)match)) {
      scan += 8;
      match += 8;
    }
  } else if (sizeof(unsigned int) == 4) {
    /* 4 checks at once per array bounds check (unsigned int is 32-bit). */
    while (scan < safe_end
        && *((unsigned int*)scan) == *((unsigned int*)match)) {
      scan += 4;
      match += 4;
    }
  } else {
    /* do 8 checks at once per array bounds check. */
    while (scan < safe_end && *scan == *match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match) {
      scan++; match++;
    }
  }

  /* The remaining few bytes. */
  while (scan != end && *scan == *match) {
    scan++; match++;
  }

  return scan;
}
#ifdef ZOPFLI_LONGEST_MATCH_CACHE
/*
Gets distance, length and sublen values from the cache if possible.
Returns 1 if it got the values from the cache, 0 if not.
Updates the limit value to a smaller one if possible with more limited
information from the cache.
*/
static int TryGetFromLongestMatchCache(ZopfliBlockState* s,
    size_t pos, size_t* limit,
    unsigned short* sublen, unsigned short* distance, unsigned short* length) {
  /* The LMC cache starts at the beginning of the block rather than the
  beginning of the whole array. */
  size_t lmcpos = pos - s->blockstart;

  /* Length > 0 and dist 0 is invalid combination, which indicates on purpose
  that this cache value is not filled in yet. */
  unsigned char cache_available = s->lmc && (s->lmc->length[lmcpos] == 0 ||
      s->lmc->dist[lmcpos] != 0);
  /* The cached entry is usable when the caller's limit is the default max,
     when the cached length already fits under the limit, or when the cached
     sublens reach at least as far as the limit. */
  unsigned char limit_ok_for_cache = cache_available &&
      (*limit == ZOPFLI_MAX_MATCH || s->lmc->length[lmcpos] <= *limit ||
      (sublen && ZopfliMaxCachedSublen(s->lmc,
          lmcpos, s->lmc->length[lmcpos]) >= *limit));

  if (s->lmc && limit_ok_for_cache && cache_available) {
    if (!sublen || s->lmc->length[lmcpos]
        <= ZopfliMaxCachedSublen(s->lmc, lmcpos, s->lmc->length[lmcpos])) {
      *length = s->lmc->length[lmcpos];
      if (*length > *limit) *length = *limit;
      if (sublen) {
        /* Reconstruct the per-length best distances from the compact cache. */
        ZopfliCacheToSublen(s->lmc, lmcpos, *length, sublen);
        *distance = sublen[*length];
        if (*limit == ZOPFLI_MAX_MATCH && *length >= ZOPFLI_MIN_MATCH) {
          assert(sublen[*length] == s->lmc->dist[lmcpos]);
        }
      } else {
        *distance = s->lmc->dist[lmcpos];
      }
      return 1;
    }
    /* Can't use much of the cache, since the "sublens" need to be calculated,
    but at least we already know when to stop. */
    *limit = s->lmc->length[lmcpos];
  }

  return 0;
}
/*
Stores the found sublen, distance and length in the longest match cache, if
possible.
*/
static void StoreInLongestMatchCache(ZopfliBlockState* s,
                                     size_t pos, size_t limit,
                                     const unsigned short* sublen,
                                     unsigned short distance,
                                     unsigned short length) {
  /* The LMC cache is indexed relative to the start of the block, not the
  start of the whole array. */
  size_t lmcpos = pos - s->blockstart;

  /* Only full-depth searches that produced sublen data are cached. */
  if (!s->lmc || limit != ZOPFLI_MAX_MATCH || !sublen) return;

  /* A slot with length > 0 and dist == 0 deliberately marks "not filled in
  yet"; anything else means the slot is already occupied. */
  if (s->lmc->length[lmcpos] == 0 || s->lmc->dist[lmcpos] != 0) return;

  assert(s->lmc->length[lmcpos] == 1 && s->lmc->dist[lmcpos] == 0);
  /* Matches shorter than ZOPFLI_MIN_MATCH are stored as "no match" (0/0). */
  s->lmc->dist[lmcpos] = length < ZOPFLI_MIN_MATCH ? 0 : distance;
  s->lmc->length[lmcpos] = length < ZOPFLI_MIN_MATCH ? 0 : length;
  assert(!(s->lmc->length[lmcpos] == 1 && s->lmc->dist[lmcpos] == 0));
  ZopfliSublenToCache(sublen, lmcpos, length, s->lmc);
}
#endif
/*
Finds the longest match (length, and corresponding distance) for the data at
pos in array, using the hash h to look up earlier occurrences.
s: block state; provides the longest-match cache when
   ZOPFLI_LONGEST_MATCH_CACHE is defined.
h: hash state; must be up to date for position pos.
array: the data to search in.
pos: position of the byte to find a match for.
size: total size of array.
limit: maximum match length to search for; at most ZOPFLI_MAX_MATCH.
sublen: optional output array (may be 0); sublen[len] receives the distance
        at which a match of length len was first (i.e. nearest) found.
distance: output, distance of the best match (0 if none was found).
length: output, length of the best match; starts at 1, and is set to 0 when
        fewer than ZOPFLI_MIN_MATCH bytes remain after pos.
*/
void ZopfliFindLongestMatch(ZopfliBlockState* s, const ZopfliHash* h,
    const unsigned char* array,
    size_t pos, size_t size, size_t limit,
    unsigned short* sublen, unsigned short* distance, unsigned short* length) {
  unsigned short hpos = pos & ZOPFLI_WINDOW_MASK, p, pp;
  unsigned short bestdist = 0;
  unsigned short bestlength = 1;
  const unsigned char* scan;
  const unsigned char* match;
  const unsigned char* arrayend;
  const unsigned char* arrayend_safe;
#if ZOPFLI_MAX_CHAIN_HITS < ZOPFLI_WINDOW_SIZE
  int chain_counter = ZOPFLI_MAX_CHAIN_HITS;  /* For quitting early. */
#endif

  unsigned dist = 0;  /* Not unsigned short on purpose. */

  /* Local copies of the hash arrays, so the ZOPFLI_HASH_SAME_HASH branch
  below can swap to the second hash mid-loop. */
  int* hhead = h->head;
  unsigned short* hprev = h->prev;
  int* hhashval = h->hashval;
  int hval = h->val;

#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  /* If the answer for this position is already cached, use it directly. */
  if (TryGetFromLongestMatchCache(s, pos, &limit, sublen, distance, length)) {
    assert(pos + *length <= size);
    return;
  }
#endif

  assert(limit <= ZOPFLI_MAX_MATCH);
  assert(limit >= ZOPFLI_MIN_MATCH);
  assert(pos < size);

  if (size - pos < ZOPFLI_MIN_MATCH) {
    /* The rest of the code assumes there are at least ZOPFLI_MIN_MATCH bytes
    to try. */
    *length = 0;
    *distance = 0;
    return;
  }

  /* Never look for matches beyond the end of the input. */
  if (pos + limit > size) {
    limit = size - pos;
  }
  arrayend = &array[pos] + limit;
  arrayend_safe = arrayend - 8;  /* For GetMatch's multi-byte comparisons. */

  assert(hval < 65536);

  pp = hhead[hval];  /* During the whole loop, p == hprev[pp]. */
  p = hprev[pp];

  assert(pp == hpos);

  /* Distance back to the candidate position; the window is circular, so the
  second form handles wrap-around. */
  dist = p < pp ? pp - p : ((ZOPFLI_WINDOW_SIZE - p) + pp);

  /* Go through all distances. */
  while (dist < ZOPFLI_WINDOW_SIZE) {
    unsigned short currentlength = 0;

    assert(p < ZOPFLI_WINDOW_SIZE);
    assert(p == hprev[pp]);
    assert(hhashval[p] == hval);

    if (dist > 0) {
      assert(pos < size);
      assert(dist <= pos);
      scan = &array[pos];
      match = &array[pos - dist];

      /* Testing the byte at position bestlength first, goes slightly faster. */
      if (pos + bestlength >= size
          || *(scan + bestlength) == *(match + bestlength)) {

#ifdef ZOPFLI_HASH_SAME
        /* Skip ahead over a run of identical bytes shared by both positions,
        using the precomputed run lengths in h->same (clamped to limit). */
        unsigned short same0 = h->same[pos & ZOPFLI_WINDOW_MASK];
        if (same0 > 2 && *scan == *match) {
          unsigned short same1 = h->same[(pos - dist) & ZOPFLI_WINDOW_MASK];
          unsigned short same = same0 < same1 ? same0 : same1;
          if (same > limit) same = limit;
          scan += same;
          match += same;
        }
#endif
        scan = GetMatch(scan, match, arrayend, arrayend_safe);
        currentlength = scan - &array[pos];  /* The found length. */
      }

      if (currentlength > bestlength) {
        if (sublen) {
          unsigned short j;
          /* Record this distance for every newly reached length. */
          for (j = bestlength + 1; j <= currentlength; j++) {
            sublen[j] = dist;
          }
        }
        bestdist = dist;
        bestlength = currentlength;
        /* Can't do better than limit; stop searching. */
        if (currentlength >= limit) break;
      }
    }

#ifdef ZOPFLI_HASH_SAME_HASH
    /* Switch to the other hash once this will be more efficient. */
    if (hhead != h->head2 && bestlength >= h->same[hpos] &&
        h->val2 == h->hashval2[p]) {
      /* Now use the hash that encodes the length and first byte. */
      hhead = h->head2;
      hprev = h->prev2;
      hhashval = h->hashval2;
      hval = h->val2;
    }
#endif

    pp = p;
    p = hprev[p];
    if (p == pp) break;  /* Uninited prev value. */

    /* Advance to the next, farther, candidate in the circular window. */
    dist += p < pp ? pp - p : ((ZOPFLI_WINDOW_SIZE - p) + pp);

#if ZOPFLI_MAX_CHAIN_HITS < ZOPFLI_WINDOW_SIZE
    chain_counter--;
    if (chain_counter <= 0) break;
#endif
  }

#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  StoreInLongestMatchCache(s, pos, limit, sublen, bestdist, bestlength);
#endif

  assert(bestlength <= limit);

  *distance = bestdist;
  *length = bestlength;
  assert(pos + *length <= size);
}
/*
Performs LZ77 on in[instart..inend) with a greedy strategy (plus one step of
lazy matching when ZOPFLI_LAZY_MATCHING is defined), appending the resulting
literal and length/distance symbols to store. The hash h is reset and warmed
up over the bytes preceding instart so matches can reach back up to
ZOPFLI_WINDOW_SIZE bytes before the start of the block.
*/
void ZopfliLZ77Greedy(ZopfliBlockState* s, const unsigned char* in,
                      size_t instart, size_t inend,
                      ZopfliLZ77Store* store, ZopfliHash* h) {
  size_t i = 0, j;
  unsigned short leng;
  unsigned short dist;
  int lengthscore;
  /* Start of the window: up to ZOPFLI_WINDOW_SIZE bytes before instart. */
  size_t windowstart = instart > ZOPFLI_WINDOW_SIZE
      ? instart - ZOPFLI_WINDOW_SIZE : 0;
  unsigned short dummysublen[259];

#ifdef ZOPFLI_LAZY_MATCHING
  /* Lazy matching. */
  unsigned prev_length = 0;
  unsigned prev_match = 0;
  int prevlengthscore;
  int match_available = 0;  /* Nonzero if a match was deferred last step. */
#endif

  if (instart == inend) return;

  /* Feed the window before instart into the hash without emitting output. */
  ZopfliResetHash(ZOPFLI_WINDOW_SIZE, h);
  ZopfliWarmupHash(in, windowstart, inend, h);
  for (i = windowstart; i < instart; i++) {
    ZopfliUpdateHash(in, i, inend, h);
  }

  for (i = instart; i < inend; i++) {
    ZopfliUpdateHash(in, i, inend, h);

    ZopfliFindLongestMatch(s, h, in, i, inend, ZOPFLI_MAX_MATCH, dummysublen,
                           &dist, &leng);
    lengthscore = GetLengthScore(leng, dist);

#ifdef ZOPFLI_LAZY_MATCHING
    /* Lazy matching. */
    prevlengthscore = GetLengthScore(prev_length, prev_match);
    if (match_available) {
      match_available = 0;
      if (lengthscore > prevlengthscore + 1) {
        /* The current match scores clearly better: emit the previous byte
        as a literal, and possibly defer the current match in its turn. */
        ZopfliStoreLitLenDist(in[i - 1], 0, i - 1, store);
        if (lengthscore >= ZOPFLI_MIN_MATCH && leng < ZOPFLI_MAX_MATCH) {
          match_available = 1;
          prev_length = leng;
          prev_match = dist;
          continue;
        }
      } else {
        /* Add previous to output. */
        leng = prev_length;
        dist = prev_match;
        lengthscore = prevlengthscore;
        /* Add to output. */
        ZopfliVerifyLenDist(in, inend, i - 1, dist, leng);
        ZopfliStoreLitLenDist(leng, dist, i - 1, store);
        /* The match starts at i - 1 and covers leng bytes; hash the rest and
        skip ahead. j starts at 2 because positions i - 1 (previous
        iteration) and i (this iteration) were already hashed. */
        for (j = 2; j < leng; j++) {
          assert(i < inend);
          i++;
          ZopfliUpdateHash(in, i, inend, h);
        }
        continue;
      }
    }
    else if (lengthscore >= ZOPFLI_MIN_MATCH && leng < ZOPFLI_MAX_MATCH) {
      /* Defer this match: the next position may yield a better one. */
      match_available = 1;
      prev_length = leng;
      prev_match = dist;
      continue;
    }
    /* End of lazy matching. */
#endif

    /* Add to output. */
    if (lengthscore >= ZOPFLI_MIN_MATCH) {
      ZopfliVerifyLenDist(in, inend, i, dist, leng);
      ZopfliStoreLitLenDist(leng, dist, i, store);
    } else {
      /* Not worth a length/distance pair; emit a single literal instead. */
      leng = 1;
      ZopfliStoreLitLenDist(in[i], 0, i, store);
    }
    /* Hash the remaining bytes covered by the emitted symbol and advance i
    past them (position i itself was hashed at the top of the loop). */
    for (j = 1; j < leng; j++) {
      assert(i < inend);
      i++;
      ZopfliUpdateHash(in, i, inend, h);
    }
  }
}

Some files were not shown because too many files have changed in this diff Show More