parent
c45b362da5
commit
8d27bd2e77
@ -1,3 +0,0 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
@ -1,6 +0,0 @@
|
||||
<component name="InspectionProjectProfileManager">
|
||||
<settings>
|
||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
||||
<version value="1.0" />
|
||||
</settings>
|
||||
</component>
|
@ -1,10 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="Black">
|
||||
<option name="sdkName" value="Python 3.12 (pythonProject11)" />
|
||||
</component>
|
||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (pythonProject11)" project-jdk-type="Python SDK" />
|
||||
<component name="PyCharmProfessionalAdvertiser">
|
||||
<option name="shown" value="true" />
|
||||
</component>
|
||||
</project>
|
@ -1,8 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/pythonProject11.iml" filepath="$PROJECT_DIR$/.idea/pythonProject11.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
@ -1,16 +0,0 @@
|
||||
# This is a sample Python script.
|
||||
|
||||
# Press Shift+F10 to execute it or replace it with your code.
|
||||
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
|
||||
|
||||
|
||||
def print_hi(name):
|
||||
# Use a breakpoint in the code line below to debug your script.
|
||||
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
|
||||
|
||||
|
||||
# Press the green button in the gutter to run the script.
|
||||
if __name__ == '__main__':
|
||||
print_hi('PyCharm')
|
||||
|
||||
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
File diff suppressed because it is too large
Load Diff
@ -1 +0,0 @@
|
||||
pip
|
@ -1,27 +0,0 @@
|
||||
Copyright (c) Django Software Foundation and individual contributors.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of Django nor the names of its contributors may be used
|
||||
to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -1,290 +0,0 @@
|
||||
Django is licensed under the three-clause BSD license; see the file
|
||||
LICENSE for details.
|
||||
|
||||
Django includes code from the Python standard library, which is licensed under
|
||||
the Python license, a permissive open source license. The copyright and license
|
||||
is included below for compliance with Python's terms.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2001-present Python Software Foundation; All Rights Reserved
|
||||
|
||||
A. HISTORY OF THE SOFTWARE
|
||||
==========================
|
||||
|
||||
Python was created in the early 1990s by Guido van Rossum at Stichting
|
||||
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
|
||||
as a successor of a language called ABC. Guido remains Python's
|
||||
principal author, although it includes many contributions from others.
|
||||
|
||||
In 1995, Guido continued his work on Python at the Corporation for
|
||||
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
|
||||
in Reston, Virginia where he released several versions of the
|
||||
software.
|
||||
|
||||
In May 2000, Guido and the Python core development team moved to
|
||||
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
|
||||
year, the PythonLabs team moved to Digital Creations, which became
|
||||
Zope Corporation. In 2001, the Python Software Foundation (PSF, see
|
||||
https://www.python.org/psf/) was formed, a non-profit organization
|
||||
created specifically to own Python-related Intellectual Property.
|
||||
Zope Corporation was a sponsoring member of the PSF.
|
||||
|
||||
All Python releases are Open Source (see http://www.opensource.org for
|
||||
the Open Source Definition). Historically, most, but not all, Python
|
||||
releases have also been GPL-compatible; the table below summarizes
|
||||
the various releases.
|
||||
|
||||
Release Derived Year Owner GPL-
|
||||
from compatible? (1)
|
||||
|
||||
0.9.0 thru 1.2 1991-1995 CWI yes
|
||||
1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
|
||||
1.6 1.5.2 2000 CNRI no
|
||||
2.0 1.6 2000 BeOpen.com no
|
||||
1.6.1 1.6 2001 CNRI yes (2)
|
||||
2.1 2.0+1.6.1 2001 PSF no
|
||||
2.0.1 2.0+1.6.1 2001 PSF yes
|
||||
2.1.1 2.1+2.0.1 2001 PSF yes
|
||||
2.1.2 2.1.1 2002 PSF yes
|
||||
2.1.3 2.1.2 2002 PSF yes
|
||||
2.2 and above 2.1.1 2001-now PSF yes
|
||||
|
||||
Footnotes:
|
||||
|
||||
(1) GPL-compatible doesn't mean that we're distributing Python under
|
||||
the GPL. All Python licenses, unlike the GPL, let you distribute
|
||||
a modified version without making your changes open source. The
|
||||
GPL-compatible licenses make it possible to combine Python with
|
||||
other software that is released under the GPL; the others don't.
|
||||
|
||||
(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
|
||||
because its license has a choice of law clause. According to
|
||||
CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
|
||||
is "not incompatible" with the GPL.
|
||||
|
||||
Thanks to the many outside volunteers who have worked under Guido's
|
||||
direction to make these releases possible.
|
||||
|
||||
|
||||
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
|
||||
===============================================================
|
||||
|
||||
Python software and documentation are licensed under the
|
||||
Python Software Foundation License Version 2.
|
||||
|
||||
Starting with Python 3.8.6, examples, recipes, and other code in
|
||||
the documentation are dual licensed under the PSF License Version 2
|
||||
and the Zero-Clause BSD license.
|
||||
|
||||
Some software incorporated into Python is under different licenses.
|
||||
The licenses are listed with code falling under that license.
|
||||
|
||||
|
||||
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
||||
--------------------------------------------
|
||||
|
||||
1. This LICENSE AGREEMENT is between the Python Software Foundation
|
||||
("PSF"), and the Individual or Organization ("Licensee") accessing and
|
||||
otherwise using this software ("Python") in source or binary form and
|
||||
its associated documentation.
|
||||
|
||||
2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||
analyze, test, perform and/or display publicly, prepare derivative works,
|
||||
distribute, and otherwise use Python alone or in any derivative version,
|
||||
provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
||||
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
|
||||
All Rights Reserved" are retained in Python alone or in any derivative version
|
||||
prepared by Licensee.
|
||||
|
||||
3. In the event Licensee prepares a derivative work that is based on
|
||||
or incorporates Python or any part thereof, and wants to make
|
||||
the derivative work available to others as provided herein, then
|
||||
Licensee hereby agrees to include in any such work a brief summary of
|
||||
the changes made to Python.
|
||||
|
||||
4. PSF is making Python available to Licensee on an "AS IS"
|
||||
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
||||
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
||||
INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
|
||||
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
||||
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
|
||||
6. This License Agreement will automatically terminate upon a material
|
||||
breach of its terms and conditions.
|
||||
|
||||
7. Nothing in this License Agreement shall be deemed to create any
|
||||
relationship of agency, partnership, or joint venture between PSF and
|
||||
Licensee. This License Agreement does not grant permission to use PSF
|
||||
trademarks or trade name in a trademark sense to endorse or promote
|
||||
products or services of Licensee, or any third party.
|
||||
|
||||
8. By copying, installing or otherwise using Python, Licensee
|
||||
agrees to be bound by the terms and conditions of this License
|
||||
Agreement.
|
||||
|
||||
|
||||
BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
|
||||
-------------------------------------------
|
||||
|
||||
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
|
||||
|
||||
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
|
||||
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
|
||||
Individual or Organization ("Licensee") accessing and otherwise using
|
||||
this software in source or binary form and its associated
|
||||
documentation ("the Software").
|
||||
|
||||
2. Subject to the terms and conditions of this BeOpen Python License
|
||||
Agreement, BeOpen hereby grants Licensee a non-exclusive,
|
||||
royalty-free, world-wide license to reproduce, analyze, test, perform
|
||||
and/or display publicly, prepare derivative works, distribute, and
|
||||
otherwise use the Software alone or in any derivative version,
|
||||
provided, however, that the BeOpen Python License is retained in the
|
||||
Software, alone or in any derivative version prepared by Licensee.
|
||||
|
||||
3. BeOpen is making the Software available to Licensee on an "AS IS"
|
||||
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
|
||||
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
|
||||
INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
|
||||
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
|
||||
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
|
||||
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
|
||||
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
|
||||
5. This License Agreement will automatically terminate upon a material
|
||||
breach of its terms and conditions.
|
||||
|
||||
6. This License Agreement shall be governed by and interpreted in all
|
||||
respects by the law of the State of California, excluding conflict of
|
||||
law provisions. Nothing in this License Agreement shall be deemed to
|
||||
create any relationship of agency, partnership, or joint venture
|
||||
between BeOpen and Licensee. This License Agreement does not grant
|
||||
permission to use BeOpen trademarks or trade names in a trademark
|
||||
sense to endorse or promote products or services of Licensee, or any
|
||||
third party. As an exception, the "BeOpen Python" logos available at
|
||||
http://www.pythonlabs.com/logos.html may be used according to the
|
||||
permissions granted on that web page.
|
||||
|
||||
7. By copying, installing or otherwise using the software, Licensee
|
||||
agrees to be bound by the terms and conditions of this License
|
||||
Agreement.
|
||||
|
||||
|
||||
CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
|
||||
---------------------------------------
|
||||
|
||||
1. This LICENSE AGREEMENT is between the Corporation for National
|
||||
Research Initiatives, having an office at 1895 Preston White Drive,
|
||||
Reston, VA 20191 ("CNRI"), and the Individual or Organization
|
||||
("Licensee") accessing and otherwise using Python 1.6.1 software in
|
||||
source or binary form and its associated documentation.
|
||||
|
||||
2. Subject to the terms and conditions of this License Agreement, CNRI
|
||||
hereby grants Licensee a nonexclusive, royalty-free, world-wide
|
||||
license to reproduce, analyze, test, perform and/or display publicly,
|
||||
prepare derivative works, distribute, and otherwise use Python 1.6.1
|
||||
alone or in any derivative version, provided, however, that CNRI's
|
||||
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
|
||||
1995-2001 Corporation for National Research Initiatives; All Rights
|
||||
Reserved" are retained in Python 1.6.1 alone or in any derivative
|
||||
version prepared by Licensee. Alternately, in lieu of CNRI's License
|
||||
Agreement, Licensee may substitute the following text (omitting the
|
||||
quotes): "Python 1.6.1 is made available subject to the terms and
|
||||
conditions in CNRI's License Agreement. This Agreement together with
|
||||
Python 1.6.1 may be located on the internet using the following
|
||||
unique, persistent identifier (known as a handle): 1895.22/1013. This
|
||||
Agreement may also be obtained from a proxy server on the internet
|
||||
using the following URL: http://hdl.handle.net/1895.22/1013".
|
||||
|
||||
3. In the event Licensee prepares a derivative work that is based on
|
||||
or incorporates Python 1.6.1 or any part thereof, and wants to make
|
||||
the derivative work available to others as provided herein, then
|
||||
Licensee hereby agrees to include in any such work a brief summary of
|
||||
the changes made to Python 1.6.1.
|
||||
|
||||
4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
|
||||
basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
|
||||
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
|
||||
INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
|
||||
5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
|
||||
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
|
||||
6. This License Agreement will automatically terminate upon a material
|
||||
breach of its terms and conditions.
|
||||
|
||||
7. This License Agreement shall be governed by the federal
|
||||
intellectual property law of the United States, including without
|
||||
limitation the federal copyright law, and, to the extent such
|
||||
U.S. federal law does not apply, by the law of the Commonwealth of
|
||||
Virginia, excluding Virginia's conflict of law provisions.
|
||||
Notwithstanding the foregoing, with regard to derivative works based
|
||||
on Python 1.6.1 that incorporate non-separable material that was
|
||||
previously distributed under the GNU General Public License (GPL), the
|
||||
law of the Commonwealth of Virginia shall govern this License
|
||||
Agreement only as to issues arising under or with respect to
|
||||
Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
|
||||
License Agreement shall be deemed to create any relationship of
|
||||
agency, partnership, or joint venture between CNRI and Licensee. This
|
||||
License Agreement does not grant permission to use CNRI trademarks or
|
||||
trade name in a trademark sense to endorse or promote products or
|
||||
services of Licensee, or any third party.
|
||||
|
||||
8. By clicking on the "ACCEPT" button where indicated, or by copying,
|
||||
installing or otherwise using Python 1.6.1, Licensee agrees to be
|
||||
bound by the terms and conditions of this License Agreement.
|
||||
|
||||
ACCEPT
|
||||
|
||||
|
||||
CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
|
||||
--------------------------------------------------
|
||||
|
||||
Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
|
||||
The Netherlands. All rights reserved.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software and its
|
||||
documentation for any purpose and without fee is hereby granted,
|
||||
provided that the above copyright notice appear in all copies and that
|
||||
both that copyright notice and this permission notice appear in
|
||||
supporting documentation, and that the name of Stichting Mathematisch
|
||||
Centrum or CWI not be used in advertising or publicity pertaining to
|
||||
distribution of the software without specific, written prior
|
||||
permission.
|
||||
|
||||
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
|
||||
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
|
||||
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
@ -1,102 +0,0 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: Django
|
||||
Version: 4.2.11
|
||||
Summary: A high-level Python web framework that encourages rapid development and clean, pragmatic design.
|
||||
Home-page: https://www.djangoproject.com/
|
||||
Author: Django Software Foundation
|
||||
Author-email: foundation@djangoproject.com
|
||||
License: BSD-3-Clause
|
||||
Project-URL: Documentation, https://docs.djangoproject.com/
|
||||
Project-URL: Release notes, https://docs.djangoproject.com/en/stable/releases/
|
||||
Project-URL: Funding, https://www.djangoproject.com/fundraising/
|
||||
Project-URL: Source, https://github.com/django/django
|
||||
Project-URL: Tracker, https://code.djangoproject.com/
|
||||
Platform: UNKNOWN
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Web Environment
|
||||
Classifier: Framework :: Django
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3 :: Only
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Topic :: Internet :: WWW/HTTP
|
||||
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
||||
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
|
||||
Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Requires-Python: >=3.8
|
||||
License-File: LICENSE
|
||||
License-File: LICENSE.python
|
||||
License-File: AUTHORS
|
||||
Requires-Dist: asgiref (<4,>=3.6.0)
|
||||
Requires-Dist: sqlparse (>=0.3.1)
|
||||
Requires-Dist: backports.zoneinfo ; python_version < "3.9"
|
||||
Requires-Dist: tzdata ; sys_platform == "win32"
|
||||
Provides-Extra: argon2
|
||||
Requires-Dist: argon2-cffi (>=19.1.0) ; extra == 'argon2'
|
||||
Provides-Extra: bcrypt
|
||||
Requires-Dist: bcrypt ; extra == 'bcrypt'
|
||||
|
||||
======
|
||||
Django
|
||||
======
|
||||
|
||||
Django is a high-level Python web framework that encourages rapid development
|
||||
and clean, pragmatic design. Thanks for checking it out.
|
||||
|
||||
All documentation is in the "``docs``" directory and online at
|
||||
https://docs.djangoproject.com/en/stable/. If you're just getting started,
|
||||
here's how we recommend you read the docs:
|
||||
|
||||
* First, read ``docs/intro/install.txt`` for instructions on installing Django.
|
||||
|
||||
* Next, work through the tutorials in order (``docs/intro/tutorial01.txt``,
|
||||
``docs/intro/tutorial02.txt``, etc.).
|
||||
|
||||
* If you want to set up an actual deployment server, read
|
||||
``docs/howto/deployment/index.txt`` for instructions.
|
||||
|
||||
* You'll probably want to read through the topical guides (in ``docs/topics``)
|
||||
next; from there you can jump to the HOWTOs (in ``docs/howto``) for specific
|
||||
problems, and check out the reference (``docs/ref``) for gory details.
|
||||
|
||||
* See ``docs/README`` for instructions on building an HTML version of the docs.
|
||||
|
||||
Docs are updated rigorously. If you find any problems in the docs, or think
|
||||
they should be clarified in any way, please take 30 seconds to fill out a
|
||||
ticket here: https://code.djangoproject.com/newticket
|
||||
|
||||
To get more help:
|
||||
|
||||
* Join the ``#django`` channel on ``irc.libera.chat``. Lots of helpful people
|
||||
hang out there. See https://web.libera.chat if you're new to IRC.
|
||||
|
||||
* Join the django-users mailing list, or read the archives, at
|
||||
https://groups.google.com/group/django-users.
|
||||
|
||||
To contribute to Django:
|
||||
|
||||
* Check out https://docs.djangoproject.com/en/dev/internals/contributing/ for
|
||||
information about getting involved.
|
||||
|
||||
To run Django's test suite:
|
||||
|
||||
* Follow the instructions in the "Unit tests" section of
|
||||
``docs/internals/contributing/writing-code/unit-tests.txt``, published online at
|
||||
https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/#running-the-unit-tests
|
||||
|
||||
Supporting the Development of Django
|
||||
====================================
|
||||
|
||||
Django's development depends on your contributions.
|
||||
|
||||
If you depend on Django, remember to support the Django Software Foundation: https://www.djangoproject.com/fundraising/
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,5 +0,0 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.37.1)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
@ -1,3 +0,0 @@
|
||||
[console_scripts]
|
||||
django-admin = django.core.management:execute_from_command_line
|
||||
|
@ -1 +0,0 @@
|
||||
django
|
@ -1,168 +0,0 @@
|
||||
"""
|
||||
MySQLdb - A DB API v2.0 compatible interface to MySQL.
|
||||
|
||||
This package is a wrapper around _mysql, which mostly implements the
|
||||
MySQL C API.
|
||||
|
||||
connect() -- connects to server
|
||||
|
||||
See the C API specification and the MySQL documentation for more info
|
||||
on other items.
|
||||
|
||||
For information on how MySQLdb handles type conversion, see the
|
||||
MySQLdb.converters module.
|
||||
"""
|
||||
|
||||
from .release import version_info
|
||||
from . import _mysql
|
||||
|
||||
if version_info != _mysql.version_info:
|
||||
raise ImportError(
|
||||
f"this is MySQLdb version {version_info}, "
|
||||
f"but _mysql is version {_mysql.version_info!r}\n"
|
||||
f"_mysql: {_mysql.__file__!r}"
|
||||
)
|
||||
|
||||
|
||||
from ._mysql import (
|
||||
NotSupportedError,
|
||||
OperationalError,
|
||||
get_client_info,
|
||||
ProgrammingError,
|
||||
Error,
|
||||
InterfaceError,
|
||||
debug,
|
||||
IntegrityError,
|
||||
string_literal,
|
||||
MySQLError,
|
||||
DataError,
|
||||
DatabaseError,
|
||||
InternalError,
|
||||
Warning,
|
||||
)
|
||||
from MySQLdb.constants import FIELD_TYPE
|
||||
from MySQLdb.times import (
|
||||
Date,
|
||||
Time,
|
||||
Timestamp,
|
||||
DateFromTicks,
|
||||
TimeFromTicks,
|
||||
TimestampFromTicks,
|
||||
)
|
||||
|
||||
threadsafety = 1
|
||||
apilevel = "2.0"
|
||||
paramstyle = "format"
|
||||
|
||||
|
||||
class DBAPISet(frozenset):
|
||||
"""A special type of set for which A == x is true if A is a
|
||||
DBAPISet and x is a member of that set."""
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, DBAPISet):
|
||||
return not self.difference(other)
|
||||
return other in self
|
||||
|
||||
|
||||
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING])
|
||||
BINARY = DBAPISet(
|
||||
[
|
||||
FIELD_TYPE.BLOB,
|
||||
FIELD_TYPE.LONG_BLOB,
|
||||
FIELD_TYPE.MEDIUM_BLOB,
|
||||
FIELD_TYPE.TINY_BLOB,
|
||||
]
|
||||
)
|
||||
NUMBER = DBAPISet(
|
||||
[
|
||||
FIELD_TYPE.DECIMAL,
|
||||
FIELD_TYPE.DOUBLE,
|
||||
FIELD_TYPE.FLOAT,
|
||||
FIELD_TYPE.INT24,
|
||||
FIELD_TYPE.LONG,
|
||||
FIELD_TYPE.LONGLONG,
|
||||
FIELD_TYPE.TINY,
|
||||
FIELD_TYPE.YEAR,
|
||||
FIELD_TYPE.NEWDECIMAL,
|
||||
]
|
||||
)
|
||||
DATE = DBAPISet([FIELD_TYPE.DATE])
|
||||
TIME = DBAPISet([FIELD_TYPE.TIME])
|
||||
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
|
||||
DATETIME = TIMESTAMP
|
||||
ROWID = DBAPISet()
|
||||
|
||||
|
||||
def test_DBAPISet_set_equality():
|
||||
assert STRING == STRING
|
||||
|
||||
|
||||
def test_DBAPISet_set_inequality():
|
||||
assert STRING != NUMBER
|
||||
|
||||
|
||||
def test_DBAPISet_set_equality_membership():
|
||||
assert FIELD_TYPE.VAR_STRING == STRING
|
||||
|
||||
|
||||
def test_DBAPISet_set_inequality_membership():
|
||||
assert FIELD_TYPE.DATE != STRING
|
||||
|
||||
|
||||
def Binary(x):
|
||||
return bytes(x)
|
||||
|
||||
|
||||
def Connect(*args, **kwargs):
|
||||
"""Factory function for connections.Connection."""
|
||||
from MySQLdb.connections import Connection
|
||||
|
||||
return Connection(*args, **kwargs)
|
||||
|
||||
|
||||
connect = Connection = Connect
|
||||
|
||||
__all__ = [
|
||||
"BINARY",
|
||||
"Binary",
|
||||
"Connect",
|
||||
"Connection",
|
||||
"DATE",
|
||||
"Date",
|
||||
"Time",
|
||||
"Timestamp",
|
||||
"DateFromTicks",
|
||||
"TimeFromTicks",
|
||||
"TimestampFromTicks",
|
||||
"DataError",
|
||||
"DatabaseError",
|
||||
"Error",
|
||||
"FIELD_TYPE",
|
||||
"IntegrityError",
|
||||
"InterfaceError",
|
||||
"InternalError",
|
||||
"MySQLError",
|
||||
"NUMBER",
|
||||
"NotSupportedError",
|
||||
"DBAPISet",
|
||||
"OperationalError",
|
||||
"ProgrammingError",
|
||||
"ROWID",
|
||||
"STRING",
|
||||
"TIME",
|
||||
"TIMESTAMP",
|
||||
"Warning",
|
||||
"apilevel",
|
||||
"connect",
|
||||
"connections",
|
||||
"constants",
|
||||
"converters",
|
||||
"cursors",
|
||||
"debug",
|
||||
"get_client_info",
|
||||
"paramstyle",
|
||||
"string_literal",
|
||||
"threadsafety",
|
||||
"version_info",
|
||||
]
|
@ -1,91 +0,0 @@
|
||||
"""Exception classes for _mysql and MySQLdb.
|
||||
|
||||
These classes are dictated by the DB API v2.0:
|
||||
|
||||
https://www.python.org/dev/peps/pep-0249/
|
||||
"""
|
||||
|
||||
|
||||
class MySQLError(Exception):
|
||||
"""Exception related to operation with MySQL."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class Warning(Warning, MySQLError):
|
||||
"""Exception raised for important warnings like data truncations
|
||||
while inserting, etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class Error(MySQLError):
|
||||
"""Exception that is the base class of all other error exceptions
|
||||
(not Warning)."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class InterfaceError(Error):
|
||||
"""Exception raised for errors that are related to the database
|
||||
interface rather than the database itself."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class DatabaseError(Error):
|
||||
"""Exception raised for errors that are related to the
|
||||
database."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class DataError(DatabaseError):
|
||||
"""Exception raised for errors that are due to problems with the
|
||||
processed data like division by zero, numeric value out of range,
|
||||
etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class OperationalError(DatabaseError):
|
||||
"""Exception raised for errors that are related to the database's
|
||||
operation and not necessarily under the control of the programmer,
|
||||
e.g. an unexpected disconnect occurs, the data source name is not
|
||||
found, a transaction could not be processed, a memory allocation
|
||||
error occurred during processing, etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class IntegrityError(DatabaseError):
|
||||
"""Exception raised when the relational integrity of the database
|
||||
is affected, e.g. a foreign key check fails, duplicate key,
|
||||
etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class InternalError(DatabaseError):
|
||||
"""Exception raised when the database encounters an internal
|
||||
error, e.g. the cursor is not valid anymore, the transaction is
|
||||
out of sync, etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class ProgrammingError(DatabaseError):
|
||||
"""Exception raised for programming errors, e.g. table not found
|
||||
or already exists, syntax error in the SQL statement, wrong number
|
||||
of parameters specified, etc."""
|
||||
|
||||
__module__ = "MySQLdb"
|
||||
|
||||
|
||||
class NotSupportedError(DatabaseError):
|
||||
"""Exception raised in case a method or database API was used
|
||||
which is not supported by the database, e.g. requesting a
|
||||
.rollback() on a connection that does not support transaction or
|
||||
has transactions turned off."""
|
||||
|
||||
__module__ = "MySQLdb"
|
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@ -1,343 +0,0 @@
|
||||
"""
|
||||
This module implements connections for MySQLdb. Presently there is
|
||||
only one class: Connection. Others are unlikely. However, you might
|
||||
want to make your own subclasses. In most cases, you will probably
|
||||
override Connection.default_cursor with a non-standard Cursor class.
|
||||
"""
|
||||
import re
|
||||
|
||||
from . import cursors, _mysql
|
||||
from ._exceptions import (
|
||||
Warning,
|
||||
Error,
|
||||
InterfaceError,
|
||||
DataError,
|
||||
DatabaseError,
|
||||
OperationalError,
|
||||
IntegrityError,
|
||||
InternalError,
|
||||
NotSupportedError,
|
||||
ProgrammingError,
|
||||
)
|
||||
|
||||
# Mapping from MySQL charset name to Python codec name
|
||||
_charset_to_encoding = {
|
||||
"utf8mb4": "utf8",
|
||||
"utf8mb3": "utf8",
|
||||
"latin1": "cp1252",
|
||||
"koi8r": "koi8_r",
|
||||
"koi8u": "koi8_u",
|
||||
}
|
||||
|
||||
re_numeric_part = re.compile(r"^(\d+)")
|
||||
|
||||
|
||||
def numeric_part(s):
|
||||
"""Returns the leading numeric part of a string.
|
||||
|
||||
>>> numeric_part("20-alpha")
|
||||
20
|
||||
>>> numeric_part("foo")
|
||||
>>> numeric_part("16b")
|
||||
16
|
||||
"""
|
||||
|
||||
m = re_numeric_part.match(s)
|
||||
if m:
|
||||
return int(m.group(1))
|
||||
return None
|
||||
|
||||
|
||||
class Connection(_mysql.connection):
|
||||
"""MySQL Database Connection Object"""
|
||||
|
||||
default_cursor = cursors.Cursor
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""
|
||||
Create a connection to the database. It is strongly recommended
|
||||
that you only use keyword parameters. Consult the MySQL C API
|
||||
documentation for more information.
|
||||
|
||||
:param str host: host to connect
|
||||
:param str user: user to connect as
|
||||
:param str password: password to use
|
||||
:param str passwd: alias of password (deprecated)
|
||||
:param str database: database to use
|
||||
:param str db: alias of database (deprecated)
|
||||
:param int port: TCP/IP port to connect to
|
||||
:param str unix_socket: location of unix_socket to use
|
||||
:param dict conv: conversion dictionary, see MySQLdb.converters
|
||||
:param int connect_timeout:
|
||||
number of seconds to wait before the connection attempt fails.
|
||||
|
||||
:param bool compress: if set, compression is enabled
|
||||
:param str named_pipe: if set, a named pipe is used to connect (Windows only)
|
||||
:param str init_command:
|
||||
command which is run once the connection is created
|
||||
|
||||
:param str read_default_file:
|
||||
file from which default client values are read
|
||||
|
||||
:param str read_default_group:
|
||||
configuration group to use from the default file
|
||||
|
||||
:param type cursorclass:
|
||||
class object, used to create cursors (keyword only)
|
||||
|
||||
:param bool use_unicode:
|
||||
If True, text-like columns are returned as unicode objects
|
||||
using the connection's character set. Otherwise, text-like
|
||||
columns are returned as bytes. Unicode objects will always
|
||||
be encoded to the connection's character set regardless of
|
||||
this setting.
|
||||
Default to True.
|
||||
|
||||
:param str charset:
|
||||
If supplied, the connection character set will be changed
|
||||
to this character set.
|
||||
|
||||
:param str collation:
|
||||
If ``charset`` and ``collation`` are both supplied, the
|
||||
character set and collation for the current connection
|
||||
will be set.
|
||||
|
||||
If omitted, empty string, or None, the default collation
|
||||
for the ``charset`` is implied.
|
||||
|
||||
:param str auth_plugin:
|
||||
If supplied, the connection default authentication plugin will be
|
||||
changed to this value. Example values:
|
||||
`mysql_native_password` or `caching_sha2_password`
|
||||
|
||||
:param str sql_mode:
|
||||
If supplied, the session SQL mode will be changed to this
|
||||
setting.
|
||||
For more details and legal values, see the MySQL documentation.
|
||||
|
||||
:param int client_flag:
|
||||
flags to use or 0 (see MySQL docs or constants/CLIENTS.py)
|
||||
|
||||
:param bool multi_statements:
|
||||
If True, enable multi statements for clients >= 4.1.
|
||||
Defaults to True.
|
||||
|
||||
:param str ssl_mode:
|
||||
specify the security settings for connection to the server;
|
||||
see the MySQL documentation for more details
|
||||
(mysql_option(), MYSQL_OPT_SSL_MODE).
|
||||
Only one of 'DISABLED', 'PREFERRED', 'REQUIRED',
|
||||
'VERIFY_CA', 'VERIFY_IDENTITY' can be specified.
|
||||
|
||||
:param dict ssl:
|
||||
dictionary or mapping contains SSL connection parameters;
|
||||
see the MySQL documentation for more details
|
||||
(mysql_ssl_set()). If this is set, and the client does not
|
||||
support SSL, NotSupportedError will be raised.
|
||||
Since mysqlclient 2.2.4, ssl=True is alias of ssl_mode=REQUIRED
|
||||
for better compatibility with PyMySQL and MariaDB.
|
||||
|
||||
:param bool local_infile:
|
||||
enables LOAD LOCAL INFILE; zero disables
|
||||
|
||||
:param bool autocommit:
|
||||
If False (default), autocommit is disabled.
|
||||
If True, autocommit is enabled.
|
||||
If None, autocommit isn't set and server default is used.
|
||||
|
||||
:param bool binary_prefix:
|
||||
If set, the '_binary' prefix will be used for raw byte query
|
||||
arguments (e.g. Binary). This is disabled by default.
|
||||
|
||||
There are a number of undocumented, non-standard methods. See the
|
||||
documentation for the MySQL C API for some hints on what they do.
|
||||
"""
|
||||
from MySQLdb.constants import CLIENT, FIELD_TYPE
|
||||
from MySQLdb.converters import conversions, _bytes_or_str
|
||||
|
||||
kwargs2 = kwargs.copy()
|
||||
|
||||
if "db" in kwargs2:
|
||||
kwargs2["database"] = kwargs2.pop("db")
|
||||
if "passwd" in kwargs2:
|
||||
kwargs2["password"] = kwargs2.pop("passwd")
|
||||
|
||||
if "conv" in kwargs:
|
||||
conv = kwargs["conv"]
|
||||
else:
|
||||
conv = conversions
|
||||
|
||||
conv2 = {}
|
||||
for k, v in conv.items():
|
||||
if isinstance(k, int) and isinstance(v, list):
|
||||
conv2[k] = v[:]
|
||||
else:
|
||||
conv2[k] = v
|
||||
kwargs2["conv"] = conv2
|
||||
|
||||
cursorclass = kwargs2.pop("cursorclass", self.default_cursor)
|
||||
charset = kwargs2.get("charset", "")
|
||||
collation = kwargs2.pop("collation", "")
|
||||
use_unicode = kwargs2.pop("use_unicode", True)
|
||||
sql_mode = kwargs2.pop("sql_mode", "")
|
||||
self._binary_prefix = kwargs2.pop("binary_prefix", False)
|
||||
|
||||
client_flag = kwargs.get("client_flag", 0)
|
||||
client_flag |= CLIENT.MULTI_RESULTS
|
||||
multi_statements = kwargs2.pop("multi_statements", True)
|
||||
if multi_statements:
|
||||
client_flag |= CLIENT.MULTI_STATEMENTS
|
||||
kwargs2["client_flag"] = client_flag
|
||||
|
||||
# PEP-249 requires autocommit to be initially off
|
||||
autocommit = kwargs2.pop("autocommit", False)
|
||||
|
||||
super().__init__(*args, **kwargs2)
|
||||
self.cursorclass = cursorclass
|
||||
self.encoders = {
|
||||
k: v
|
||||
for k, v in conv.items()
|
||||
if type(k) is not int # noqa: E721
|
||||
}
|
||||
|
||||
self._server_version = tuple(
|
||||
[numeric_part(n) for n in self.get_server_info().split(".")[:2]]
|
||||
)
|
||||
|
||||
self.encoding = "ascii" # overridden in set_character_set()
|
||||
|
||||
if not charset:
|
||||
charset = self.character_set_name()
|
||||
self.set_character_set(charset, collation)
|
||||
|
||||
if sql_mode:
|
||||
self.set_sql_mode(sql_mode)
|
||||
|
||||
if use_unicode:
|
||||
for t in (
|
||||
FIELD_TYPE.STRING,
|
||||
FIELD_TYPE.VAR_STRING,
|
||||
FIELD_TYPE.VARCHAR,
|
||||
FIELD_TYPE.TINY_BLOB,
|
||||
FIELD_TYPE.MEDIUM_BLOB,
|
||||
FIELD_TYPE.LONG_BLOB,
|
||||
FIELD_TYPE.BLOB,
|
||||
):
|
||||
self.converter[t] = _bytes_or_str
|
||||
# Unlike other string/blob types, JSON is always text.
|
||||
# MySQL may return JSON with charset==binary.
|
||||
self.converter[FIELD_TYPE.JSON] = str
|
||||
|
||||
self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
|
||||
if self._transactional:
|
||||
if autocommit is not None:
|
||||
self.autocommit(autocommit)
|
||||
self.messages = []
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
|
||||
def autocommit(self, on):
|
||||
on = bool(on)
|
||||
if self.get_autocommit() != on:
|
||||
_mysql.connection.autocommit(self, on)
|
||||
|
||||
def cursor(self, cursorclass=None):
|
||||
"""
|
||||
Create a cursor on which queries may be performed. The
|
||||
optional cursorclass parameter is used to create the
|
||||
Cursor. By default, self.cursorclass=cursors.Cursor is
|
||||
used.
|
||||
"""
|
||||
return (cursorclass or self.cursorclass)(self)
|
||||
|
||||
def query(self, query):
|
||||
# Since _mysql releases GIL while querying, we need immutable buffer.
|
||||
if isinstance(query, bytearray):
|
||||
query = bytes(query)
|
||||
_mysql.connection.query(self, query)
|
||||
|
||||
def _bytes_literal(self, bs):
|
||||
assert isinstance(bs, (bytes, bytearray))
|
||||
x = self.string_literal(bs) # x is escaped and quoted bytes
|
||||
if self._binary_prefix:
|
||||
return b"_binary" + x
|
||||
return x
|
||||
|
||||
def _tuple_literal(self, t):
|
||||
return b"(%s)" % (b",".join(map(self.literal, t)))
|
||||
|
||||
def literal(self, o):
|
||||
"""If o is a single object, returns an SQL literal as a string.
|
||||
If o is a non-string sequence, the items of the sequence are
|
||||
converted and returned as a sequence.
|
||||
|
||||
Non-standard. For internal use; do not use this in your
|
||||
applications.
|
||||
"""
|
||||
if isinstance(o, str):
|
||||
s = self.string_literal(o.encode(self.encoding))
|
||||
elif isinstance(o, bytearray):
|
||||
s = self._bytes_literal(o)
|
||||
elif isinstance(o, bytes):
|
||||
s = self._bytes_literal(o)
|
||||
elif isinstance(o, (tuple, list)):
|
||||
s = self._tuple_literal(o)
|
||||
else:
|
||||
s = self.escape(o, self.encoders)
|
||||
if isinstance(s, str):
|
||||
s = s.encode(self.encoding)
|
||||
assert isinstance(s, bytes)
|
||||
return s
|
||||
|
||||
def begin(self):
|
||||
"""Explicitly begin a connection.
|
||||
|
||||
This method is not used when autocommit=False (default).
|
||||
"""
|
||||
self.query(b"BEGIN")
|
||||
|
||||
def set_character_set(self, charset, collation=None):
|
||||
"""Set the connection character set to charset."""
|
||||
super().set_character_set(charset)
|
||||
self.encoding = _charset_to_encoding.get(charset, charset)
|
||||
if collation:
|
||||
self.query(f"SET NAMES {charset} COLLATE {collation}")
|
||||
self.store_result()
|
||||
|
||||
def set_sql_mode(self, sql_mode):
|
||||
"""Set the connection sql_mode. See MySQL documentation for
|
||||
legal values."""
|
||||
if self._server_version < (4, 1):
|
||||
raise NotSupportedError("server is too old to set sql_mode")
|
||||
self.query("SET SESSION sql_mode='%s'" % sql_mode)
|
||||
self.store_result()
|
||||
|
||||
def show_warnings(self):
|
||||
"""Return detailed information about warnings as a
|
||||
sequence of tuples of (Level, Code, Message). This
|
||||
is only supported in MySQL-4.1 and up. If your server
|
||||
is an earlier version, an empty sequence is returned."""
|
||||
if self._server_version < (4, 1):
|
||||
return ()
|
||||
self.query("SHOW WARNINGS")
|
||||
r = self.store_result()
|
||||
warnings = r.fetch_row(0)
|
||||
return warnings
|
||||
|
||||
Warning = Warning
|
||||
Error = Error
|
||||
InterfaceError = InterfaceError
|
||||
DatabaseError = DatabaseError
|
||||
DataError = DataError
|
||||
OperationalError = OperationalError
|
||||
IntegrityError = IntegrityError
|
||||
InternalError = InternalError
|
||||
ProgrammingError = ProgrammingError
|
||||
NotSupportedError = NotSupportedError
|
||||
|
||||
|
||||
# vim: colorcolumn=100
|
@ -1,27 +0,0 @@
|
||||
"""MySQL CLIENT constants
|
||||
|
||||
These constants are used when creating the connection. Use bitwise-OR
|
||||
(|) to combine options together, and pass them as the client_flags
|
||||
parameter to MySQLdb.Connection. For more information on these flags,
|
||||
see the MySQL C API documentation for mysql_real_connect().
|
||||
|
||||
"""
|
||||
|
||||
LONG_PASSWORD = 1
|
||||
FOUND_ROWS = 2
|
||||
LONG_FLAG = 4
|
||||
CONNECT_WITH_DB = 8
|
||||
NO_SCHEMA = 16
|
||||
COMPRESS = 32
|
||||
ODBC = 64
|
||||
LOCAL_FILES = 128
|
||||
IGNORE_SPACE = 256
|
||||
CHANGE_USER = 512
|
||||
INTERACTIVE = 1024
|
||||
SSL = 2048
|
||||
IGNORE_SIGPIPE = 4096
|
||||
TRANSACTIONS = 8192 # mysql_com.h was WRONG prior to 3.23.35
|
||||
RESERVED = 16384
|
||||
SECURE_CONNECTION = 32768
|
||||
MULTI_STATEMENTS = 65536
|
||||
MULTI_RESULTS = 131072
|
@ -1,105 +0,0 @@
|
||||
"""MySQL Connection Errors
|
||||
|
||||
Nearly all of these raise OperationalError. COMMANDS_OUT_OF_SYNC
|
||||
raises ProgrammingError.
|
||||
|
||||
"""
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
Usage: python CR.py [/path/to/mysql/errmsg.h ...] >> CR.py
|
||||
"""
|
||||
import fileinput
|
||||
import re
|
||||
|
||||
data = {}
|
||||
error_last = None
|
||||
for line in fileinput.input():
|
||||
line = re.sub(r"/\*.*?\*/", "", line)
|
||||
m = re.match(r"^\s*#define\s+CR_([A-Z0-9_]+)\s+(\d+)(\s.*|$)", line)
|
||||
if m:
|
||||
name = m.group(1)
|
||||
value = int(m.group(2))
|
||||
if name == "ERROR_LAST":
|
||||
if error_last is None or error_last < value:
|
||||
error_last = value
|
||||
continue
|
||||
if value not in data:
|
||||
data[value] = set()
|
||||
data[value].add(name)
|
||||
for value, names in sorted(data.items()):
|
||||
for name in sorted(names):
|
||||
print(f"{name} = {value}")
|
||||
if error_last is not None:
|
||||
print("ERROR_LAST = %s" % error_last)
|
||||
|
||||
|
||||
ERROR_FIRST = 2000
|
||||
MIN_ERROR = 2000
|
||||
UNKNOWN_ERROR = 2000
|
||||
SOCKET_CREATE_ERROR = 2001
|
||||
CONNECTION_ERROR = 2002
|
||||
CONN_HOST_ERROR = 2003
|
||||
IPSOCK_ERROR = 2004
|
||||
UNKNOWN_HOST = 2005
|
||||
SERVER_GONE_ERROR = 2006
|
||||
VERSION_ERROR = 2007
|
||||
OUT_OF_MEMORY = 2008
|
||||
WRONG_HOST_INFO = 2009
|
||||
LOCALHOST_CONNECTION = 2010
|
||||
TCP_CONNECTION = 2011
|
||||
SERVER_HANDSHAKE_ERR = 2012
|
||||
SERVER_LOST = 2013
|
||||
COMMANDS_OUT_OF_SYNC = 2014
|
||||
NAMEDPIPE_CONNECTION = 2015
|
||||
NAMEDPIPEWAIT_ERROR = 2016
|
||||
NAMEDPIPEOPEN_ERROR = 2017
|
||||
NAMEDPIPESETSTATE_ERROR = 2018
|
||||
CANT_READ_CHARSET = 2019
|
||||
NET_PACKET_TOO_LARGE = 2020
|
||||
EMBEDDED_CONNECTION = 2021
|
||||
PROBE_SLAVE_STATUS = 2022
|
||||
PROBE_SLAVE_HOSTS = 2023
|
||||
PROBE_SLAVE_CONNECT = 2024
|
||||
PROBE_MASTER_CONNECT = 2025
|
||||
SSL_CONNECTION_ERROR = 2026
|
||||
MALFORMED_PACKET = 2027
|
||||
WRONG_LICENSE = 2028
|
||||
NULL_POINTER = 2029
|
||||
NO_PREPARE_STMT = 2030
|
||||
PARAMS_NOT_BOUND = 2031
|
||||
DATA_TRUNCATED = 2032
|
||||
NO_PARAMETERS_EXISTS = 2033
|
||||
INVALID_PARAMETER_NO = 2034
|
||||
INVALID_BUFFER_USE = 2035
|
||||
UNSUPPORTED_PARAM_TYPE = 2036
|
||||
SHARED_MEMORY_CONNECTION = 2037
|
||||
SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
|
||||
SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
|
||||
SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
|
||||
SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
|
||||
SHARED_MEMORY_FILE_MAP_ERROR = 2042
|
||||
SHARED_MEMORY_MAP_ERROR = 2043
|
||||
SHARED_MEMORY_EVENT_ERROR = 2044
|
||||
SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
|
||||
SHARED_MEMORY_CONNECT_SET_ERROR = 2046
|
||||
CONN_UNKNOW_PROTOCOL = 2047
|
||||
INVALID_CONN_HANDLE = 2048
|
||||
UNUSED_1 = 2049
|
||||
FETCH_CANCELED = 2050
|
||||
NO_DATA = 2051
|
||||
NO_STMT_METADATA = 2052
|
||||
NO_RESULT_SET = 2053
|
||||
NOT_IMPLEMENTED = 2054
|
||||
SERVER_LOST_EXTENDED = 2055
|
||||
STMT_CLOSED = 2056
|
||||
NEW_STMT_METADATA = 2057
|
||||
ALREADY_CONNECTED = 2058
|
||||
AUTH_PLUGIN_CANNOT_LOAD = 2059
|
||||
DUPLICATE_CONNECTION_ATTR = 2060
|
||||
AUTH_PLUGIN_ERR = 2061
|
||||
INSECURE_API_ERR = 2062
|
||||
FILE_NAME_TOO_LONG = 2063
|
||||
SSL_FIPS_MODE_ERR = 2064
|
||||
MAX_ERROR = 2999
|
||||
ERROR_LAST = 2064
|
@ -1,827 +0,0 @@
|
||||
"""MySQL ER Constants
|
||||
|
||||
These constants are error codes for the bulk of the error conditions
|
||||
that may occur.
|
||||
"""
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
Usage: python ER.py [/path/to/mysql/mysqld_error.h ...] >> ER.py
|
||||
"""
|
||||
import fileinput
|
||||
import re
|
||||
|
||||
data = {}
|
||||
error_last = None
|
||||
for line in fileinput.input():
|
||||
line = re.sub(r"/\*.*?\*/", "", line)
|
||||
m = re.match(r"^\s*#define\s+((ER|WARN)_[A-Z0-9_]+)\s+(\d+)\s*", line)
|
||||
if m:
|
||||
name = m.group(1)
|
||||
if name.startswith("ER_"):
|
||||
name = name[3:]
|
||||
value = int(m.group(3))
|
||||
if name == "ERROR_LAST":
|
||||
if error_last is None or error_last < value:
|
||||
error_last = value
|
||||
continue
|
||||
if value not in data:
|
||||
data[value] = set()
|
||||
data[value].add(name)
|
||||
for value, names in sorted(data.items()):
|
||||
for name in sorted(names):
|
||||
print(f"{name} = {value}")
|
||||
if error_last is not None:
|
||||
print("ERROR_LAST = %s" % error_last)
|
||||
|
||||
|
||||
ERROR_FIRST = 1000
|
||||
NO = 1002
|
||||
YES = 1003
|
||||
CANT_CREATE_FILE = 1004
|
||||
CANT_CREATE_TABLE = 1005
|
||||
CANT_CREATE_DB = 1006
|
||||
DB_CREATE_EXISTS = 1007
|
||||
DB_DROP_EXISTS = 1008
|
||||
DB_DROP_RMDIR = 1010
|
||||
CANT_FIND_SYSTEM_REC = 1012
|
||||
CANT_GET_STAT = 1013
|
||||
CANT_LOCK = 1015
|
||||
CANT_OPEN_FILE = 1016
|
||||
FILE_NOT_FOUND = 1017
|
||||
CANT_READ_DIR = 1018
|
||||
CHECKREAD = 1020
|
||||
DUP_KEY = 1022
|
||||
ERROR_ON_READ = 1024
|
||||
ERROR_ON_RENAME = 1025
|
||||
ERROR_ON_WRITE = 1026
|
||||
FILE_USED = 1027
|
||||
FILSORT_ABORT = 1028
|
||||
GET_ERRNO = 1030
|
||||
ILLEGAL_HA = 1031
|
||||
KEY_NOT_FOUND = 1032
|
||||
NOT_FORM_FILE = 1033
|
||||
NOT_KEYFILE = 1034
|
||||
OLD_KEYFILE = 1035
|
||||
OPEN_AS_READONLY = 1036
|
||||
OUTOFMEMORY = 1037
|
||||
OUT_OF_SORTMEMORY = 1038
|
||||
CON_COUNT_ERROR = 1040
|
||||
OUT_OF_RESOURCES = 1041
|
||||
BAD_HOST_ERROR = 1042
|
||||
HANDSHAKE_ERROR = 1043
|
||||
DBACCESS_DENIED_ERROR = 1044
|
||||
ACCESS_DENIED_ERROR = 1045
|
||||
NO_DB_ERROR = 1046
|
||||
UNKNOWN_COM_ERROR = 1047
|
||||
BAD_NULL_ERROR = 1048
|
||||
BAD_DB_ERROR = 1049
|
||||
TABLE_EXISTS_ERROR = 1050
|
||||
BAD_TABLE_ERROR = 1051
|
||||
NON_UNIQ_ERROR = 1052
|
||||
SERVER_SHUTDOWN = 1053
|
||||
BAD_FIELD_ERROR = 1054
|
||||
WRONG_FIELD_WITH_GROUP = 1055
|
||||
WRONG_GROUP_FIELD = 1056
|
||||
WRONG_SUM_SELECT = 1057
|
||||
WRONG_VALUE_COUNT = 1058
|
||||
TOO_LONG_IDENT = 1059
|
||||
DUP_FIELDNAME = 1060
|
||||
DUP_KEYNAME = 1061
|
||||
DUP_ENTRY = 1062
|
||||
WRONG_FIELD_SPEC = 1063
|
||||
PARSE_ERROR = 1064
|
||||
EMPTY_QUERY = 1065
|
||||
NONUNIQ_TABLE = 1066
|
||||
INVALID_DEFAULT = 1067
|
||||
MULTIPLE_PRI_KEY = 1068
|
||||
TOO_MANY_KEYS = 1069
|
||||
TOO_MANY_KEY_PARTS = 1070
|
||||
TOO_LONG_KEY = 1071
|
||||
KEY_COLUMN_DOES_NOT_EXITS = 1072
|
||||
BLOB_USED_AS_KEY = 1073
|
||||
TOO_BIG_FIELDLENGTH = 1074
|
||||
WRONG_AUTO_KEY = 1075
|
||||
READY = 1076
|
||||
SHUTDOWN_COMPLETE = 1079
|
||||
FORCING_CLOSE = 1080
|
||||
IPSOCK_ERROR = 1081
|
||||
NO_SUCH_INDEX = 1082
|
||||
WRONG_FIELD_TERMINATORS = 1083
|
||||
BLOBS_AND_NO_TERMINATED = 1084
|
||||
TEXTFILE_NOT_READABLE = 1085
|
||||
FILE_EXISTS_ERROR = 1086
|
||||
LOAD_INFO = 1087
|
||||
ALTER_INFO = 1088
|
||||
WRONG_SUB_KEY = 1089
|
||||
CANT_REMOVE_ALL_FIELDS = 1090
|
||||
CANT_DROP_FIELD_OR_KEY = 1091
|
||||
INSERT_INFO = 1092
|
||||
UPDATE_TABLE_USED = 1093
|
||||
NO_SUCH_THREAD = 1094
|
||||
KILL_DENIED_ERROR = 1095
|
||||
NO_TABLES_USED = 1096
|
||||
TOO_BIG_SET = 1097
|
||||
NO_UNIQUE_LOGFILE = 1098
|
||||
TABLE_NOT_LOCKED_FOR_WRITE = 1099
|
||||
TABLE_NOT_LOCKED = 1100
|
||||
BLOB_CANT_HAVE_DEFAULT = 1101
|
||||
WRONG_DB_NAME = 1102
|
||||
WRONG_TABLE_NAME = 1103
|
||||
TOO_BIG_SELECT = 1104
|
||||
UNKNOWN_ERROR = 1105
|
||||
UNKNOWN_PROCEDURE = 1106
|
||||
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
|
||||
WRONG_PARAMETERS_TO_PROCEDURE = 1108
|
||||
UNKNOWN_TABLE = 1109
|
||||
FIELD_SPECIFIED_TWICE = 1110
|
||||
INVALID_GROUP_FUNC_USE = 1111
|
||||
UNSUPPORTED_EXTENSION = 1112
|
||||
TABLE_MUST_HAVE_COLUMNS = 1113
|
||||
RECORD_FILE_FULL = 1114
|
||||
UNKNOWN_CHARACTER_SET = 1115
|
||||
TOO_MANY_TABLES = 1116
|
||||
TOO_MANY_FIELDS = 1117
|
||||
TOO_BIG_ROWSIZE = 1118
|
||||
STACK_OVERRUN = 1119
|
||||
WRONG_OUTER_JOIN_UNUSED = 1120
|
||||
NULL_COLUMN_IN_INDEX = 1121
|
||||
CANT_FIND_UDF = 1122
|
||||
CANT_INITIALIZE_UDF = 1123
|
||||
UDF_NO_PATHS = 1124
|
||||
UDF_EXISTS = 1125
|
||||
CANT_OPEN_LIBRARY = 1126
|
||||
CANT_FIND_DL_ENTRY = 1127
|
||||
FUNCTION_NOT_DEFINED = 1128
|
||||
HOST_IS_BLOCKED = 1129
|
||||
HOST_NOT_PRIVILEGED = 1130
|
||||
PASSWORD_ANONYMOUS_USER = 1131
|
||||
PASSWORD_NOT_ALLOWED = 1132
|
||||
PASSWORD_NO_MATCH = 1133
|
||||
UPDATE_INFO = 1134
|
||||
CANT_CREATE_THREAD = 1135
|
||||
WRONG_VALUE_COUNT_ON_ROW = 1136
|
||||
CANT_REOPEN_TABLE = 1137
|
||||
INVALID_USE_OF_NULL = 1138
|
||||
REGEXP_ERROR = 1139
|
||||
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
|
||||
NONEXISTING_GRANT = 1141
|
||||
TABLEACCESS_DENIED_ERROR = 1142
|
||||
COLUMNACCESS_DENIED_ERROR = 1143
|
||||
ILLEGAL_GRANT_FOR_TABLE = 1144
|
||||
GRANT_WRONG_HOST_OR_USER = 1145
|
||||
NO_SUCH_TABLE = 1146
|
||||
NONEXISTING_TABLE_GRANT = 1147
|
||||
NOT_ALLOWED_COMMAND = 1148
|
||||
SYNTAX_ERROR = 1149
|
||||
ABORTING_CONNECTION = 1152
|
||||
NET_PACKET_TOO_LARGE = 1153
|
||||
NET_READ_ERROR_FROM_PIPE = 1154
|
||||
NET_FCNTL_ERROR = 1155
|
||||
NET_PACKETS_OUT_OF_ORDER = 1156
|
||||
NET_UNCOMPRESS_ERROR = 1157
|
||||
NET_READ_ERROR = 1158
|
||||
NET_READ_INTERRUPTED = 1159
|
||||
NET_ERROR_ON_WRITE = 1160
|
||||
NET_WRITE_INTERRUPTED = 1161
|
||||
TOO_LONG_STRING = 1162
|
||||
TABLE_CANT_HANDLE_BLOB = 1163
|
||||
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
|
||||
WRONG_COLUMN_NAME = 1166
|
||||
WRONG_KEY_COLUMN = 1167
|
||||
WRONG_MRG_TABLE = 1168
|
||||
DUP_UNIQUE = 1169
|
||||
BLOB_KEY_WITHOUT_LENGTH = 1170
|
||||
PRIMARY_CANT_HAVE_NULL = 1171
|
||||
TOO_MANY_ROWS = 1172
|
||||
REQUIRES_PRIMARY_KEY = 1173
|
||||
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
|
||||
KEY_DOES_NOT_EXITS = 1176
|
||||
CHECK_NO_SUCH_TABLE = 1177
|
||||
CHECK_NOT_IMPLEMENTED = 1178
|
||||
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
|
||||
ERROR_DURING_COMMIT = 1180
|
||||
ERROR_DURING_ROLLBACK = 1181
|
||||
ERROR_DURING_FLUSH_LOGS = 1182
|
||||
NEW_ABORTING_CONNECTION = 1184
|
||||
MASTER = 1188
|
||||
MASTER_NET_READ = 1189
|
||||
MASTER_NET_WRITE = 1190
|
||||
FT_MATCHING_KEY_NOT_FOUND = 1191
|
||||
LOCK_OR_ACTIVE_TRANSACTION = 1192
|
||||
UNKNOWN_SYSTEM_VARIABLE = 1193
|
||||
CRASHED_ON_USAGE = 1194
|
||||
CRASHED_ON_REPAIR = 1195
|
||||
WARNING_NOT_COMPLETE_ROLLBACK = 1196
|
||||
TRANS_CACHE_FULL = 1197
|
||||
SLAVE_NOT_RUNNING = 1199
|
||||
BAD_SLAVE = 1200
|
||||
MASTER_INFO = 1201
|
||||
SLAVE_THREAD = 1202
|
||||
TOO_MANY_USER_CONNECTIONS = 1203
|
||||
SET_CONSTANTS_ONLY = 1204
|
||||
LOCK_WAIT_TIMEOUT = 1205
|
||||
LOCK_TABLE_FULL = 1206
|
||||
READ_ONLY_TRANSACTION = 1207
|
||||
WRONG_ARGUMENTS = 1210
|
||||
NO_PERMISSION_TO_CREATE_USER = 1211
|
||||
LOCK_DEADLOCK = 1213
|
||||
TABLE_CANT_HANDLE_FT = 1214
|
||||
CANNOT_ADD_FOREIGN = 1215
|
||||
NO_REFERENCED_ROW = 1216
|
||||
ROW_IS_REFERENCED = 1217
|
||||
CONNECT_TO_MASTER = 1218
|
||||
ERROR_WHEN_EXECUTING_COMMAND = 1220
|
||||
WRONG_USAGE = 1221
|
||||
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
|
||||
CANT_UPDATE_WITH_READLOCK = 1223
|
||||
MIXING_NOT_ALLOWED = 1224
|
||||
DUP_ARGUMENT = 1225
|
||||
USER_LIMIT_REACHED = 1226
|
||||
SPECIFIC_ACCESS_DENIED_ERROR = 1227
|
||||
LOCAL_VARIABLE = 1228
|
||||
GLOBAL_VARIABLE = 1229
|
||||
NO_DEFAULT = 1230
|
||||
WRONG_VALUE_FOR_VAR = 1231
|
||||
WRONG_TYPE_FOR_VAR = 1232
|
||||
VAR_CANT_BE_READ = 1233
|
||||
CANT_USE_OPTION_HERE = 1234
|
||||
NOT_SUPPORTED_YET = 1235
|
||||
MASTER_FATAL_ERROR_READING_BINLOG = 1236
|
||||
SLAVE_IGNORED_TABLE = 1237
|
||||
INCORRECT_GLOBAL_LOCAL_VAR = 1238
|
||||
WRONG_FK_DEF = 1239
|
||||
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
|
||||
OPERAND_COLUMNS = 1241
|
||||
SUBQUERY_NO_1_ROW = 1242
|
||||
UNKNOWN_STMT_HANDLER = 1243
|
||||
CORRUPT_HELP_DB = 1244
|
||||
AUTO_CONVERT = 1246
|
||||
ILLEGAL_REFERENCE = 1247
|
||||
DERIVED_MUST_HAVE_ALIAS = 1248
|
||||
SELECT_REDUCED = 1249
|
||||
TABLENAME_NOT_ALLOWED_HERE = 1250
|
||||
NOT_SUPPORTED_AUTH_MODE = 1251
|
||||
SPATIAL_CANT_HAVE_NULL = 1252
|
||||
COLLATION_CHARSET_MISMATCH = 1253
|
||||
TOO_BIG_FOR_UNCOMPRESS = 1256
|
||||
ZLIB_Z_MEM_ERROR = 1257
|
||||
ZLIB_Z_BUF_ERROR = 1258
|
||||
ZLIB_Z_DATA_ERROR = 1259
|
||||
CUT_VALUE_GROUP_CONCAT = 1260
|
||||
WARN_TOO_FEW_RECORDS = 1261
|
||||
WARN_TOO_MANY_RECORDS = 1262
|
||||
WARN_NULL_TO_NOTNULL = 1263
|
||||
WARN_DATA_OUT_OF_RANGE = 1264
|
||||
WARN_DATA_TRUNCATED = 1265
|
||||
WARN_USING_OTHER_HANDLER = 1266
|
||||
CANT_AGGREGATE_2COLLATIONS = 1267
|
||||
REVOKE_GRANTS = 1269
|
||||
CANT_AGGREGATE_3COLLATIONS = 1270
|
||||
CANT_AGGREGATE_NCOLLATIONS = 1271
|
||||
VARIABLE_IS_NOT_STRUCT = 1272
|
||||
UNKNOWN_COLLATION = 1273
|
||||
SLAVE_IGNORED_SSL_PARAMS = 1274
|
||||
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
|
||||
WARN_FIELD_RESOLVED = 1276
|
||||
BAD_SLAVE_UNTIL_COND = 1277
|
||||
MISSING_SKIP_SLAVE = 1278
|
||||
UNTIL_COND_IGNORED = 1279
|
||||
WRONG_NAME_FOR_INDEX = 1280
|
||||
WRONG_NAME_FOR_CATALOG = 1281
|
||||
BAD_FT_COLUMN = 1283
|
||||
UNKNOWN_KEY_CACHE = 1284
|
||||
WARN_HOSTNAME_WONT_WORK = 1285
|
||||
UNKNOWN_STORAGE_ENGINE = 1286
|
||||
WARN_DEPRECATED_SYNTAX = 1287
|
||||
NON_UPDATABLE_TABLE = 1288
|
||||
FEATURE_DISABLED = 1289
|
||||
OPTION_PREVENTS_STATEMENT = 1290
|
||||
DUPLICATED_VALUE_IN_TYPE = 1291
|
||||
TRUNCATED_WRONG_VALUE = 1292
|
||||
INVALID_ON_UPDATE = 1294
|
||||
UNSUPPORTED_PS = 1295
|
||||
GET_ERRMSG = 1296
|
||||
GET_TEMPORARY_ERRMSG = 1297
|
||||
UNKNOWN_TIME_ZONE = 1298
|
||||
WARN_INVALID_TIMESTAMP = 1299
|
||||
INVALID_CHARACTER_STRING = 1300
|
||||
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
|
||||
CONFLICTING_DECLARATIONS = 1302
|
||||
SP_NO_RECURSIVE_CREATE = 1303
|
||||
SP_ALREADY_EXISTS = 1304
|
||||
SP_DOES_NOT_EXIST = 1305
|
||||
SP_DROP_FAILED = 1306
|
||||
SP_STORE_FAILED = 1307
|
||||
SP_LILABEL_MISMATCH = 1308
|
||||
SP_LABEL_REDEFINE = 1309
|
||||
SP_LABEL_MISMATCH = 1310
|
||||
SP_UNINIT_VAR = 1311
|
||||
SP_BADSELECT = 1312
|
||||
SP_BADRETURN = 1313
|
||||
SP_BADSTATEMENT = 1314
|
||||
UPDATE_LOG_DEPRECATED_IGNORED = 1315
|
||||
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
|
||||
QUERY_INTERRUPTED = 1317
|
||||
SP_WRONG_NO_OF_ARGS = 1318
|
||||
SP_COND_MISMATCH = 1319
|
||||
SP_NORETURN = 1320
|
||||
SP_NORETURNEND = 1321
|
||||
SP_BAD_CURSOR_QUERY = 1322
|
||||
SP_BAD_CURSOR_SELECT = 1323
|
||||
SP_CURSOR_MISMATCH = 1324
|
||||
SP_CURSOR_ALREADY_OPEN = 1325
|
||||
SP_CURSOR_NOT_OPEN = 1326
|
||||
SP_UNDECLARED_VAR = 1327
|
||||
SP_WRONG_NO_OF_FETCH_ARGS = 1328
|
||||
SP_FETCH_NO_DATA = 1329
|
||||
SP_DUP_PARAM = 1330
|
||||
SP_DUP_VAR = 1331
|
||||
SP_DUP_COND = 1332
|
||||
SP_DUP_CURS = 1333
|
||||
SP_CANT_ALTER = 1334
|
||||
SP_SUBSELECT_NYI = 1335
|
||||
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
|
||||
SP_VARCOND_AFTER_CURSHNDLR = 1337
|
||||
SP_CURSOR_AFTER_HANDLER = 1338
|
||||
SP_CASE_NOT_FOUND = 1339
|
||||
FPARSER_TOO_BIG_FILE = 1340
|
||||
FPARSER_BAD_HEADER = 1341
|
||||
FPARSER_EOF_IN_COMMENT = 1342
|
||||
FPARSER_ERROR_IN_PARAMETER = 1343
|
||||
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
|
||||
VIEW_NO_EXPLAIN = 1345
|
||||
WRONG_OBJECT = 1347
|
||||
NONUPDATEABLE_COLUMN = 1348
|
||||
VIEW_SELECT_CLAUSE = 1350
|
||||
VIEW_SELECT_VARIABLE = 1351
|
||||
VIEW_SELECT_TMPTABLE = 1352
|
||||
VIEW_WRONG_LIST = 1353
|
||||
WARN_VIEW_MERGE = 1354
|
||||
WARN_VIEW_WITHOUT_KEY = 1355
|
||||
VIEW_INVALID = 1356
|
||||
SP_NO_DROP_SP = 1357
|
||||
TRG_ALREADY_EXISTS = 1359
|
||||
TRG_DOES_NOT_EXIST = 1360
|
||||
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
|
||||
TRG_CANT_CHANGE_ROW = 1362
|
||||
TRG_NO_SUCH_ROW_IN_TRG = 1363
|
||||
NO_DEFAULT_FOR_FIELD = 1364
|
||||
DIVISION_BY_ZERO = 1365
|
||||
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
|
||||
ILLEGAL_VALUE_FOR_TYPE = 1367
|
||||
VIEW_NONUPD_CHECK = 1368
|
||||
VIEW_CHECK_FAILED = 1369
|
||||
PROCACCESS_DENIED_ERROR = 1370
|
||||
RELAY_LOG_FAIL = 1371
|
||||
UNKNOWN_TARGET_BINLOG = 1373
|
||||
IO_ERR_LOG_INDEX_READ = 1374
|
||||
BINLOG_PURGE_PROHIBITED = 1375
|
||||
FSEEK_FAIL = 1376
|
||||
BINLOG_PURGE_FATAL_ERR = 1377
|
||||
LOG_IN_USE = 1378
|
||||
LOG_PURGE_UNKNOWN_ERR = 1379
|
||||
RELAY_LOG_INIT = 1380
|
||||
NO_BINARY_LOGGING = 1381
|
||||
RESERVED_SYNTAX = 1382
|
||||
PS_MANY_PARAM = 1390
|
||||
KEY_PART_0 = 1391
|
||||
VIEW_CHECKSUM = 1392
|
||||
VIEW_MULTIUPDATE = 1393
|
||||
VIEW_NO_INSERT_FIELD_LIST = 1394
|
||||
VIEW_DELETE_MERGE_VIEW = 1395
|
||||
CANNOT_USER = 1396
|
||||
XAER_NOTA = 1397
|
||||
XAER_INVAL = 1398
|
||||
XAER_RMFAIL = 1399
|
||||
XAER_OUTSIDE = 1400
|
||||
XAER_RMERR = 1401
|
||||
XA_RBROLLBACK = 1402
|
||||
NONEXISTING_PROC_GRANT = 1403
|
||||
PROC_AUTO_GRANT_FAIL = 1404
|
||||
PROC_AUTO_REVOKE_FAIL = 1405
|
||||
DATA_TOO_LONG = 1406
|
||||
SP_BAD_SQLSTATE = 1407
|
||||
STARTUP = 1408
|
||||
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
|
||||
CANT_CREATE_USER_WITH_GRANT = 1410
|
||||
WRONG_VALUE_FOR_TYPE = 1411
|
||||
TABLE_DEF_CHANGED = 1412
|
||||
SP_DUP_HANDLER = 1413
|
||||
SP_NOT_VAR_ARG = 1414
|
||||
SP_NO_RETSET = 1415
|
||||
CANT_CREATE_GEOMETRY_OBJECT = 1416
|
||||
BINLOG_UNSAFE_ROUTINE = 1418
|
||||
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
|
||||
STMT_HAS_NO_OPEN_CURSOR = 1421
|
||||
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
|
||||
NO_DEFAULT_FOR_VIEW_FIELD = 1423
|
||||
SP_NO_RECURSION = 1424
|
||||
TOO_BIG_SCALE = 1425
|
||||
TOO_BIG_PRECISION = 1426
|
||||
M_BIGGER_THAN_D = 1427
|
||||
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
|
||||
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
|
||||
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
|
||||
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
|
||||
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
|
||||
FOREIGN_DATA_STRING_INVALID = 1433
|
||||
TRG_IN_WRONG_SCHEMA = 1435
|
||||
STACK_OVERRUN_NEED_MORE = 1436
|
||||
TOO_LONG_BODY = 1437
|
||||
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
|
||||
TOO_BIG_DISPLAYWIDTH = 1439
|
||||
XAER_DUPID = 1440
|
||||
DATETIME_FUNCTION_OVERFLOW = 1441
|
||||
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
|
||||
VIEW_PREVENT_UPDATE = 1443
|
||||
PS_NO_RECURSION = 1444
|
||||
SP_CANT_SET_AUTOCOMMIT = 1445
|
||||
VIEW_FRM_NO_USER = 1447
|
||||
VIEW_OTHER_USER = 1448
|
||||
NO_SUCH_USER = 1449
|
||||
FORBID_SCHEMA_CHANGE = 1450
|
||||
ROW_IS_REFERENCED_2 = 1451
|
||||
NO_REFERENCED_ROW_2 = 1452
|
||||
SP_BAD_VAR_SHADOW = 1453
|
||||
TRG_NO_DEFINER = 1454
|
||||
OLD_FILE_FORMAT = 1455
|
||||
SP_RECURSION_LIMIT = 1456
|
||||
SP_WRONG_NAME = 1458
|
||||
TABLE_NEEDS_UPGRADE = 1459
|
||||
SP_NO_AGGREGATE = 1460
|
||||
MAX_PREPARED_STMT_COUNT_REACHED = 1461
|
||||
VIEW_RECURSIVE = 1462
|
||||
NON_GROUPING_FIELD_USED = 1463
|
||||
TABLE_CANT_HANDLE_SPKEYS = 1464
|
||||
NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
|
||||
REMOVED_SPACES = 1466
|
||||
AUTOINC_READ_FAILED = 1467
|
||||
USERNAME = 1468
|
||||
HOSTNAME = 1469
|
||||
WRONG_STRING_LENGTH = 1470
|
||||
NON_INSERTABLE_TABLE = 1471
|
||||
ADMIN_WRONG_MRG_TABLE = 1472
|
||||
TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
|
||||
NAME_BECOMES_EMPTY = 1474
|
||||
AMBIGUOUS_FIELD_TERM = 1475
|
||||
FOREIGN_SERVER_EXISTS = 1476
|
||||
FOREIGN_SERVER_DOESNT_EXIST = 1477
|
||||
ILLEGAL_HA_CREATE_OPTION = 1478
|
||||
PARTITION_REQUIRES_VALUES_ERROR = 1479
|
||||
PARTITION_WRONG_VALUES_ERROR = 1480
|
||||
PARTITION_MAXVALUE_ERROR = 1481
|
||||
PARTITION_WRONG_NO_PART_ERROR = 1484
|
||||
PARTITION_WRONG_NO_SUBPART_ERROR = 1485
|
||||
WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
|
||||
FIELD_NOT_FOUND_PART_ERROR = 1488
|
||||
INCONSISTENT_PARTITION_INFO_ERROR = 1490
|
||||
PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
|
||||
PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
|
||||
RANGE_NOT_INCREASING_ERROR = 1493
|
||||
INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
|
||||
MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
|
||||
PARTITION_ENTRY_ERROR = 1496
|
||||
MIX_HANDLER_ERROR = 1497
|
||||
PARTITION_NOT_DEFINED_ERROR = 1498
|
||||
TOO_MANY_PARTITIONS_ERROR = 1499
|
||||
SUBPARTITION_ERROR = 1500
|
||||
CANT_CREATE_HANDLER_FILE = 1501
|
||||
BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
|
||||
UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
|
||||
NO_PARTS_ERROR = 1504
|
||||
PARTITION_MGMT_ON_NONPARTITIONED = 1505
|
||||
FOREIGN_KEY_ON_PARTITIONED = 1506
|
||||
DROP_PARTITION_NON_EXISTENT = 1507
|
||||
DROP_LAST_PARTITION = 1508
|
||||
COALESCE_ONLY_ON_HASH_PARTITION = 1509
|
||||
REORG_HASH_ONLY_ON_SAME_NO = 1510
|
||||
REORG_NO_PARAM_ERROR = 1511
|
||||
ONLY_ON_RANGE_LIST_PARTITION = 1512
|
||||
ADD_PARTITION_SUBPART_ERROR = 1513
|
||||
ADD_PARTITION_NO_NEW_PARTITION = 1514
|
||||
COALESCE_PARTITION_NO_PARTITION = 1515
|
||||
REORG_PARTITION_NOT_EXIST = 1516
|
||||
SAME_NAME_PARTITION = 1517
|
||||
NO_BINLOG_ERROR = 1518
|
||||
CONSECUTIVE_REORG_PARTITIONS = 1519
|
||||
REORG_OUTSIDE_RANGE = 1520
|
||||
PARTITION_FUNCTION_FAILURE = 1521
|
||||
LIMITED_PART_RANGE = 1523
|
||||
PLUGIN_IS_NOT_LOADED = 1524
|
||||
WRONG_VALUE = 1525
|
||||
NO_PARTITION_FOR_GIVEN_VALUE = 1526
|
||||
FILEGROUP_OPTION_ONLY_ONCE = 1527
|
||||
CREATE_FILEGROUP_FAILED = 1528
|
||||
DROP_FILEGROUP_FAILED = 1529
|
||||
TABLESPACE_AUTO_EXTEND_ERROR = 1530
|
||||
WRONG_SIZE_NUMBER = 1531
|
||||
SIZE_OVERFLOW_ERROR = 1532
|
||||
ALTER_FILEGROUP_FAILED = 1533
|
||||
BINLOG_ROW_LOGGING_FAILED = 1534
|
||||
EVENT_ALREADY_EXISTS = 1537
|
||||
EVENT_DOES_NOT_EXIST = 1539
|
||||
EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
|
||||
EVENT_ENDS_BEFORE_STARTS = 1543
|
||||
EVENT_EXEC_TIME_IN_THE_PAST = 1544
|
||||
EVENT_SAME_NAME = 1551
|
||||
DROP_INDEX_FK = 1553
|
||||
WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
|
||||
CANT_LOCK_LOG_TABLE = 1556
|
||||
FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
|
||||
COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
|
||||
TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
|
||||
STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
|
||||
PARTITION_NO_TEMPORARY = 1562
|
||||
PARTITION_CONST_DOMAIN_ERROR = 1563
|
||||
PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
|
||||
NULL_IN_VALUES_LESS_THAN = 1566
|
||||
WRONG_PARTITION_NAME = 1567
|
||||
CANT_CHANGE_TX_CHARACTERISTICS = 1568
|
||||
DUP_ENTRY_AUTOINCREMENT_CASE = 1569
|
||||
EVENT_SET_VAR_ERROR = 1571
|
||||
PARTITION_MERGE_ERROR = 1572
|
||||
BASE64_DECODE_ERROR = 1575
|
||||
EVENT_RECURSION_FORBIDDEN = 1576
|
||||
ONLY_INTEGERS_ALLOWED = 1578
|
||||
UNSUPORTED_LOG_ENGINE = 1579
|
||||
BAD_LOG_STATEMENT = 1580
|
||||
CANT_RENAME_LOG_TABLE = 1581
|
||||
WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
|
||||
WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
|
||||
WRONG_PARAMETERS_TO_STORED_FCT = 1584
|
||||
NATIVE_FCT_NAME_COLLISION = 1585
|
||||
DUP_ENTRY_WITH_KEY_NAME = 1586
|
||||
BINLOG_PURGE_EMFILE = 1587
|
||||
EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
|
||||
EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
|
||||
NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
|
||||
BINLOG_UNSAFE_STATEMENT = 1592
|
||||
BINLOG_FATAL_ERROR = 1593
|
||||
BINLOG_LOGGING_IMPOSSIBLE = 1598
|
||||
VIEW_NO_CREATION_CTX = 1599
|
||||
VIEW_INVALID_CREATION_CTX = 1600
|
||||
TRG_CORRUPTED_FILE = 1602
|
||||
TRG_NO_CREATION_CTX = 1603
|
||||
TRG_INVALID_CREATION_CTX = 1604
|
||||
EVENT_INVALID_CREATION_CTX = 1605
|
||||
TRG_CANT_OPEN_TABLE = 1606
|
||||
NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
|
||||
SLAVE_CORRUPT_EVENT = 1610
|
||||
LOG_PURGE_NO_FILE = 1612
|
||||
XA_RBTIMEOUT = 1613
|
||||
XA_RBDEADLOCK = 1614
|
||||
NEED_REPREPARE = 1615
|
||||
WARN_NO_MASTER_INFO = 1617
|
||||
WARN_OPTION_IGNORED = 1618
|
||||
PLUGIN_DELETE_BUILTIN = 1619
|
||||
WARN_PLUGIN_BUSY = 1620
|
||||
VARIABLE_IS_READONLY = 1621
|
||||
WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
|
||||
SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
|
||||
NDB_REPLICATION_SCHEMA_ERROR = 1625
|
||||
CONFLICT_FN_PARSE_ERROR = 1626
|
||||
EXCEPTIONS_WRITE_ERROR = 1627
|
||||
TOO_LONG_TABLE_COMMENT = 1628
|
||||
TOO_LONG_FIELD_COMMENT = 1629
|
||||
FUNC_INEXISTENT_NAME_COLLISION = 1630
|
||||
DATABASE_NAME = 1631
|
||||
TABLE_NAME = 1632
|
||||
PARTITION_NAME = 1633
|
||||
SUBPARTITION_NAME = 1634
|
||||
TEMPORARY_NAME = 1635
|
||||
RENAMED_NAME = 1636
|
||||
TOO_MANY_CONCURRENT_TRXS = 1637
|
||||
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
|
||||
DEBUG_SYNC_TIMEOUT = 1639
|
||||
DEBUG_SYNC_HIT_LIMIT = 1640
|
||||
DUP_SIGNAL_SET = 1641
|
||||
SIGNAL_WARN = 1642
|
||||
SIGNAL_NOT_FOUND = 1643
|
||||
SIGNAL_EXCEPTION = 1644
|
||||
RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
|
||||
SIGNAL_BAD_CONDITION_TYPE = 1646
|
||||
WARN_COND_ITEM_TRUNCATED = 1647
|
||||
COND_ITEM_TOO_LONG = 1648
|
||||
UNKNOWN_LOCALE = 1649
|
||||
SLAVE_IGNORE_SERVER_IDS = 1650
|
||||
SAME_NAME_PARTITION_FIELD = 1652
|
||||
PARTITION_COLUMN_LIST_ERROR = 1653
|
||||
WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
|
||||
TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
|
||||
MAXVALUE_IN_VALUES_IN = 1656
|
||||
TOO_MANY_VALUES_ERROR = 1657
|
||||
ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
|
||||
FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
|
||||
PARTITION_FIELDS_TOO_LONG = 1660
|
||||
BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
|
||||
BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
|
||||
BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
|
||||
BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
|
||||
BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
|
||||
BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
|
||||
BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
|
||||
BINLOG_UNSAFE_LIMIT = 1668
|
||||
BINLOG_UNSAFE_SYSTEM_TABLE = 1670
|
||||
BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
|
||||
BINLOG_UNSAFE_UDF = 1672
|
||||
BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
|
||||
BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
|
||||
BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
|
||||
MESSAGE_AND_STATEMENT = 1676
|
||||
SLAVE_CANT_CREATE_CONVERSION = 1678
|
||||
INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
|
||||
PATH_LENGTH = 1680
|
||||
WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
|
||||
WRONG_NATIVE_TABLE_STRUCTURE = 1682
|
||||
WRONG_PERFSCHEMA_USAGE = 1683
|
||||
WARN_I_S_SKIPPED_TABLE = 1684
|
||||
INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
|
||||
STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
|
||||
SPATIAL_MUST_HAVE_GEOM_COL = 1687
|
||||
TOO_LONG_INDEX_COMMENT = 1688
|
||||
LOCK_ABORTED = 1689
|
||||
DATA_OUT_OF_RANGE = 1690
|
||||
WRONG_SPVAR_TYPE_IN_LIMIT = 1691
|
||||
BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
|
||||
BINLOG_UNSAFE_MIXED_STATEMENT = 1693
|
||||
INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
|
||||
STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
|
||||
FAILED_READ_FROM_PAR_FILE = 1696
|
||||
VALUES_IS_NOT_INT_TYPE_ERROR = 1697
|
||||
ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
|
||||
SET_PASSWORD_AUTH_PLUGIN = 1699
|
||||
TRUNCATE_ILLEGAL_FK = 1701
|
||||
PLUGIN_IS_PERMANENT = 1702
|
||||
SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
|
||||
SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
|
||||
STMT_CACHE_FULL = 1705
|
||||
MULTI_UPDATE_KEY_CONFLICT = 1706
|
||||
TABLE_NEEDS_REBUILD = 1707
|
||||
WARN_OPTION_BELOW_LIMIT = 1708
|
||||
INDEX_COLUMN_TOO_LONG = 1709
|
||||
ERROR_IN_TRIGGER_BODY = 1710
|
||||
ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
|
||||
INDEX_CORRUPT = 1712
|
||||
UNDO_RECORD_TOO_BIG = 1713
|
||||
BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
|
||||
BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
|
||||
BINLOG_UNSAFE_REPLACE_SELECT = 1716
|
||||
BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
|
||||
BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
|
||||
BINLOG_UNSAFE_UPDATE_IGNORE = 1719
|
||||
PLUGIN_NO_UNINSTALL = 1720
|
||||
PLUGIN_NO_INSTALL = 1721
|
||||
BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
|
||||
BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
|
||||
BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
|
||||
TABLE_IN_FK_CHECK = 1725
|
||||
UNSUPPORTED_ENGINE = 1726
|
||||
BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
|
||||
CANNOT_LOAD_FROM_TABLE_V2 = 1728
|
||||
MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
|
||||
ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
|
||||
PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
|
||||
PARTITION_EXCHANGE_PART_TABLE = 1732
|
||||
PARTITION_EXCHANGE_TEMP_TABLE = 1733
|
||||
PARTITION_INSTEAD_OF_SUBPARTITION = 1734
|
||||
UNKNOWN_PARTITION = 1735
|
||||
TABLES_DIFFERENT_METADATA = 1736
|
||||
ROW_DOES_NOT_MATCH_PARTITION = 1737
|
||||
BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
|
||||
WARN_INDEX_NOT_APPLICABLE = 1739
|
||||
PARTITION_EXCHANGE_FOREIGN_KEY = 1740
|
||||
RPL_INFO_DATA_TOO_LONG = 1742
|
||||
BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
|
||||
CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
|
||||
PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
|
||||
ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
|
||||
CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
|
||||
WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
|
||||
WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
|
||||
MTS_FEATURE_IS_NOT_SUPPORTED = 1753
|
||||
MTS_UPDATED_DBS_GREATER_MAX = 1754
|
||||
MTS_CANT_PARALLEL = 1755
|
||||
MTS_INCONSISTENT_DATA = 1756
|
||||
FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
|
||||
DA_INVALID_CONDITION_NUMBER = 1758
|
||||
INSECURE_PLAIN_TEXT = 1759
|
||||
INSECURE_CHANGE_MASTER = 1760
|
||||
FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
|
||||
FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
|
||||
SQLTHREAD_WITH_SECURE_SLAVE = 1763
|
||||
TABLE_HAS_NO_FT = 1764
|
||||
VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
|
||||
VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
|
||||
SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
|
||||
GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
|
||||
MALFORMED_GTID_SET_SPECIFICATION = 1772
|
||||
MALFORMED_GTID_SET_ENCODING = 1773
|
||||
MALFORMED_GTID_SPECIFICATION = 1774
|
||||
GNO_EXHAUSTED = 1775
|
||||
BAD_SLAVE_AUTO_POSITION = 1776
|
||||
AUTO_POSITION_REQUIRES_GTID_MODE_NOT_OFF = 1777
|
||||
CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
|
||||
GTID_MODE_ON_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
|
||||
CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
|
||||
CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
|
||||
CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
|
||||
GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
|
||||
GTID_UNSAFE_CREATE_SELECT = 1786
|
||||
GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
|
||||
GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
|
||||
MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
|
||||
CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
|
||||
UNKNOWN_EXPLAIN_FORMAT = 1791
|
||||
CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
|
||||
TOO_LONG_TABLE_PARTITION_COMMENT = 1793
|
||||
SLAVE_CONFIGURATION = 1794
|
||||
INNODB_FT_LIMIT = 1795
|
||||
INNODB_NO_FT_TEMP_TABLE = 1796
|
||||
INNODB_FT_WRONG_DOCID_COLUMN = 1797
|
||||
INNODB_FT_WRONG_DOCID_INDEX = 1798
|
||||
INNODB_ONLINE_LOG_TOO_BIG = 1799
|
||||
UNKNOWN_ALTER_ALGORITHM = 1800
|
||||
UNKNOWN_ALTER_LOCK = 1801
|
||||
MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
|
||||
MTS_RECOVERY_FAILURE = 1803
|
||||
MTS_RESET_WORKERS = 1804
|
||||
COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
|
||||
SLAVE_SILENT_RETRY_TRANSACTION = 1806
|
||||
DISCARD_FK_CHECKS_RUNNING = 1807
|
||||
TABLE_SCHEMA_MISMATCH = 1808
|
||||
TABLE_IN_SYSTEM_TABLESPACE = 1809
|
||||
IO_READ_ERROR = 1810
|
||||
IO_WRITE_ERROR = 1811
|
||||
TABLESPACE_MISSING = 1812
|
||||
TABLESPACE_EXISTS = 1813
|
||||
TABLESPACE_DISCARDED = 1814
|
||||
INTERNAL_ERROR = 1815
|
||||
INNODB_IMPORT_ERROR = 1816
|
||||
INNODB_INDEX_CORRUPT = 1817
|
||||
INVALID_YEAR_COLUMN_LENGTH = 1818
|
||||
NOT_VALID_PASSWORD = 1819
|
||||
MUST_CHANGE_PASSWORD = 1820
|
||||
FK_NO_INDEX_CHILD = 1821
|
||||
FK_NO_INDEX_PARENT = 1822
|
||||
FK_FAIL_ADD_SYSTEM = 1823
|
||||
FK_CANNOT_OPEN_PARENT = 1824
|
||||
FK_INCORRECT_OPTION = 1825
|
||||
FK_DUP_NAME = 1826
|
||||
PASSWORD_FORMAT = 1827
|
||||
FK_COLUMN_CANNOT_DROP = 1828
|
||||
FK_COLUMN_CANNOT_DROP_CHILD = 1829
|
||||
FK_COLUMN_NOT_NULL = 1830
|
||||
DUP_INDEX = 1831
|
||||
FK_COLUMN_CANNOT_CHANGE = 1832
|
||||
FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
|
||||
MALFORMED_PACKET = 1835
|
||||
READ_ONLY_MODE = 1836
|
||||
GTID_NEXT_TYPE_UNDEFINED_GTID = 1837
|
||||
VARIABLE_NOT_SETTABLE_IN_SP = 1838
|
||||
CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
|
||||
CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
|
||||
GTID_PURGED_WAS_CHANGED = 1842
|
||||
GTID_EXECUTED_WAS_CHANGED = 1843
|
||||
BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
|
||||
ALTER_OPERATION_NOT_SUPPORTED = 1845
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
|
||||
SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
|
||||
DUP_UNKNOWN_IN_INDEX = 1859
|
||||
IDENT_CAUSES_TOO_LONG_PATH = 1860
|
||||
ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
|
||||
MUST_CHANGE_PASSWORD_LOGIN = 1862
|
||||
ROW_IN_WRONG_PARTITION = 1863
|
||||
MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX = 1864
|
||||
BINLOG_LOGICAL_CORRUPTION = 1866
|
||||
WARN_PURGE_LOG_IN_USE = 1867
|
||||
WARN_PURGE_LOG_IS_ACTIVE = 1868
|
||||
AUTO_INCREMENT_CONFLICT = 1869
|
||||
WARN_ON_BLOCKHOLE_IN_RBR = 1870
|
||||
SLAVE_MI_INIT_REPOSITORY = 1871
|
||||
SLAVE_RLI_INIT_REPOSITORY = 1872
|
||||
ACCESS_DENIED_CHANGE_USER_ERROR = 1873
|
||||
INNODB_READ_ONLY = 1874
|
||||
STOP_SLAVE_SQL_THREAD_TIMEOUT = 1875
|
||||
STOP_SLAVE_IO_THREAD_TIMEOUT = 1876
|
||||
TABLE_CORRUPT = 1877
|
||||
TEMP_FILE_WRITE_FAILURE = 1878
|
||||
INNODB_FT_AUX_NOT_HEX_ID = 1879
|
||||
OLD_TEMPORALS_UPGRADED = 1880
|
||||
INNODB_FORCED_RECOVERY = 1881
|
||||
AES_INVALID_IV = 1882
|
||||
PLUGIN_CANNOT_BE_UNINSTALLED = 1883
|
||||
GTID_UNSAFE_BINLOG_SPLITTABLE_STATEMENT_AND_ASSIGNED_GTID = 1884
|
||||
SLAVE_HAS_MORE_GTIDS_THAN_MASTER = 1885
|
||||
MISSING_KEY = 1886
|
||||
ERROR_LAST = 1973
|
@ -1,40 +0,0 @@
"""MySQL FIELD_TYPE Constants

These constants represent the various column (field) types that are
supported by MySQL.
"""

DECIMAL = 0
|
||||
TINY = 1
|
||||
SHORT = 2
|
||||
LONG = 3
|
||||
FLOAT = 4
|
||||
DOUBLE = 5
|
||||
NULL = 6
|
||||
TIMESTAMP = 7
|
||||
LONGLONG = 8
|
||||
INT24 = 9
|
||||
DATE = 10
|
||||
TIME = 11
|
||||
DATETIME = 12
|
||||
YEAR = 13
|
||||
# NEWDATE = 14 # Internal to MySQL.
|
||||
VARCHAR = 15
|
||||
BIT = 16
|
||||
# TIMESTAMP2 = 17
|
||||
# DATETIME2 = 18
|
||||
# TIME2 = 19
|
||||
JSON = 245
|
||||
NEWDECIMAL = 246
|
||||
ENUM = 247
|
||||
SET = 248
|
||||
TINY_BLOB = 249
|
||||
MEDIUM_BLOB = 250
|
||||
LONG_BLOB = 251
|
||||
BLOB = 252
|
||||
VAR_STRING = 253
|
||||
STRING = 254
|
||||
GEOMETRY = 255
|
||||
|
||||
CHAR = TINY
|
||||
INTERVAL = ENUM
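# A minimal sketch of how these type codes are typically used: the second item
# of each entry in cursor.description is a FIELD_TYPE value, so result columns
# can be dispatched on it. The connection parameters below are placeholders.
def _example_inspect_column_types():
    import MySQLdb
    from MySQLdb.constants import FIELD_TYPE

    conn = MySQLdb.connect(host="localhost", user="user", passwd="secret", db="test")
    cur = conn.cursor()
    cur.execute("SELECT 1 AS n, NOW() AS ts, 'abc' AS s")
    for name, type_code, *_rest in cur.description:
        if type_code in (FIELD_TYPE.DATETIME, FIELD_TYPE.TIMESTAMP):
            print(name, "is a temporal column")
        elif type_code in (FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING, FIELD_TYPE.VARCHAR):
            print(name, "is a string column")
    conn.close()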
|
@ -1,23 +0,0 @@
"""MySQL FLAG Constants

These flags are used along with the FIELD_TYPE to indicate various
properties of columns in a result set.

"""

NOT_NULL = 1
|
||||
PRI_KEY = 2
|
||||
UNIQUE_KEY = 4
|
||||
MULTIPLE_KEY = 8
|
||||
BLOB = 16
|
||||
UNSIGNED = 32
|
||||
ZEROFILL = 64
|
||||
BINARY = 128
|
||||
ENUM = 256
|
||||
AUTO_INCREMENT = 512
|
||||
TIMESTAMP = 1024
|
||||
SET = 2048
|
||||
NUM = 32768
|
||||
PART_KEY = 16384
|
||||
GROUP = 32768
|
||||
UNIQUE = 65536
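# A minimal sketch, assuming the non-standard cursor.description_flags
# extension described in MySQLdb.cursors: each entry is a bitmask of the
# values above, so individual column properties can be tested with bitwise AND.
def _example_check_flags(description_flags):
    from MySQLdb.constants import FLAG

    for column_index, flags in enumerate(description_flags):
        if flags & FLAG.NOT_NULL:
            print("column", column_index, "is NOT NULL")
        if flags & FLAG.AUTO_INCREMENT:
            print("column", column_index, "is AUTO_INCREMENT")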
|
@ -1 +0,0 @@
__all__ = ["CR", "FIELD_TYPE", "CLIENT", "ER", "FLAG"]
|
@ -1,139 +0,0 @@
|
||||
"""MySQLdb type conversion module
|
||||
|
||||
This module handles all the type conversions for MySQL. If the default
type conversions aren't what you need, you can make your own. The
conversions dictionary maps a MySQL FIELD_TYPE code or a Python type to a
conversion function which returns the corresponding value:
|
||||
|
||||
Key: FIELD_TYPE.* (from MySQLdb.constants)
|
||||
|
||||
Conversion function:
|
||||
|
||||
Arguments: string
|
||||
|
||||
Returns: Python object
|
||||
|
||||
Key: Python type object (from types) or class
|
||||
|
||||
Conversion function:
|
||||
|
||||
Arguments: Python object of indicated type or class AND
|
||||
conversion dictionary
|
||||
|
||||
Returns: SQL literal value
|
||||
|
||||
Notes: Most conversion functions can ignore the dictionary, but
|
||||
it is a required parameter. It is necessary for converting
|
||||
things like sequences and instances.
|
||||
|
||||
Don't modify conversions if you can avoid it. Instead, make copies
|
||||
(with the copy() method), modify the copies, and then pass them to
|
||||
MySQL.connect().
|
||||
"""
|
||||
from decimal import Decimal
|
||||
|
||||
from MySQLdb._mysql import string_literal
|
||||
from MySQLdb.constants import FIELD_TYPE, FLAG
|
||||
from MySQLdb.times import (
|
||||
Date,
|
||||
DateTimeType,
|
||||
DateTime2literal,
|
||||
DateTimeDeltaType,
|
||||
DateTimeDelta2literal,
|
||||
DateTime_or_None,
|
||||
TimeDelta_or_None,
|
||||
Date_or_None,
|
||||
)
|
||||
from MySQLdb._exceptions import ProgrammingError
|
||||
|
||||
import array
|
||||
|
||||
NoneType = type(None)
|
||||
|
||||
try:
|
||||
ArrayType = array.ArrayType
|
||||
except AttributeError:
|
||||
ArrayType = array.array
|
||||
|
||||
|
||||
def Bool2Str(s, d):
|
||||
return b"1" if s else b"0"
|
||||
|
||||
|
||||
def Set2Str(s, d):
|
||||
    # Only supports ASCII strings. Not tested.
|
||||
return string_literal(",".join(s))
|
||||
|
||||
|
||||
def Thing2Str(s, d):
|
||||
"""Convert something into a string via str()."""
|
||||
return str(s)
|
||||
|
||||
|
||||
def Float2Str(o, d):
|
||||
s = repr(o)
|
||||
if s in ("inf", "-inf", "nan"):
|
||||
raise ProgrammingError("%s can not be used with MySQL" % s)
|
||||
if "e" not in s:
|
||||
s += "e0"
|
||||
return s
|
||||
|
||||
|
||||
def None2NULL(o, d):
|
||||
"""Convert None to NULL."""
|
||||
return b"NULL"
|
||||
|
||||
|
||||
def Thing2Literal(o, d):
|
||||
"""Convert something into a SQL string literal. If using
|
||||
MySQL-3.23 or newer, string_literal() is a method of the
|
||||
_mysql.MYSQL object, and this function will be overridden with
|
||||
that method when the connection is created."""
|
||||
return string_literal(o)
|
||||
|
||||
|
||||
def Decimal2Literal(o, d):
|
||||
return format(o, "f")
|
||||
|
||||
|
||||
def array2Str(o, d):
|
||||
return Thing2Literal(o.tostring(), d)
|
||||
|
||||
|
||||
# bytes or str depending on the BINARY flag.
|
||||
_bytes_or_str = ((FLAG.BINARY, bytes), (None, str))
|
||||
|
||||
conversions = {
|
||||
int: Thing2Str,
|
||||
float: Float2Str,
|
||||
NoneType: None2NULL,
|
||||
ArrayType: array2Str,
|
||||
bool: Bool2Str,
|
||||
Date: Thing2Literal,
|
||||
DateTimeType: DateTime2literal,
|
||||
DateTimeDeltaType: DateTimeDelta2literal,
|
||||
set: Set2Str,
|
||||
Decimal: Decimal2Literal,
|
||||
FIELD_TYPE.TINY: int,
|
||||
FIELD_TYPE.SHORT: int,
|
||||
FIELD_TYPE.LONG: int,
|
||||
FIELD_TYPE.FLOAT: float,
|
||||
FIELD_TYPE.DOUBLE: float,
|
||||
FIELD_TYPE.DECIMAL: Decimal,
|
||||
FIELD_TYPE.NEWDECIMAL: Decimal,
|
||||
FIELD_TYPE.LONGLONG: int,
|
||||
FIELD_TYPE.INT24: int,
|
||||
FIELD_TYPE.YEAR: int,
|
||||
FIELD_TYPE.TIMESTAMP: DateTime_or_None,
|
||||
FIELD_TYPE.DATETIME: DateTime_or_None,
|
||||
FIELD_TYPE.TIME: TimeDelta_or_None,
|
||||
FIELD_TYPE.DATE: Date_or_None,
|
||||
FIELD_TYPE.TINY_BLOB: bytes,
|
||||
FIELD_TYPE.MEDIUM_BLOB: bytes,
|
||||
FIELD_TYPE.LONG_BLOB: bytes,
|
||||
FIELD_TYPE.BLOB: bytes,
|
||||
FIELD_TYPE.STRING: bytes,
|
||||
FIELD_TYPE.VAR_STRING: bytes,
|
||||
FIELD_TYPE.VARCHAR: bytes,
|
||||
FIELD_TYPE.JSON: bytes,
|
||||
}
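# A minimal sketch of the customisation pattern described in the module
# docstring: copy the conversions dictionary, override an entry, and pass the
# copy to connect(). Returning DECIMAL columns as float is only an example,
# and the connection parameters are placeholders.
def _example_custom_conversions():
    import MySQLdb

    my_conv = conversions.copy()
    my_conv[FIELD_TYPE.DECIMAL] = float
    my_conv[FIELD_TYPE.NEWDECIMAL] = float
    return MySQLdb.connect(
        host="localhost", user="user", passwd="secret", db="test", conv=my_conv
    )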
|
@ -1,500 +0,0 @@
"""MySQLdb Cursors

This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
|
||||
import re
|
||||
|
||||
from ._exceptions import ProgrammingError
|
||||
|
||||
|
||||
#: Regular expression for ``Cursor.executemany``.
#: executemany only supports simple bulk insert.
#: You can use it to load large datasets.
|
||||
RE_INSERT_VALUES = re.compile(
|
||||
"".join(
|
||||
[
|
||||
r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)",
|
||||
r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))",
|
||||
r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z",
|
||||
]
|
||||
),
|
||||
re.IGNORECASE | re.DOTALL,
|
||||
)
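# A small self-check of what the pattern above captures for a plain
# parameterised INSERT: group 1 is everything up to and including VALUES,
# group 2 is the placeholder tuple that executemany() repeats, and group 3 is
# an optional ON DUPLICATE KEY clause. The statement is illustrative only.
def _example_re_insert_values():
    m = RE_INSERT_VALUES.match("INSERT INTO t (a, b) VALUES (%s, %s)")
    assert m is not None
    assert m.group(2) == "(%s, %s)"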
|
||||
|
||||
|
||||
class BaseCursor:
|
||||
"""A base for Cursor classes. Useful attributes:
|
||||
|
||||
description
|
||||
A tuple of DB API 7-tuples describing the columns in
|
||||
the last executed query; see PEP-249 for details.
|
||||
|
||||
description_flags
|
||||
Tuple of column flags for last query, one entry per column
|
||||
in the result set. Values correspond to those in
|
||||
MySQLdb.constants.FLAG. See MySQL documentation (C API)
|
||||
for more information. Non-standard extension.
|
||||
|
||||
arraysize
|
||||
default number of rows fetchmany() will fetch
|
||||
"""
|
||||
|
||||
#: Max statement size which :meth:`executemany` generates.
|
||||
#:
|
||||
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
|
||||
#: Default value of max_allowed_packet is 1048576.
|
||||
max_stmt_length = 64 * 1024
|
||||
|
||||
from ._exceptions import (
|
||||
MySQLError,
|
||||
Warning,
|
||||
Error,
|
||||
InterfaceError,
|
||||
DatabaseError,
|
||||
DataError,
|
||||
OperationalError,
|
||||
IntegrityError,
|
||||
InternalError,
|
||||
ProgrammingError,
|
||||
NotSupportedError,
|
||||
)
|
||||
|
||||
connection = None
|
||||
|
||||
def __init__(self, connection):
|
||||
self.connection = connection
|
||||
self.description = None
|
||||
self.description_flags = None
|
||||
self.rowcount = 0
|
||||
self.arraysize = 1
|
||||
self._executed = None
|
||||
|
||||
self.lastrowid = None
|
||||
self._result = None
|
||||
self.rownumber = None
|
||||
self._rows = None
|
||||
|
||||
def _discard(self):
|
||||
self.description = None
|
||||
self.description_flags = None
|
||||
# Django uses some member after __exit__.
|
||||
# So we keep rowcount and lastrowid here. They are cleared in Cursor._query().
|
||||
# self.rowcount = 0
|
||||
# self.lastrowid = None
|
||||
self._rows = None
|
||||
self.rownumber = None
|
||||
|
||||
if self._result:
|
||||
self._result.discard()
|
||||
self._result = None
|
||||
|
||||
con = self.connection
|
||||
if con is None:
|
||||
return
|
||||
while con.next_result() == 0: # -1 means no more data.
|
||||
con.discard_result()
|
||||
|
||||
def close(self):
|
||||
"""Close the cursor. No further queries will be possible."""
|
||||
try:
|
||||
if self.connection is None:
|
||||
return
|
||||
self._discard()
|
||||
finally:
|
||||
self.connection = None
|
||||
self._result = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
del exc_info
|
||||
self.close()
|
||||
|
||||
def _check_executed(self):
|
||||
if not self._executed:
|
||||
raise ProgrammingError("execute() first")
|
||||
|
||||
def nextset(self):
|
||||
"""Advance to the next result set.
|
||||
|
||||
Returns None if there are no more result sets.
|
||||
"""
|
||||
if self._executed:
|
||||
self.fetchall()
|
||||
|
||||
db = self._get_db()
|
||||
nr = db.next_result()
|
||||
if nr == -1:
|
||||
return None
|
||||
self._do_get_result(db)
|
||||
self._post_get_result()
|
||||
return 1
|
||||
|
||||
def _do_get_result(self, db):
|
||||
self._result = result = self._get_result()
|
||||
if result is None:
|
||||
self.description = self.description_flags = None
|
||||
else:
|
||||
self.description = result.describe()
|
||||
self.description_flags = result.field_flags()
|
||||
|
||||
self.rowcount = db.affected_rows()
|
||||
self.rownumber = 0
|
||||
self.lastrowid = db.insert_id()
|
||||
|
||||
def _post_get_result(self):
|
||||
pass
|
||||
|
||||
def setinputsizes(self, *args):
|
||||
"""Does nothing, required by DB API."""
|
||||
|
||||
def setoutputsizes(self, *args):
|
||||
"""Does nothing, required by DB API."""
|
||||
|
||||
def _get_db(self):
|
||||
con = self.connection
|
||||
if con is None:
|
||||
raise ProgrammingError("cursor closed")
|
||||
return con
|
||||
|
||||
def execute(self, query, args=None):
|
||||
"""Execute a query.
|
||||
|
||||
query -- string, query to execute on server
|
||||
args -- optional sequence or mapping, parameters to use with query.
|
||||
|
||||
Note: If args is a sequence, then %s must be used as the
|
||||
parameter placeholder in the query. If a mapping is used,
|
||||
%(key)s must be used as the placeholder.
|
||||
|
||||
        Returns an integer representing the number of rows affected, if any
|
||||
"""
|
||||
self._discard()
|
||||
|
||||
mogrified_query = self._mogrify(query, args)
|
||||
|
||||
assert isinstance(mogrified_query, (bytes, bytearray))
|
||||
res = self._query(mogrified_query)
|
||||
return res
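    # A minimal usage sketch of the two placeholder styles described above;
    # the table and cursor are assumed to exist and are illustrative only.
    #
    #     cur.execute("SELECT * FROM user WHERE id = %s", (42,))
    #     cur.execute("SELECT * FROM user WHERE name = %(name)s", {"name": "alice"})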
|
||||
|
||||
def _mogrify(self, query, args=None):
|
||||
"""Return query after binding args."""
|
||||
db = self._get_db()
|
||||
|
||||
if isinstance(query, str):
|
||||
query = query.encode(db.encoding)
|
||||
|
||||
if args is not None:
|
||||
if isinstance(args, dict):
|
||||
nargs = {}
|
||||
for key, item in args.items():
|
||||
if isinstance(key, str):
|
||||
key = key.encode(db.encoding)
|
||||
nargs[key] = db.literal(item)
|
||||
args = nargs
|
||||
else:
|
||||
args = tuple(map(db.literal, args))
|
||||
try:
|
||||
query = query % args
|
||||
except TypeError as m:
|
||||
raise ProgrammingError(str(m))
|
||||
|
||||
return query
|
||||
|
||||
def mogrify(self, query, args=None):
|
||||
"""Return query after binding args.
|
||||
|
||||
query -- string, query to mogrify
|
||||
args -- optional sequence or mapping, parameters to use with query.
|
||||
|
||||
Note: If args is a sequence, then %s must be used as the
|
||||
parameter placeholder in the query. If a mapping is used,
|
||||
%(key)s must be used as the placeholder.
|
||||
|
||||
        Returns a string representing the query that would be executed by the server
|
||||
"""
|
||||
return self._mogrify(query, args).decode(self._get_db().encoding)
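    # A short illustration with hypothetical values: mogrify() returns the
    # exact statement execute() would send after escaping the parameters.
    #
    #     cur.mogrify("SELECT %s, %s", (1, "abc"))
    #     # -> "SELECT 1, 'abc'"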
|
||||
|
||||
def executemany(self, query, args):
|
||||
# type: (str, list) -> int
|
||||
"""Execute a multi-row query.
|
||||
|
||||
:param query: query to execute on server
|
||||
        :param args: Sequence of sequences or mappings used as the query parameters.
|
||||
:return: Number of rows affected, if any.
|
||||
|
||||
This method improves performance on multiple-row INSERT and
|
||||
REPLACE. Otherwise it is equivalent to looping over args with
|
||||
execute().
|
||||
"""
|
||||
if not args:
|
||||
return
|
||||
|
||||
m = RE_INSERT_VALUES.match(query)
|
||||
if m:
|
||||
q_prefix = m.group(1) % ()
|
||||
q_values = m.group(2).rstrip()
|
||||
q_postfix = m.group(3) or ""
|
||||
assert q_values[0] == "(" and q_values[-1] == ")"
|
||||
return self._do_execute_many(
|
||||
q_prefix,
|
||||
q_values,
|
||||
q_postfix,
|
||||
args,
|
||||
self.max_stmt_length,
|
||||
self._get_db().encoding,
|
||||
)
|
||||
|
||||
self.rowcount = sum(self.execute(query, arg) for arg in args)
|
||||
return self.rowcount
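    # A minimal usage sketch for the bulk-insert form recognised above; the
    # table name and data are illustrative only.
    #
    #     cur.executemany(
    #         "INSERT INTO points (x, y) VALUES (%s, %s)",
    #         [(1, 2), (3, 4), (5, 6)],
    #     )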
|
||||
|
||||
def _do_execute_many(
|
||||
self, prefix, values, postfix, args, max_stmt_length, encoding
|
||||
):
|
||||
if isinstance(prefix, str):
|
||||
prefix = prefix.encode(encoding)
|
||||
if isinstance(values, str):
|
||||
values = values.encode(encoding)
|
||||
if isinstance(postfix, str):
|
||||
postfix = postfix.encode(encoding)
|
||||
sql = bytearray(prefix)
|
||||
args = iter(args)
|
||||
v = self._mogrify(values, next(args))
|
||||
sql += v
|
||||
rows = 0
|
||||
for arg in args:
|
||||
v = self._mogrify(values, arg)
|
||||
if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
|
||||
rows += self.execute(sql + postfix)
|
||||
sql = bytearray(prefix)
|
||||
else:
|
||||
sql += b","
|
||||
sql += v
|
||||
rows += self.execute(sql + postfix)
|
||||
self.rowcount = rows
|
||||
return rows
|
||||
|
||||
def callproc(self, procname, args=()):
|
||||
"""Execute stored procedure procname with args
|
||||
|
||||
procname -- string, name of procedure to execute on server
|
||||
|
||||
args -- Sequence of parameters to use with procedure
|
||||
|
||||
Returns the original args.
|
||||
|
||||
Compatibility warning: PEP-249 specifies that any modified
|
||||
parameters must be returned. This is currently impossible
|
||||
as they are only available by storing them in a server
|
||||
variable and then retrieved by a query. Since stored
|
||||
procedures return zero or more result sets, there is no
|
||||
reliable way to get at OUT or INOUT parameters via callproc.
|
||||
The server variables are named @_procname_n, where procname
|
||||
is the parameter above and n is the position of the parameter
|
||||
(from zero). Once all result sets generated by the procedure
|
||||
have been fetched, you can issue a SELECT @_procname_0, ...
|
||||
query using .execute() to get any OUT or INOUT values.
|
||||
|
||||
Compatibility warning: The act of calling a stored procedure
|
||||
itself creates an empty result set. This appears after any
|
||||
result sets generated by the procedure. This is non-standard
|
||||
behavior with respect to the DB-API. Be sure to use nextset()
|
||||
to advance through all result sets; otherwise you may get
|
||||
disconnected.
|
||||
"""
|
||||
db = self._get_db()
|
||||
if isinstance(procname, str):
|
||||
procname = procname.encode(db.encoding)
|
||||
if args:
|
||||
fmt = b"@_" + procname + b"_%d=%s"
|
||||
q = b"SET %s" % b",".join(
|
||||
fmt % (index, db.literal(arg)) for index, arg in enumerate(args)
|
||||
)
|
||||
self._query(q)
|
||||
self.nextset()
|
||||
|
||||
q = b"CALL %s(%s)" % (
|
||||
procname,
|
||||
b",".join([b"@_%s_%d" % (procname, i) for i in range(len(args))]),
|
||||
)
|
||||
self._query(q)
|
||||
return args
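    # A minimal sketch of the OUT-parameter workaround described in the
    # docstring above; the procedure name and arguments are illustrative only.
    #
    #     cur.callproc("myproc", (10, 20))
    #     while cur.nextset() is not None:
    #         pass                      # drain all result sets first
    #     cur.execute("SELECT @_myproc_0, @_myproc_1")
    #     out_values = cur.fetchone()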
|
||||
|
||||
def _query(self, q):
|
||||
db = self._get_db()
|
||||
self._result = None
|
||||
self.rowcount = None
|
||||
self.lastrowid = None
|
||||
db.query(q)
|
||||
self._do_get_result(db)
|
||||
self._post_get_result()
|
||||
self._executed = q
|
||||
return self.rowcount
|
||||
|
||||
def _fetch_row(self, size=1):
|
||||
if not self._result:
|
||||
return ()
|
||||
return self._result.fetch_row(size, self._fetch_type)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.fetchone, None)
|
||||
|
||||
Warning = Warning
|
||||
Error = Error
|
||||
InterfaceError = InterfaceError
|
||||
DatabaseError = DatabaseError
|
||||
DataError = DataError
|
||||
OperationalError = OperationalError
|
||||
IntegrityError = IntegrityError
|
||||
InternalError = InternalError
|
||||
ProgrammingError = ProgrammingError
|
||||
NotSupportedError = NotSupportedError
|
||||
|
||||
|
||||
class CursorStoreResultMixIn:
|
||||
"""This is a MixIn class which causes the entire result set to be
|
||||
stored on the client side, i.e. it uses mysql_store_result(). If the
|
||||
result set can be very large, consider adding a LIMIT clause to your
|
||||
query, or using CursorUseResultMixIn instead."""
|
||||
|
||||
def _get_result(self):
|
||||
return self._get_db().store_result()
|
||||
|
||||
def _post_get_result(self):
|
||||
self._rows = self._fetch_row(0)
|
||||
self._result = None
|
||||
|
||||
def fetchone(self):
|
||||
"""Fetches a single row from the cursor. None indicates that
|
||||
no more rows are available."""
|
||||
self._check_executed()
|
||||
if self.rownumber >= len(self._rows):
|
||||
return None
|
||||
result = self._rows[self.rownumber]
|
||||
self.rownumber = self.rownumber + 1
|
||||
return result
|
||||
|
||||
def fetchmany(self, size=None):
|
||||
"""Fetch up to size rows from the cursor. Result set may be smaller
|
||||
than size. If size is not defined, cursor.arraysize is used."""
|
||||
self._check_executed()
|
||||
end = self.rownumber + (size or self.arraysize)
|
||||
result = self._rows[self.rownumber : end]
|
||||
self.rownumber = min(end, len(self._rows))
|
||||
return result
|
||||
|
||||
def fetchall(self):
|
||||
"""Fetches all available rows from the cursor."""
|
||||
self._check_executed()
|
||||
if self.rownumber:
|
||||
result = self._rows[self.rownumber :]
|
||||
else:
|
||||
result = self._rows
|
||||
self.rownumber = len(self._rows)
|
||||
return result
|
||||
|
||||
def scroll(self, value, mode="relative"):
|
||||
"""Scroll the cursor in the result set to a new position according
|
||||
to mode.
|
||||
|
||||
If mode is 'relative' (default), value is taken as offset to
|
||||
the current position in the result set, if set to 'absolute',
|
||||
value states an absolute target position."""
|
||||
self._check_executed()
|
||||
if mode == "relative":
|
||||
r = self.rownumber + value
|
||||
elif mode == "absolute":
|
||||
r = value
|
||||
else:
|
||||
raise ProgrammingError("unknown scroll mode %s" % repr(mode))
|
||||
if r < 0 or r >= len(self._rows):
|
||||
raise IndexError("out of range")
|
||||
self.rownumber = r
|
||||
|
||||
def __iter__(self):
|
||||
self._check_executed()
|
||||
result = self.rownumber and self._rows[self.rownumber :] or self._rows
|
||||
return iter(result)
|
||||
|
||||
|
||||
class CursorUseResultMixIn:
|
||||
|
||||
"""This is a MixIn class which causes the result set to be stored
|
||||
in the server and sent row-by-row to client side, i.e. it uses
|
||||
mysql_use_result(). You MUST retrieve the entire result set and
|
||||
close() the cursor before additional queries can be performed on
|
||||
the connection."""
|
||||
|
||||
def _get_result(self):
|
||||
return self._get_db().use_result()
|
||||
|
||||
def fetchone(self):
|
||||
"""Fetches a single row from the cursor."""
|
||||
self._check_executed()
|
||||
r = self._fetch_row(1)
|
||||
if not r:
|
||||
return None
|
||||
self.rownumber = self.rownumber + 1
|
||||
return r[0]
|
||||
|
||||
def fetchmany(self, size=None):
|
||||
"""Fetch up to size rows from the cursor. Result set may be smaller
|
||||
than size. If size is not defined, cursor.arraysize is used."""
|
||||
self._check_executed()
|
||||
r = self._fetch_row(size or self.arraysize)
|
||||
self.rownumber = self.rownumber + len(r)
|
||||
return r
|
||||
|
||||
def fetchall(self):
|
||||
"""Fetches all available rows from the cursor."""
|
||||
self._check_executed()
|
||||
r = self._fetch_row(0)
|
||||
self.rownumber = self.rownumber + len(r)
|
||||
return r
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
row = self.fetchone()
|
||||
if row is None:
|
||||
raise StopIteration
|
||||
return row
|
||||
|
||||
__next__ = next
|
||||
|
||||
|
||||
class CursorTupleRowsMixIn:
|
||||
"""This is a MixIn class that causes all rows to be returned as tuples,
|
||||
which is the standard form required by DB API."""
|
||||
|
||||
_fetch_type = 0
|
||||
|
||||
|
||||
class CursorDictRowsMixIn:
|
||||
"""This is a MixIn class that causes all rows to be returned as
|
||||
dictionaries. This is a non-standard feature."""
|
||||
|
||||
_fetch_type = 1
|
||||
|
||||
|
||||
class Cursor(CursorStoreResultMixIn, CursorTupleRowsMixIn, BaseCursor):
|
||||
"""This is the standard Cursor class that returns rows as tuples
|
||||
and stores the result set in the client."""
|
||||
|
||||
|
||||
class DictCursor(CursorStoreResultMixIn, CursorDictRowsMixIn, BaseCursor):
|
||||
"""This is a Cursor class that returns rows as dictionaries and
|
||||
stores the result set in the client."""
|
||||
|
||||
|
||||
class SSCursor(CursorUseResultMixIn, CursorTupleRowsMixIn, BaseCursor):
|
||||
"""This is a Cursor class that returns rows as tuples and stores
|
||||
the result set in the server."""
|
||||
|
||||
|
||||
class SSDictCursor(CursorUseResultMixIn, CursorDictRowsMixIn, BaseCursor):
|
||||
"""This is a Cursor class that returns rows as dictionaries and
|
||||
stores the result set in the server."""
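# A minimal sketch of how these classes are usually selected, assuming an
# already open MySQLdb connection. SSCursor/SSDictCursor keep the result set
# on the server, which suits very large queries at the cost of keeping the
# connection busy until the rows are fully fetched.
def _example_cursor_classes(connection):
    rows_as_dicts = connection.cursor(DictCursor)   # buffered, dict rows
    streaming = connection.cursor(SSCursor)         # unbuffered, tuple rows
    return rows_as_dicts, streaming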
|
@ -1,3 +0,0 @@
|
||||
__author__ = "Inada Naoki <songofacandy@gmail.com>"
|
||||
__version__ = "2.2.4"
|
||||
version_info = (2, 2, 4, "final", 0)
|
@ -1,150 +0,0 @@
"""times module

This module provides some Date and Time classes for dealing with MySQL data.

Use the Python datetime module to handle date and time columns.
"""
|
||||
from time import localtime
|
||||
from datetime import date, datetime, time, timedelta
|
||||
from MySQLdb._mysql import string_literal
|
||||
|
||||
Date = date
|
||||
Time = time
|
||||
TimeDelta = timedelta
|
||||
Timestamp = datetime
|
||||
|
||||
DateTimeDeltaType = timedelta
|
||||
DateTimeType = datetime
|
||||
|
||||
|
||||
def DateFromTicks(ticks):
|
||||
"""Convert UNIX ticks into a date instance."""
|
||||
return date(*localtime(ticks)[:3])
|
||||
|
||||
|
||||
def TimeFromTicks(ticks):
|
||||
"""Convert UNIX ticks into a time instance."""
|
||||
return time(*localtime(ticks)[3:6])
|
||||
|
||||
|
||||
def TimestampFromTicks(ticks):
|
||||
"""Convert UNIX ticks into a datetime instance."""
|
||||
return datetime(*localtime(ticks)[:6])
|
||||
|
||||
|
||||
format_TIME = format_DATE = str
|
||||
|
||||
|
||||
def format_TIMEDELTA(v):
|
||||
seconds = int(v.seconds) % 60
|
||||
minutes = int(v.seconds // 60) % 60
|
||||
hours = int(v.seconds // 3600) % 24
|
||||
return "%d %d:%d:%d" % (v.days, hours, minutes, seconds)
|
||||
|
||||
|
||||
def format_TIMESTAMP(d):
|
||||
"""
|
||||
:type d: datetime.datetime
|
||||
"""
|
||||
if d.microsecond:
|
||||
fmt = " ".join(
|
||||
[
|
||||
"{0.year:04}-{0.month:02}-{0.day:02}",
|
||||
"{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}",
|
||||
]
|
||||
)
|
||||
else:
|
||||
fmt = " ".join(
|
||||
[
|
||||
"{0.year:04}-{0.month:02}-{0.day:02}",
|
||||
"{0.hour:02}:{0.minute:02}:{0.second:02}",
|
||||
]
|
||||
)
|
||||
return fmt.format(d)
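# A tiny self-check of the formatting above (values chosen for illustration):
# the fractional part is only emitted when the datetime has microseconds.
def _example_format_timestamp():
    assert format_TIMESTAMP(datetime(2024, 1, 2, 3, 4, 5)) == "2024-01-02 03:04:05"
    assert (
        format_TIMESTAMP(datetime(2024, 1, 2, 3, 4, 5, 123000))
        == "2024-01-02 03:04:05.123000"
    )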
|
||||
|
||||
|
||||
def DateTime_or_None(s):
|
||||
try:
|
||||
if len(s) < 11:
|
||||
return Date_or_None(s)
|
||||
|
||||
micros = s[20:]
|
||||
|
||||
if len(micros) == 0:
|
||||
# 12:00:00
|
||||
micros = 0
|
||||
elif len(micros) < 7:
|
||||
# 12:00:00.123456
|
||||
micros = int(micros) * 10 ** (6 - len(micros))
|
||||
else:
|
||||
return None
|
||||
|
||||
return datetime(
|
||||
int(s[:4]), # year
|
||||
int(s[5:7]), # month
|
||||
int(s[8:10]), # day
|
||||
int(s[11:13] or 0), # hour
|
||||
int(s[14:16] or 0), # minute
|
||||
int(s[17:19] or 0), # second
|
||||
micros, # microsecond
|
||||
)
|
||||
except ValueError:
|
||||
return None
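# Illustrative inputs for the parser above: a full timestamp with a fractional
# second, and a date-only string that falls through to Date_or_None.
def _example_datetime_or_none():
    assert DateTime_or_None("2024-01-02 03:04:05.123") == datetime(
        2024, 1, 2, 3, 4, 5, 123000
    )
    assert DateTime_or_None("2024-01-02") == date(2024, 1, 2)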
|
||||
|
||||
|
||||
def TimeDelta_or_None(s):
|
||||
try:
|
||||
h, m, s = s.split(":")
|
||||
if "." in s:
|
||||
s, ms = s.split(".")
|
||||
ms = ms.ljust(6, "0")
|
||||
else:
|
||||
ms = 0
|
||||
if h[0] == "-":
|
||||
negative = True
|
||||
else:
|
||||
negative = False
|
||||
h, m, s, ms = abs(int(h)), int(m), int(s), int(ms)
|
||||
td = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)
|
||||
if negative:
|
||||
return -td
|
||||
else:
|
||||
return td
|
||||
except ValueError:
|
||||
# unpacking or int/float conversion failed
|
||||
return None
|
||||
|
||||
|
||||
def Time_or_None(s):
|
||||
try:
|
||||
h, m, s = s.split(":")
|
||||
if "." in s:
|
||||
s, ms = s.split(".")
|
||||
ms = ms.ljust(6, "0")
|
||||
else:
|
||||
ms = 0
|
||||
h, m, s, ms = int(h), int(m), int(s), int(ms)
|
||||
return time(hour=h, minute=m, second=s, microsecond=ms)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def Date_or_None(s):
|
||||
try:
|
||||
return date(
|
||||
int(s[:4]),
|
||||
int(s[5:7]),
|
||||
int(s[8:10]),
|
||||
) # year # month # day
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def DateTime2literal(d, c):
|
||||
"""Format a DateTime object as an ISO timestamp."""
|
||||
return string_literal(format_TIMESTAMP(d))
|
||||
|
||||
|
||||
def DateTimeDelta2literal(d, c):
|
||||
"""Format a DateTimeDelta object as a time."""
|
||||
return string_literal(format_TIMEDELTA(d))
|
@ -1,133 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# bitmap distribution font (bdf) file parser
|
||||
#
|
||||
# history:
|
||||
# 1996-05-16 fl created (as bdf2pil)
|
||||
# 1997-08-25 fl converted to FontFile driver
|
||||
# 2001-05-25 fl removed bogus __init__ call
|
||||
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
|
||||
# 2003-04-22 fl more robustification (from Graham Dumpleton)
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1997-2003 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
"""
|
||||
Parse X Bitmap Distribution Format (BDF)
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import BinaryIO
|
||||
|
||||
from . import FontFile, Image
|
||||
|
||||
bdf_slant = {
|
||||
"R": "Roman",
|
||||
"I": "Italic",
|
||||
"O": "Oblique",
|
||||
"RI": "Reverse Italic",
|
||||
"RO": "Reverse Oblique",
|
||||
"OT": "Other",
|
||||
}
|
||||
|
||||
bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"}
|
||||
|
||||
|
||||
def bdf_char(
|
||||
f: BinaryIO,
|
||||
) -> (
|
||||
tuple[
|
||||
str,
|
||||
int,
|
||||
tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
|
||||
Image.Image,
|
||||
]
|
||||
| None
|
||||
):
|
||||
# skip to STARTCHAR
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s:
|
||||
return None
|
||||
if s[:9] == b"STARTCHAR":
|
||||
break
|
||||
id = s[9:].strip().decode("ascii")
|
||||
|
||||
# load symbol properties
|
||||
props = {}
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s or s[:6] == b"BITMAP":
|
||||
break
|
||||
i = s.find(b" ")
|
||||
props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
|
||||
|
||||
# load bitmap
|
||||
bitmap = bytearray()
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s or s[:7] == b"ENDCHAR":
|
||||
break
|
||||
bitmap += s[:-1]
|
||||
|
||||
# The word BBX
|
||||
# followed by the width in x (BBw), height in y (BBh),
|
||||
# and x and y displacement (BBxoff0, BByoff0)
|
||||
# of the lower left corner from the origin of the character.
|
||||
width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())
|
||||
|
||||
# The word DWIDTH
|
||||
# followed by the width in x and y of the character in device pixels.
|
||||
dwx, dwy = (int(p) for p in props["DWIDTH"].split())
|
||||
|
||||
bbox = (
|
||||
(dwx, dwy),
|
||||
(x_disp, -y_disp - height, width + x_disp, -y_disp),
|
||||
(0, 0, width, height),
|
||||
)
|
||||
|
||||
try:
|
||||
im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
|
||||
except ValueError:
|
||||
# deal with zero-width characters
|
||||
im = Image.new("1", (width, height))
|
||||
|
||||
return id, int(props["ENCODING"]), bbox, im
|
||||
|
||||
|
||||
class BdfFontFile(FontFile.FontFile):
|
||||
"""Font file plugin for the X11 BDF format."""
|
||||
|
||||
def __init__(self, fp: BinaryIO):
|
||||
super().__init__()
|
||||
|
||||
s = fp.readline()
|
||||
if s[:13] != b"STARTFONT 2.1":
|
||||
msg = "not a valid BDF file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
props = {}
|
||||
comments = []
|
||||
|
||||
while True:
|
||||
s = fp.readline()
|
||||
if not s or s[:13] == b"ENDPROPERTIES":
|
||||
break
|
||||
i = s.find(b" ")
|
||||
props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
|
||||
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
|
||||
if s.find(b"LogicalFontDescription") < 0:
|
||||
comments.append(s[i + 1 : -1].decode("ascii"))
|
||||
|
||||
while True:
|
||||
c = bdf_char(fp)
|
||||
if not c:
|
||||
break
|
||||
id, ch, (xy, dst, src), im = c
|
||||
if 0 <= ch < len(self.glyph):
|
||||
self.glyph[ch] = xy, dst, src, im
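# A minimal usage sketch, assuming the usual FontFile.save()/ImageFont.load()
# workflow from PIL's font tooling; the file names below are placeholders.
def _example_convert_bdf(path: str):
    from PIL import BdfFontFile, ImageFont

    with open(path, "rb") as fp:
        font = BdfFontFile.BdfFontFile(fp)
    font.save("converted")            # writes converted.pil and converted.pbm
    return ImageFont.load("converted.pil")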
|
@ -1,475 +0,0 @@
|
||||
"""
|
||||
Blizzard Mipmap Format (.blp)
|
||||
Jerome Leclanche <jerome@leclan.ch>
|
||||
|
||||
The contents of this file are hereby released in the public domain (CC0)
|
||||
Full text of the CC0 license:
|
||||
https://creativecommons.org/publicdomain/zero/1.0/
|
||||
|
||||
BLP1 files, used mostly in Warcraft III, are not fully supported.
|
||||
All types of BLP2 files used in World of Warcraft are supported.
|
||||
|
||||
The BLP file structure consists of a header followed by up to 16 mipmaps of
the texture.
|
||||
|
||||
Texture sizes must be powers of two, though the two dimensions do
|
||||
not have to be equal; 512x256 is valid, but 512x200 is not.
|
||||
The first mipmap (mipmap #0) is the full size image; each subsequent
|
||||
mipmap halves both dimensions. The final mipmap should be 1x1.
|
||||
|
||||
BLP files come in many different flavours:
|
||||
* JPEG-compressed (type == 0) - only supported for BLP1.
|
||||
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
|
||||
array of 8-bit values, one per pixel, left to right, top to bottom.
|
||||
Each value is an index to the palette.
|
||||
* DXT-compressed (type == 1, encoding == 2):
|
||||
- DXT1 compression is used if alpha_encoding == 0.
|
||||
- An additional alpha bit is used if alpha_depth == 1.
|
||||
- DXT3 compression is used if alpha_encoding == 1.
|
||||
- DXT5 compression is used if alpha_encoding == 7.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import struct
|
||||
from enum import IntEnum
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
||||
|
||||
class Format(IntEnum):
|
||||
JPEG = 0
|
||||
|
||||
|
||||
class Encoding(IntEnum):
|
||||
UNCOMPRESSED = 1
|
||||
DXT = 2
|
||||
UNCOMPRESSED_RAW_BGRA = 3
|
||||
|
||||
|
||||
class AlphaEncoding(IntEnum):
|
||||
DXT1 = 0
|
||||
DXT3 = 1
|
||||
DXT5 = 7
|
||||
|
||||
|
||||
def unpack_565(i):
|
||||
return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3
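# A worked example of the 5-6-5 unpacking above (illustrative values only):
# 0xF800 has all five red bits set, so it expands to (248, 0, 0), and 0xFFFF
# expands to (248, 252, 248) because the low bits lost in packing stay zero.
def _example_unpack_565():
    assert unpack_565(0xF800) == (248, 0, 0)
    assert unpack_565(0xFFFF) == (248, 252, 248)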
|
||||
|
||||
|
||||
def decode_dxt1(data, alpha=False):
|
||||
"""
|
||||
input: one "row" of data (i.e. will produce 4*width pixels)
|
||||
"""
|
||||
|
||||
blocks = len(data) // 8 # number of blocks in row
|
||||
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
||||
|
||||
for block in range(blocks):
|
||||
# Decode next 8-byte block.
|
||||
idx = block * 8
|
||||
color0, color1, bits = struct.unpack_from("<HHI", data, idx)
|
||||
|
||||
r0, g0, b0 = unpack_565(color0)
|
||||
r1, g1, b1 = unpack_565(color1)
|
||||
|
||||
# Decode this block into 4x4 pixels
|
||||
# Accumulate the results onto our 4 row accumulators
|
||||
for j in range(4):
|
||||
for i in range(4):
|
||||
# get next control op and generate a pixel
|
||||
|
||||
control = bits & 3
|
||||
bits = bits >> 2
|
||||
|
||||
a = 0xFF
|
||||
if control == 0:
|
||||
r, g, b = r0, g0, b0
|
||||
elif control == 1:
|
||||
r, g, b = r1, g1, b1
|
||||
elif control == 2:
|
||||
if color0 > color1:
|
||||
r = (2 * r0 + r1) // 3
|
||||
g = (2 * g0 + g1) // 3
|
||||
b = (2 * b0 + b1) // 3
|
||||
else:
|
||||
r = (r0 + r1) // 2
|
||||
g = (g0 + g1) // 2
|
||||
b = (b0 + b1) // 2
|
||||
elif control == 3:
|
||||
if color0 > color1:
|
||||
r = (2 * r1 + r0) // 3
|
||||
g = (2 * g1 + g0) // 3
|
||||
b = (2 * b1 + b0) // 3
|
||||
else:
|
||||
r, g, b, a = 0, 0, 0, 0
|
||||
|
||||
if alpha:
|
||||
ret[j].extend([r, g, b, a])
|
||||
else:
|
||||
ret[j].extend([r, g, b])
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def decode_dxt3(data):
|
||||
"""
|
||||
input: one "row" of data (i.e. will produce 4*width pixels)
|
||||
"""
|
||||
|
||||
blocks = len(data) // 16 # number of blocks in row
|
||||
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
||||
|
||||
for block in range(blocks):
|
||||
idx = block * 16
|
||||
block = data[idx : idx + 16]
|
||||
# Decode next 16-byte block.
|
||||
bits = struct.unpack_from("<8B", block)
|
||||
color0, color1 = struct.unpack_from("<HH", block, 8)
|
||||
|
||||
(code,) = struct.unpack_from("<I", block, 12)
|
||||
|
||||
r0, g0, b0 = unpack_565(color0)
|
||||
r1, g1, b1 = unpack_565(color1)
|
||||
|
||||
for j in range(4):
|
||||
high = False # Do we want the higher bits?
|
||||
for i in range(4):
|
||||
alphacode_index = (4 * j + i) // 2
|
||||
a = bits[alphacode_index]
|
||||
if high:
|
||||
high = False
|
||||
a >>= 4
|
||||
else:
|
||||
high = True
|
||||
a &= 0xF
|
||||
                a *= 17  # scale the 4-bit alpha (0-15) up to the 0-255 range
|
||||
|
||||
color_code = (code >> 2 * (4 * j + i)) & 0x03
|
||||
|
||||
if color_code == 0:
|
||||
r, g, b = r0, g0, b0
|
||||
elif color_code == 1:
|
||||
r, g, b = r1, g1, b1
|
||||
elif color_code == 2:
|
||||
r = (2 * r0 + r1) // 3
|
||||
g = (2 * g0 + g1) // 3
|
||||
b = (2 * b0 + b1) // 3
|
||||
elif color_code == 3:
|
||||
r = (2 * r1 + r0) // 3
|
||||
g = (2 * g1 + g0) // 3
|
||||
b = (2 * b1 + b0) // 3
|
||||
|
||||
ret[j].extend([r, g, b, a])
|
||||
|
||||
return ret
|
||||
|
||||
|
||||


def decode_dxt5(data):
    """
    input: one "row" of data (i.e. will produce 4 * width pixels)
    """

    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block in range(blocks):
        idx = block * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block.
        a0, a1 = struct.unpack_from("<BB", block)

        bits = struct.unpack_from("<6B", block, 2)
        alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
        alphacode2 = bits[0] | (bits[1] << 8)

        color0, color1 = struct.unpack_from("<HH", block, 8)

        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            for i in range(4):
                # get next control op and generate a pixel
                alphacode_index = 3 * (4 * j + i)

                if alphacode_index <= 12:
                    alphacode = (alphacode2 >> alphacode_index) & 0x07
                elif alphacode_index == 15:
                    alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
                else:  # alphacode_index >= 18 and alphacode_index <= 45
                    alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07

                if alphacode == 0:
                    a = a0
                elif alphacode == 1:
                    a = a1
                elif a0 > a1:
                    a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
                elif alphacode == 6:
                    a = 0
                elif alphacode == 7:
                    a = 255
                else:
                    a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret
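

# Illustrative sketch, not part of the original module: each decoder above
# consumes one horizontal strip of 4x4 blocks and returns four bytearrays,
# one per pixel row, which a caller stitches together strip by strip (this
# is what BLP2Decoder._load() does further down).  The "read_strip" callable
# is a hypothetical stand-in for however the caller obtains raw block data.
def _example_decode_dxt1_image(read_strip, width, height, alpha=False):
    linesize = (width + 3) // 4 * 8  # DXT1 packs each 4x4 block into 8 bytes
    data = bytearray()
    for _ in range((height + 3) // 4):
        for row in decode_dxt1(read_strip(linesize), alpha=alpha):
            data += row
    return bytes(data)  # RGB(A) bytes in row-major order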


class BLPFormatError(NotImplementedError):
    pass


def _accept(prefix):
    return prefix[:4] in (b"BLP1", b"BLP2")


class BlpImageFile(ImageFile.ImageFile):
    """
    Blizzard Mipmap Format
    """

    format = "BLP"
    format_description = "Blizzard Mipmap Format"

    def _open(self):
        self.magic = self.fp.read(4)

        self.fp.seek(5, os.SEEK_CUR)
        (self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))

        self.fp.seek(2, os.SEEK_CUR)
        self._size = struct.unpack("<II", self.fp.read(8))

        if self.magic in (b"BLP1", b"BLP2"):
            decoder = self.magic.decode()
        else:
            msg = f"Bad BLP magic {repr(self.magic)}"
            raise BLPFormatError(msg)

        self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
        self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]


class _BLPBaseDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer):
        try:
            self._read_blp_header()
            self._load()
        except struct.error as e:
            msg = "Truncated BLP file"
            raise OSError(msg) from e
        return -1, 0

    def _read_blp_header(self):
        self.fd.seek(4)
        (self._blp_compression,) = struct.unpack("<i", self._safe_read(4))

        (self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
        self.fd.seek(1, os.SEEK_CUR)  # mips

        self.size = struct.unpack("<II", self._safe_read(8))

        if isinstance(self, BLP1Decoder):
            # Only present for BLP1
            (self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
            self.fd.seek(4, os.SEEK_CUR)  # subtype

        self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
        self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))

    def _safe_read(self, length):
        return ImageFile._safe_read(self.fd, length)

    def _read_palette(self):
        ret = []
        for i in range(256):
            try:
                b, g, r, a = struct.unpack("<4B", self._safe_read(4))
            except struct.error:
                break
            ret.append((b, g, r, a))
        return ret

    def _read_bgra(self, palette):
        data = bytearray()
        _data = BytesIO(self._safe_read(self._blp_lengths[0]))
        while True:
            try:
                (offset,) = struct.unpack("<B", _data.read(1))
            except struct.error:
                break
            b, g, r, a = palette[offset]
            d = (r, g, b)
            if self._blp_alpha_depth:
                d += (a,)
            data.extend(d)
        return data


class BLP1Decoder(_BLPBaseDecoder):
    def _load(self):
        if self._blp_compression == Format.JPEG:
            self._decode_jpeg_stream()

        elif self._blp_compression == 1:
            if self._blp_encoding in (4, 5):
                palette = self._read_palette()
                data = self._read_bgra(palette)
                self.set_as_raw(bytes(data))
            else:
                msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)
        else:
            msg = f"Unsupported BLP compression {repr(self._blp_compression)}"
            raise BLPFormatError(msg)

    def _decode_jpeg_stream(self):
        from .JpegImagePlugin import JpegImageFile

        (jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
        jpeg_header = self._safe_read(jpeg_header_size)
        # Skip any padding between the shared JPEG header and the first mipmap.
        self._safe_read(self._blp_offsets[0] - self.fd.tell())
        data = self._safe_read(self._blp_lengths[0])
        data = jpeg_header + data
        data = BytesIO(data)
        image = JpegImageFile(data)
        Image._decompression_bomb_check(image.size)
        if image.mode == "CMYK":
            decoder_name, extents, offset, args = image.tile[0]
            image.tile = [(decoder_name, extents, offset, (args[0], "CMYK"))]
        r, g, b = image.convert("RGB").split()
        image = Image.merge("RGB", (b, g, r))
        self.set_as_raw(image.tobytes())


class BLP2Decoder(_BLPBaseDecoder):
    def _load(self):
        palette = self._read_palette()

        self.fd.seek(self._blp_offsets[0])

        if self._blp_compression == 1:
            # Uncompressed or DirectX compression

            if self._blp_encoding == Encoding.UNCOMPRESSED:
                data = self._read_bgra(palette)

            elif self._blp_encoding == Encoding.DXT:
                data = bytearray()
                if self._blp_alpha_encoding == AlphaEncoding.DXT1:
                    linesize = (self.size[0] + 3) // 4 * 8
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt1(
                            self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
                        ):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt3(self._safe_read(linesize)):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt5(self._safe_read(linesize)):
                            data += d
                else:
                    msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
                    raise BLPFormatError(msg)
            else:
                msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)

        else:
            msg = f"Unknown BLP compression {repr(self._blp_compression)}"
            raise BLPFormatError(msg)

        self.set_as_raw(bytes(data))


class BLPEncoder(ImageFile.PyEncoder):
    _pushes_fd = True

    def _write_palette(self):
        data = b""
        palette = self.im.getpalette("RGBA", "RGBA")
        for i in range(len(palette) // 4):
            r, g, b, a = palette[i * 4 : (i + 1) * 4]
            data += struct.pack("<4B", b, g, r, a)
        while len(data) < 256 * 4:
            data += b"\x00" * 4
        return data

    def encode(self, bufsize):
        palette_data = self._write_palette()

        offset = 20 + 16 * 4 * 2 + len(palette_data)
        data = struct.pack("<16I", offset, *((0,) * 15))

        w, h = self.im.size
        data += struct.pack("<16I", w * h, *((0,) * 15))

        data += palette_data

        for y in range(h):
            for x in range(w):
                data += struct.pack("<B", self.im.getpixel((x, y)))

        return len(data), 0, data


def _save(im, fp, filename):
    if im.mode != "P":
        msg = "Unsupported BLP image mode"
        raise ValueError(msg)

    magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
    fp.write(magic)

    fp.write(struct.pack("<i", 1))  # Uncompressed or DirectX compression
    fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
    fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
    fp.write(struct.pack("<b", 0))  # alpha encoding
    fp.write(struct.pack("<b", 0))  # mips
    fp.write(struct.pack("<II", *im.size))
    if magic == b"BLP1":
        fp.write(struct.pack("<i", 5))
        fp.write(struct.pack("<i", 0))

    ImageFile._save(im, fp, [("BLP", (0, 0) + im.size, 0, im.mode)])


Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
Image.register_extension(BlpImageFile.format, ".blp")
Image.register_decoder("BLP1", BLP1Decoder)
Image.register_decoder("BLP2", BLP2Decoder)

Image.register_save(BlpImageFile.format, _save)
Image.register_encoder("BLP", BLPEncoder)
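
# Illustrative sketch, not part of the original module: with the plugin
# registered above, BLP files go through the normal Pillow API.  The file
# names here are hypothetical; saving requires a palette ("P") image, as
# enforced by _save(), and blp_version="BLP1" selects the older header.
def _example_blp_roundtrip():
    im = Image.open("texture.blp")  # decoded by BLP1Decoder or BLP2Decoder
    im.convert("RGB").convert("P").save("copy.blp", blp_version="BLP2")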
@ -1,471 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# BMP file handler
|
||||
#
|
||||
# Windows (and OS/2) native bitmap storage format.
|
||||
#
|
||||
# history:
|
||||
# 1995-09-01 fl Created
|
||||
# 1996-04-30 fl Added save
|
||||
# 1997-08-27 fl Fixed save of 1-bit images
|
||||
# 1998-03-06 fl Load P images as L where possible
|
||||
# 1998-07-03 fl Load P images as 1 where possible
|
||||
# 1998-12-29 fl Handle small palettes
|
||||
# 2002-12-30 fl Fixed load of 1-bit palette images
|
||||
# 2003-04-21 fl Fixed load of 1-bit monochrome images
|
||||
# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB
|
||||
# Copyright (c) 1995-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from . import Image, ImageFile, ImagePalette
|
||||
from ._binary import i16le as i16
|
||||
from ._binary import i32le as i32
|
||||
from ._binary import o8
|
||||
from ._binary import o16le as o16
|
||||
from ._binary import o32le as o32
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Read BMP file
|
||||
|
||||
BIT2MODE = {
|
||||
# bits => mode, rawmode
|
||||
1: ("P", "P;1"),
|
||||
4: ("P", "P;4"),
|
||||
8: ("P", "P"),
|
||||
16: ("RGB", "BGR;15"),
|
||||
24: ("RGB", "BGR"),
|
||||
32: ("RGB", "BGRX"),
|
||||
}
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:2] == b"BM"
|
||||
|
||||
|
||||
def _dib_accept(prefix):
|
||||
return i32(prefix) in [12, 40, 64, 108, 124]
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Image plugin for the Windows BMP format.
|
||||
# =============================================================================
|
||||
class BmpImageFile(ImageFile.ImageFile):
|
||||
"""Image plugin for the Windows Bitmap format (BMP)"""
|
||||
|
||||
# ------------------------------------------------------------- Description
|
||||
format_description = "Windows Bitmap"
|
||||
format = "BMP"
|
||||
|
||||
# -------------------------------------------------- BMP Compression values
|
||||
COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
|
||||
for k, v in COMPRESSIONS.items():
|
||||
vars()[k] = v
|
||||
|
||||
def _bitmap(self, header=0, offset=0):
|
||||
"""Read relevant info about the BMP"""
|
||||
read, seek = self.fp.read, self.fp.seek
|
||||
if header:
|
||||
seek(header)
|
||||
# read bmp header size @offset 14 (this is part of the header size)
|
||||
file_info = {"header_size": i32(read(4)), "direction": -1}
|
||||
|
||||
# -------------------- If requested, read header at a specific position
|
||||
# read the rest of the bmp header, without its size
|
||||
header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)
|
||||
|
||||
# -------------------------------------------------- IBM OS/2 Bitmap v1
|
||||
# ----- This format has different offsets because of width/height types
|
||||
if file_info["header_size"] == 12:
|
||||
file_info["width"] = i16(header_data, 0)
|
||||
file_info["height"] = i16(header_data, 2)
|
||||
file_info["planes"] = i16(header_data, 4)
|
||||
file_info["bits"] = i16(header_data, 6)
|
||||
file_info["compression"] = self.RAW
|
||||
file_info["palette_padding"] = 3
|
||||
|
||||
# --------------------------------------------- Windows Bitmap v2 to v5
|
||||
# v3, OS/2 v2, v4, v5
|
||||
elif file_info["header_size"] in (40, 64, 108, 124):
|
||||
file_info["y_flip"] = header_data[7] == 0xFF
|
||||
file_info["direction"] = 1 if file_info["y_flip"] else -1
|
||||
file_info["width"] = i32(header_data, 0)
|
||||
file_info["height"] = (
|
||||
i32(header_data, 4)
|
||||
if not file_info["y_flip"]
|
||||
else 2**32 - i32(header_data, 4)
|
||||
)
|
||||
file_info["planes"] = i16(header_data, 8)
|
||||
file_info["bits"] = i16(header_data, 10)
|
||||
file_info["compression"] = i32(header_data, 12)
|
||||
# byte size of pixel data
|
||||
file_info["data_size"] = i32(header_data, 16)
|
||||
file_info["pixels_per_meter"] = (
|
||||
i32(header_data, 20),
|
||||
i32(header_data, 24),
|
||||
)
|
||||
file_info["colors"] = i32(header_data, 28)
|
||||
file_info["palette_padding"] = 4
|
||||
self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
|
||||
if file_info["compression"] == self.BITFIELDS:
|
||||
if len(header_data) >= 52:
|
||||
for idx, mask in enumerate(
|
||||
["r_mask", "g_mask", "b_mask", "a_mask"]
|
||||
):
|
||||
file_info[mask] = i32(header_data, 36 + idx * 4)
|
||||
else:
|
||||
# 40 byte headers only have the three components in the
|
||||
# bitfields masks, ref:
|
||||
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
|
||||
# See also
|
||||
# https://github.com/python-pillow/Pillow/issues/1293
|
||||
# There is a 4th component in the RGBQuad, in the alpha
|
||||
# location, but it is listed as a reserved component,
|
||||
# and it is not generally an alpha channel
|
||||
file_info["a_mask"] = 0x0
|
||||
for mask in ["r_mask", "g_mask", "b_mask"]:
|
||||
file_info[mask] = i32(read(4))
|
||||
file_info["rgb_mask"] = (
|
||||
file_info["r_mask"],
|
||||
file_info["g_mask"],
|
||||
file_info["b_mask"],
|
||||
)
|
||||
file_info["rgba_mask"] = (
|
||||
file_info["r_mask"],
|
||||
file_info["g_mask"],
|
||||
file_info["b_mask"],
|
||||
file_info["a_mask"],
|
||||
)
|
||||
else:
|
||||
msg = f"Unsupported BMP header type ({file_info['header_size']})"
|
||||
raise OSError(msg)
|
||||
|
||||
# ------------------ Special case : header is reported 40, which
|
||||
# ---------------------- is shorter than real size for bpp >= 16
|
||||
self._size = file_info["width"], file_info["height"]
|
||||
|
||||
# ------- If color count was not found in the header, compute from bits
|
||||
file_info["colors"] = (
|
||||
file_info["colors"]
|
||||
if file_info.get("colors", 0)
|
||||
else (1 << file_info["bits"])
|
||||
)
|
||||
if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
|
||||
offset += 4 * file_info["colors"]
|
||||
|
||||
# ---------------------- Check bit depth for unusual unsupported values
|
||||
self._mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
|
||||
if self.mode is None:
|
||||
msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
|
||||
raise OSError(msg)
|
||||
|
||||
# ---------------- Process BMP with Bitfields compression (not palette)
|
||||
decoder_name = "raw"
|
||||
if file_info["compression"] == self.BITFIELDS:
|
||||
SUPPORTED = {
|
||||
32: [
|
||||
(0xFF0000, 0xFF00, 0xFF, 0x0),
|
||||
(0xFF000000, 0xFF0000, 0xFF00, 0x0),
|
||||
(0xFF000000, 0xFF0000, 0xFF00, 0xFF),
|
||||
(0xFF, 0xFF00, 0xFF0000, 0xFF000000),
|
||||
(0xFF0000, 0xFF00, 0xFF, 0xFF000000),
|
||||
(0x0, 0x0, 0x0, 0x0),
|
||||
],
|
||||
24: [(0xFF0000, 0xFF00, 0xFF)],
|
||||
16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
|
||||
}
|
||||
MASK_MODES = {
|
||||
(32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
|
||||
(32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
|
||||
(32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
|
||||
(32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
|
||||
(32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
|
||||
(32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
|
||||
(24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
|
||||
(16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
|
||||
(16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
|
||||
}
|
||||
if file_info["bits"] in SUPPORTED:
|
||||
if (
|
||||
file_info["bits"] == 32
|
||||
and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
|
||||
):
|
||||
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
|
||||
self._mode = "RGBA" if "A" in raw_mode else self.mode
|
||||
elif (
|
||||
file_info["bits"] in (24, 16)
|
||||
and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
|
||||
):
|
||||
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
|
||||
else:
|
||||
msg = "Unsupported BMP bitfields layout"
|
||||
raise OSError(msg)
|
||||
else:
|
||||
msg = "Unsupported BMP bitfields layout"
|
||||
raise OSError(msg)
|
||||
elif file_info["compression"] == self.RAW:
|
||||
if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
|
||||
raw_mode, self._mode = "BGRA", "RGBA"
|
||||
elif file_info["compression"] in (self.RLE8, self.RLE4):
|
||||
decoder_name = "bmp_rle"
|
||||
else:
|
||||
msg = f"Unsupported BMP compression ({file_info['compression']})"
|
||||
raise OSError(msg)
|
||||
|
||||
# --------------- Once the header is processed, process the palette/LUT
|
||||
if self.mode == "P": # Paletted for 1, 4 and 8 bit images
|
||||
# ---------------------------------------------------- 1-bit images
|
||||
if not (0 < file_info["colors"] <= 65536):
|
||||
msg = f"Unsupported BMP Palette size ({file_info['colors']})"
|
||||
raise OSError(msg)
|
||||
else:
|
||||
padding = file_info["palette_padding"]
|
||||
palette = read(padding * file_info["colors"])
|
||||
grayscale = True
|
||||
indices = (
|
||||
(0, 255)
|
||||
if file_info["colors"] == 2
|
||||
else list(range(file_info["colors"]))
|
||||
)
|
||||
|
||||
# ----------------- Check if grayscale and ignore palette if so
|
||||
for ind, val in enumerate(indices):
|
||||
rgb = palette[ind * padding : ind * padding + 3]
|
||||
if rgb != o8(val) * 3:
|
||||
grayscale = False
|
||||
|
||||
# ------- If all colors are gray, white or black, ditch palette
|
||||
if grayscale:
|
||||
self._mode = "1" if file_info["colors"] == 2 else "L"
|
||||
raw_mode = self.mode
|
||||
else:
|
||||
self._mode = "P"
|
||||
self.palette = ImagePalette.raw(
|
||||
"BGRX" if padding == 4 else "BGR", palette
|
||||
)
|
||||
|
||||
# ---------------------------- Finally set the tile data for the plugin
|
||||
self.info["compression"] = file_info["compression"]
|
||||
args = [raw_mode]
|
||||
if decoder_name == "bmp_rle":
|
||||
args.append(file_info["compression"] == self.RLE4)
|
||||
else:
|
||||
args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
|
||||
args.append(file_info["direction"])
|
||||
self.tile = [
|
||||
(
|
||||
decoder_name,
|
||||
(0, 0, file_info["width"], file_info["height"]),
|
||||
offset or self.fp.tell(),
|
||||
tuple(args),
|
||||
)
|
||||
]
|
||||
|
||||
def _open(self):
|
||||
"""Open file, check magic number and read header"""
|
||||
# read 14 bytes: magic number, filesize, reserved, header final offset
|
||||
head_data = self.fp.read(14)
|
||||
# choke if the file does not have the required magic bytes
|
||||
if not _accept(head_data):
|
||||
msg = "Not a BMP file"
|
||||
raise SyntaxError(msg)
|
||||
# read the start position of the BMP image data (u32)
|
||||
offset = i32(head_data, 10)
|
||||
# load bitmap information (offset=raster info)
|
||||
self._bitmap(offset=offset)
|
||||
|
||||
|
||||
class BmpRleDecoder(ImageFile.PyDecoder):
|
||||
_pulls_fd = True
|
||||
|
||||
def decode(self, buffer):
|
||||
rle4 = self.args[1]
|
||||
data = bytearray()
|
||||
x = 0
|
||||
while len(data) < self.state.xsize * self.state.ysize:
|
||||
pixels = self.fd.read(1)
|
||||
byte = self.fd.read(1)
|
||||
if not pixels or not byte:
|
||||
break
|
||||
num_pixels = pixels[0]
|
||||
if num_pixels:
|
||||
# encoded mode
|
||||
if x + num_pixels > self.state.xsize:
|
||||
# Too much data for row
|
||||
num_pixels = max(0, self.state.xsize - x)
|
||||
if rle4:
|
||||
first_pixel = o8(byte[0] >> 4)
|
||||
second_pixel = o8(byte[0] & 0x0F)
|
||||
for index in range(num_pixels):
|
||||
if index % 2 == 0:
|
||||
data += first_pixel
|
||||
else:
|
||||
data += second_pixel
|
||||
else:
|
||||
data += byte * num_pixels
|
||||
x += num_pixels
|
||||
else:
|
||||
if byte[0] == 0:
|
||||
# end of line
|
||||
while len(data) % self.state.xsize != 0:
|
||||
data += b"\x00"
|
||||
x = 0
|
||||
elif byte[0] == 1:
|
||||
# end of bitmap
|
||||
break
|
||||
elif byte[0] == 2:
|
||||
# delta
|
||||
bytes_read = self.fd.read(2)
|
||||
if len(bytes_read) < 2:
|
||||
break
|
||||
right, up = self.fd.read(2)
|
||||
data += b"\x00" * (right + up * self.state.xsize)
|
||||
x = len(data) % self.state.xsize
|
||||
else:
|
||||
# absolute mode
|
||||
if rle4:
|
||||
# 2 pixels per byte
|
||||
byte_count = byte[0] // 2
|
||||
bytes_read = self.fd.read(byte_count)
|
||||
for byte_read in bytes_read:
|
||||
data += o8(byte_read >> 4)
|
||||
data += o8(byte_read & 0x0F)
|
||||
else:
|
||||
byte_count = byte[0]
|
||||
bytes_read = self.fd.read(byte_count)
|
||||
data += bytes_read
|
||||
if len(bytes_read) < byte_count:
|
||||
break
|
||||
x += byte[0]
|
||||
|
||||
# align to 16-bit word boundary
|
||||
if self.fd.tell() % 2 != 0:
|
||||
self.fd.seek(1, os.SEEK_CUR)
|
||||
rawmode = "L" if self.mode == "L" else "P"
|
||||
self.set_as_raw(bytes(data), (rawmode, 0, self.args[-1]))
|
||||
return -1, 0
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Image plugin for the DIB format (BMP alias)
|
||||
# =============================================================================
|
||||
class DibImageFile(BmpImageFile):
|
||||
format = "DIB"
|
||||
format_description = "Windows Bitmap"
|
||||
|
||||
def _open(self):
|
||||
self._bitmap()
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Write BMP file
|
||||
|
||||
|
||||
SAVE = {
|
||||
"1": ("1", 1, 2),
|
||||
"L": ("L", 8, 256),
|
||||
"P": ("P", 8, 256),
|
||||
"RGB": ("BGR", 24, 0),
|
||||
"RGBA": ("BGRA", 32, 0),
|
||||
}
|
||||
|
||||
|
||||
def _dib_save(im, fp, filename):
|
||||
_save(im, fp, filename, False)
|
||||
|
||||
|
||||
def _save(im, fp, filename, bitmap_header=True):
|
||||
try:
|
||||
rawmode, bits, colors = SAVE[im.mode]
|
||||
except KeyError as e:
|
||||
msg = f"cannot write mode {im.mode} as BMP"
|
||||
raise OSError(msg) from e
|
||||
|
||||
info = im.encoderinfo
|
||||
|
||||
dpi = info.get("dpi", (96, 96))
|
||||
|
||||
# 1 meter == 39.3701 inches
|
||||
ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)
|
||||
|
||||
stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
|
||||
header = 40 # or 64 for OS/2 version 2
|
||||
image = stride * im.size[1]
|
||||
|
||||
if im.mode == "1":
|
||||
palette = b"".join(o8(i) * 4 for i in (0, 255))
|
||||
elif im.mode == "L":
|
||||
palette = b"".join(o8(i) * 4 for i in range(256))
|
||||
elif im.mode == "P":
|
||||
palette = im.im.getpalette("RGB", "BGRX")
|
||||
colors = len(palette) // 4
|
||||
else:
|
||||
palette = None
|
||||
|
||||
# bitmap header
|
||||
if bitmap_header:
|
||||
offset = 14 + header + colors * 4
|
||||
file_size = offset + image
|
||||
if file_size > 2**32 - 1:
|
||||
msg = "File size is too large for the BMP format"
|
||||
raise ValueError(msg)
|
||||
fp.write(
|
||||
b"BM" # file type (magic)
|
||||
+ o32(file_size) # file size
|
||||
+ o32(0) # reserved
|
||||
+ o32(offset) # image data offset
|
||||
)
|
||||
|
||||
# bitmap info header
|
||||
fp.write(
|
||||
o32(header) # info header size
|
||||
+ o32(im.size[0]) # width
|
||||
+ o32(im.size[1]) # height
|
||||
+ o16(1) # planes
|
||||
+ o16(bits) # depth
|
||||
+ o32(0) # compression (0=uncompressed)
|
||||
+ o32(image) # size of bitmap
|
||||
+ o32(ppm[0]) # resolution
|
||||
+ o32(ppm[1]) # resolution
|
||||
+ o32(colors) # colors used
|
||||
+ o32(colors) # colors important
|
||||
)
|
||||
|
||||
fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
|
||||
|
||||
if palette:
|
||||
fp.write(palette)
|
||||
|
||||
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
|
||||
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
|
||||
Image.register_save(BmpImageFile.format, _save)
|
||||
|
||||
Image.register_extension(BmpImageFile.format, ".bmp")
|
||||
|
||||
Image.register_mime(BmpImageFile.format, "image/bmp")
|
||||
|
||||
Image.register_decoder("bmp_rle", BmpRleDecoder)
|
||||
|
||||
Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
|
||||
Image.register_save(DibImageFile.format, _dib_save)
|
||||
|
||||
Image.register_extension(DibImageFile.format, ".dib")
|
||||
|
||||
Image.register_mime(DibImageFile.format, "image/bmp")
|
@ -1,74 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile

_handler = None


def register_handler(handler):
    """
    Install application-specific BUFR image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler


# --------------------------------------------------------------------
# Image adapter


def _accept(prefix):
    return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"


class BufrStubImageFile(ImageFile.StubImageFile):
    format = "BUFR"
    format_description = "BUFR"

    def _open(self):
        offset = self.fp.tell()

        if not _accept(self.fp.read(4)):
            msg = "Not a BUFR file"
            raise SyntaxError(msg)

        self.fp.seek(offset)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        return _handler


def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
        msg = "BUFR save handler not installed"
        raise OSError(msg)
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)

Image.register_extension(BufrStubImageFile.format, ".bufr")
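
# Illustrative sketch, not part of the original module: BUFR support is a
# stub, so an application must install its own handler before opening a
# file.  "my_handler" is a hypothetical object providing the open()/load()
# (and optionally save()) methods that ImageFile.StubImageFile delegates to,
# and the file name is made up.
def _example_install_bufr_handler(my_handler):
    register_handler(my_handler)
    return Image.open("observations.bufr")  # _open() defers to my_handler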
@ -1,121 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# a class to read from a container file
#
# History:
# 1995-06-18 fl     Created
# 1995-09-07 fl     Added readline(), readlines()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import io
from typing import IO, AnyStr, Generic, Literal


class ContainerIO(Generic[AnyStr]):
    """
    A file object that provides read access to a part of an existing
    file (for example a TAR file).
    """

    def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
        """
        Create file object.

        :param file: Existing file.
        :param offset: Start of region, in bytes.
        :param length: Size of region, in bytes.
        """
        self.fh: IO[AnyStr] = file
        self.pos = 0
        self.offset = offset
        self.length = length
        self.fh.seek(offset)

    ##
    # Always false.

    def isatty(self) -> bool:
        return False

    def seek(self, offset: int, mode: Literal[0, 1, 2] = io.SEEK_SET) -> None:
        """
        Move file pointer.

        :param offset: Offset in bytes.
        :param mode: Starting position. Use 0 for beginning of region, 1
           for current offset, and 2 for end of region.  You cannot move
           the pointer outside the defined region.
        """
        if mode == 1:
            self.pos = self.pos + offset
        elif mode == 2:
            self.pos = self.length + offset
        else:
            self.pos = offset
        # clamp
        self.pos = max(0, min(self.pos, self.length))
        self.fh.seek(self.offset + self.pos)

    def tell(self) -> int:
        """
        Get current file pointer.

        :returns: Offset from start of region, in bytes.
        """
        return self.pos

    def read(self, n: int = 0) -> AnyStr:
        """
        Read data.

        :param n: Number of bytes to read. If omitted or zero,
            read until end of region.
        :returns: An 8-bit string.
        """
        if n:
            n = min(n, self.length - self.pos)
        else:
            n = self.length - self.pos
        if not n:  # EOF
            return b"" if "b" in self.fh.mode else ""  # type: ignore[return-value]
        self.pos = self.pos + n
        return self.fh.read(n)

    def readline(self) -> AnyStr:
        """
        Read a line of text.

        :returns: An 8-bit string.
        """
        s: AnyStr = b"" if "b" in self.fh.mode else ""  # type: ignore[assignment]
        newline_character = b"\n" if "b" in self.fh.mode else "\n"
        while True:
            c = self.read(1)
            if not c:
                break
            s = s + c
            if c == newline_character:
                break
        return s

    def readlines(self) -> list[AnyStr]:
        """
        Read multiple lines of text.

        :returns: A list of 8-bit strings.
        """
        lines = []
        while True:
            s = self.readline()
            if not s:
                break
            lines.append(s)
        return lines
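
# Illustrative sketch, not part of the original module: ContainerIO wraps an
# already-open file and exposes a fixed window of it as a file-like object
# (TarIO uses it to hand embedded images to Image.open()).  The path and
# offsets below are hypothetical.
def _example_read_embedded_region(path, offset, length):
    with open(path, "rb") as fh:
        region = ContainerIO(fh, offset, length)
        return region.read()  # bytes of that region only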
@ -1,75 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# Windows Cursor support for PIL
#
# notes:
#       uses BmpImagePlugin.py to read the bitmap data.
#
# history:
#       96-05-27 fl     Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import BmpImagePlugin, Image
from ._binary import i16le as i16
from ._binary import i32le as i32

#
# --------------------------------------------------------------------


def _accept(prefix):
    return prefix[:4] == b"\0\0\2\0"


##
# Image plugin for Windows Cursor files.


class CurImageFile(BmpImagePlugin.BmpImageFile):
    format = "CUR"
    format_description = "Windows Cursor"

    def _open(self):
        offset = self.fp.tell()

        # check magic
        s = self.fp.read(6)
        if not _accept(s):
            msg = "not a CUR file"
            raise SyntaxError(msg)

        # pick the largest cursor in the file
        m = b""
        for i in range(i16(s, 4)):
            s = self.fp.read(16)
            if not m:
                m = s
            elif s[0] > m[0] and s[1] > m[1]:
                m = s
        if not m:
            msg = "No cursors were found"
            raise TypeError(msg)

        # load as bitmap
        self._bitmap(i32(m, 12) + offset)

        # patch up the bitmap height
        self._size = self.size[0], self.size[1] // 2
        d, e, o, a = self.tile[0]
        self.tile[0] = d, (0, 0) + self.size, o, a


#
# --------------------------------------------------------------------

Image.register_open(CurImageFile.format, CurImageFile, _accept)

Image.register_extension(CurImageFile.format, ".cur")
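
# Illustrative sketch, not part of the original module: a .cur file is read
# through the regular Image.open() API; the plugin picks the largest cursor
# in the directory and reuses the BMP loader for its pixels.  The file name
# is hypothetical.
def _example_open_cursor():
    cursor = Image.open("pointer.cur")
    return cursor.size  # dimensions of the largest cursor image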
@ -1,80 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications.  Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl   Created
# 1996-03-20 fl   Properly derived from PcxImageFile.
# 1998-07-15 fl   Renamed offset attribute to avoid name clash
# 2002-07-30 fl   Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image
from ._binary import i32le as i32
from .PcxImagePlugin import PcxImageFile

MAGIC = 0x3ADE68B1  # QUIZ: what's this value, then?


def _accept(prefix):
    return len(prefix) >= 4 and i32(prefix) == MAGIC


##
# Image plugin for the Intel DCX format.


class DcxImageFile(PcxImageFile):
    format = "DCX"
    format_description = "Intel DCX"
    _close_exclusive_fp_after_loading = False

    def _open(self):
        # Header
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a DCX file"
            raise SyntaxError(msg)

        # Component directory
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        self._fp = self.fp
        self.frame = None
        self.n_frames = len(self._offset)
        self.is_animated = self.n_frames > 1
        self.seek(0)

    def seek(self, frame):
        if not self._seek_check(frame):
            return
        self.frame = frame
        self.fp = self._fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self):
        return self.frame


Image.register_open(DcxImageFile.format, DcxImageFile, _accept)

Image.register_extension(DcxImageFile.format, ".dcx")
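
# Illustrative sketch, not part of the original module: DCX is a multi-frame
# container, so its frames are walked with seek()/tell() like other multipage
# formats.  The default file name is hypothetical.
def _example_iter_dcx_frames(path="fax.dcx"):
    im = Image.open(path)
    for frame in range(im.n_frames):
        im.seek(frame)
        yield im.copy()  # detach the current PCX frame from the container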
@ -1,566 +0,0 @@
|
||||
"""
|
||||
A Pillow loader for .dds files (S3TC-compressed aka DXTC)
|
||||
Jerome Leclanche <jerome@leclan.ch>
|
||||
|
||||
Documentation:
|
||||
https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt
|
||||
|
||||
The contents of this file are hereby released in the public domain (CC0)
|
||||
Full text of the CC0 license:
|
||||
https://creativecommons.org/publicdomain/zero/1.0/
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import struct
|
||||
import sys
|
||||
from enum import IntEnum, IntFlag
|
||||
|
||||
from . import Image, ImageFile, ImagePalette
|
||||
from ._binary import i32le as i32
|
||||
from ._binary import o8
|
||||
from ._binary import o32le as o32
|
||||
|
||||
# Magic ("DDS ")
|
||||
DDS_MAGIC = 0x20534444
|
||||
|
||||
|
||||
# DDS flags
|
||||
class DDSD(IntFlag):
|
||||
CAPS = 0x1
|
||||
HEIGHT = 0x2
|
||||
WIDTH = 0x4
|
||||
PITCH = 0x8
|
||||
PIXELFORMAT = 0x1000
|
||||
MIPMAPCOUNT = 0x20000
|
||||
LINEARSIZE = 0x80000
|
||||
DEPTH = 0x800000
|
||||
|
||||
|
||||
# DDS caps
|
||||
class DDSCAPS(IntFlag):
|
||||
COMPLEX = 0x8
|
||||
TEXTURE = 0x1000
|
||||
MIPMAP = 0x400000
|
||||
|
||||
|
||||
class DDSCAPS2(IntFlag):
|
||||
CUBEMAP = 0x200
|
||||
CUBEMAP_POSITIVEX = 0x400
|
||||
CUBEMAP_NEGATIVEX = 0x800
|
||||
CUBEMAP_POSITIVEY = 0x1000
|
||||
CUBEMAP_NEGATIVEY = 0x2000
|
||||
CUBEMAP_POSITIVEZ = 0x4000
|
||||
CUBEMAP_NEGATIVEZ = 0x8000
|
||||
VOLUME = 0x200000
|
||||
|
||||
|
||||
# Pixel Format
|
||||
class DDPF(IntFlag):
|
||||
ALPHAPIXELS = 0x1
|
||||
ALPHA = 0x2
|
||||
FOURCC = 0x4
|
||||
PALETTEINDEXED8 = 0x20
|
||||
RGB = 0x40
|
||||
LUMINANCE = 0x20000
|
||||
|
||||
|
||||
# dxgiformat.h
|
||||
class DXGI_FORMAT(IntEnum):
|
||||
UNKNOWN = 0
|
||||
R32G32B32A32_TYPELESS = 1
|
||||
R32G32B32A32_FLOAT = 2
|
||||
R32G32B32A32_UINT = 3
|
||||
R32G32B32A32_SINT = 4
|
||||
R32G32B32_TYPELESS = 5
|
||||
R32G32B32_FLOAT = 6
|
||||
R32G32B32_UINT = 7
|
||||
R32G32B32_SINT = 8
|
||||
R16G16B16A16_TYPELESS = 9
|
||||
R16G16B16A16_FLOAT = 10
|
||||
R16G16B16A16_UNORM = 11
|
||||
R16G16B16A16_UINT = 12
|
||||
R16G16B16A16_SNORM = 13
|
||||
R16G16B16A16_SINT = 14
|
||||
R32G32_TYPELESS = 15
|
||||
R32G32_FLOAT = 16
|
||||
R32G32_UINT = 17
|
||||
R32G32_SINT = 18
|
||||
R32G8X24_TYPELESS = 19
|
||||
D32_FLOAT_S8X24_UINT = 20
|
||||
R32_FLOAT_X8X24_TYPELESS = 21
|
||||
X32_TYPELESS_G8X24_UINT = 22
|
||||
R10G10B10A2_TYPELESS = 23
|
||||
R10G10B10A2_UNORM = 24
|
||||
R10G10B10A2_UINT = 25
|
||||
R11G11B10_FLOAT = 26
|
||||
R8G8B8A8_TYPELESS = 27
|
||||
R8G8B8A8_UNORM = 28
|
||||
R8G8B8A8_UNORM_SRGB = 29
|
||||
R8G8B8A8_UINT = 30
|
||||
R8G8B8A8_SNORM = 31
|
||||
R8G8B8A8_SINT = 32
|
||||
R16G16_TYPELESS = 33
|
||||
R16G16_FLOAT = 34
|
||||
R16G16_UNORM = 35
|
||||
R16G16_UINT = 36
|
||||
R16G16_SNORM = 37
|
||||
R16G16_SINT = 38
|
||||
R32_TYPELESS = 39
|
||||
D32_FLOAT = 40
|
||||
R32_FLOAT = 41
|
||||
R32_UINT = 42
|
||||
R32_SINT = 43
|
||||
R24G8_TYPELESS = 44
|
||||
D24_UNORM_S8_UINT = 45
|
||||
R24_UNORM_X8_TYPELESS = 46
|
||||
X24_TYPELESS_G8_UINT = 47
|
||||
R8G8_TYPELESS = 48
|
||||
R8G8_UNORM = 49
|
||||
R8G8_UINT = 50
|
||||
R8G8_SNORM = 51
|
||||
R8G8_SINT = 52
|
||||
R16_TYPELESS = 53
|
||||
R16_FLOAT = 54
|
||||
D16_UNORM = 55
|
||||
R16_UNORM = 56
|
||||
R16_UINT = 57
|
||||
R16_SNORM = 58
|
||||
R16_SINT = 59
|
||||
R8_TYPELESS = 60
|
||||
R8_UNORM = 61
|
||||
R8_UINT = 62
|
||||
R8_SNORM = 63
|
||||
R8_SINT = 64
|
||||
A8_UNORM = 65
|
||||
R1_UNORM = 66
|
||||
R9G9B9E5_SHAREDEXP = 67
|
||||
R8G8_B8G8_UNORM = 68
|
||||
G8R8_G8B8_UNORM = 69
|
||||
BC1_TYPELESS = 70
|
||||
BC1_UNORM = 71
|
||||
BC1_UNORM_SRGB = 72
|
||||
BC2_TYPELESS = 73
|
||||
BC2_UNORM = 74
|
||||
BC2_UNORM_SRGB = 75
|
||||
BC3_TYPELESS = 76
|
||||
BC3_UNORM = 77
|
||||
BC3_UNORM_SRGB = 78
|
||||
BC4_TYPELESS = 79
|
||||
BC4_UNORM = 80
|
||||
BC4_SNORM = 81
|
||||
BC5_TYPELESS = 82
|
||||
BC5_UNORM = 83
|
||||
BC5_SNORM = 84
|
||||
B5G6R5_UNORM = 85
|
||||
B5G5R5A1_UNORM = 86
|
||||
B8G8R8A8_UNORM = 87
|
||||
B8G8R8X8_UNORM = 88
|
||||
R10G10B10_XR_BIAS_A2_UNORM = 89
|
||||
B8G8R8A8_TYPELESS = 90
|
||||
B8G8R8A8_UNORM_SRGB = 91
|
||||
B8G8R8X8_TYPELESS = 92
|
||||
B8G8R8X8_UNORM_SRGB = 93
|
||||
BC6H_TYPELESS = 94
|
||||
BC6H_UF16 = 95
|
||||
BC6H_SF16 = 96
|
||||
BC7_TYPELESS = 97
|
||||
BC7_UNORM = 98
|
||||
BC7_UNORM_SRGB = 99
|
||||
AYUV = 100
|
||||
Y410 = 101
|
||||
Y416 = 102
|
||||
NV12 = 103
|
||||
P010 = 104
|
||||
P016 = 105
|
||||
OPAQUE_420 = 106
|
||||
YUY2 = 107
|
||||
Y210 = 108
|
||||
Y216 = 109
|
||||
NV11 = 110
|
||||
AI44 = 111
|
||||
IA44 = 112
|
||||
P8 = 113
|
||||
A8P8 = 114
|
||||
B4G4R4A4_UNORM = 115
|
||||
P208 = 130
|
||||
V208 = 131
|
||||
V408 = 132
|
||||
SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189
|
||||
SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190
|
||||
|
||||
|
||||
class D3DFMT(IntEnum):
|
||||
UNKNOWN = 0
|
||||
R8G8B8 = 20
|
||||
A8R8G8B8 = 21
|
||||
X8R8G8B8 = 22
|
||||
R5G6B5 = 23
|
||||
X1R5G5B5 = 24
|
||||
A1R5G5B5 = 25
|
||||
A4R4G4B4 = 26
|
||||
R3G3B2 = 27
|
||||
A8 = 28
|
||||
A8R3G3B2 = 29
|
||||
X4R4G4B4 = 30
|
||||
A2B10G10R10 = 31
|
||||
A8B8G8R8 = 32
|
||||
X8B8G8R8 = 33
|
||||
G16R16 = 34
|
||||
A2R10G10B10 = 35
|
||||
A16B16G16R16 = 36
|
||||
A8P8 = 40
|
||||
P8 = 41
|
||||
L8 = 50
|
||||
A8L8 = 51
|
||||
A4L4 = 52
|
||||
V8U8 = 60
|
||||
L6V5U5 = 61
|
||||
X8L8V8U8 = 62
|
||||
Q8W8V8U8 = 63
|
||||
V16U16 = 64
|
||||
A2W10V10U10 = 67
|
||||
D16_LOCKABLE = 70
|
||||
D32 = 71
|
||||
D15S1 = 73
|
||||
D24S8 = 75
|
||||
D24X8 = 77
|
||||
D24X4S4 = 79
|
||||
D16 = 80
|
||||
D32F_LOCKABLE = 82
|
||||
D24FS8 = 83
|
||||
D32_LOCKABLE = 84
|
||||
S8_LOCKABLE = 85
|
||||
L16 = 81
|
||||
VERTEXDATA = 100
|
||||
INDEX16 = 101
|
||||
INDEX32 = 102
|
||||
Q16W16V16U16 = 110
|
||||
R16F = 111
|
||||
G16R16F = 112
|
||||
A16B16G16R16F = 113
|
||||
R32F = 114
|
||||
G32R32F = 115
|
||||
A32B32G32R32F = 116
|
||||
CxV8U8 = 117
|
||||
A1 = 118
|
||||
A2B10G10R10_XR_BIAS = 119
|
||||
BINARYBUFFER = 199
|
||||
|
||||
UYVY = i32(b"UYVY")
|
||||
R8G8_B8G8 = i32(b"RGBG")
|
||||
YUY2 = i32(b"YUY2")
|
||||
G8R8_G8B8 = i32(b"GRGB")
|
||||
DXT1 = i32(b"DXT1")
|
||||
DXT2 = i32(b"DXT2")
|
||||
DXT3 = i32(b"DXT3")
|
||||
DXT4 = i32(b"DXT4")
|
||||
DXT5 = i32(b"DXT5")
|
||||
DX10 = i32(b"DX10")
|
||||
BC4S = i32(b"BC4S")
|
||||
BC4U = i32(b"BC4U")
|
||||
BC5S = i32(b"BC5S")
|
||||
BC5U = i32(b"BC5U")
|
||||
ATI1 = i32(b"ATI1")
|
||||
ATI2 = i32(b"ATI2")
|
||||
MULTI2_ARGB8 = i32(b"MET1")
|
||||
|
||||
|
||||
# Backward compatibility layer
|
||||
module = sys.modules[__name__]
|
||||
for item in DDSD:
|
||||
setattr(module, "DDSD_" + item.name, item.value)
|
||||
for item in DDSCAPS:
|
||||
setattr(module, "DDSCAPS_" + item.name, item.value)
|
||||
for item in DDSCAPS2:
|
||||
setattr(module, "DDSCAPS2_" + item.name, item.value)
|
||||
for item in DDPF:
|
||||
setattr(module, "DDPF_" + item.name, item.value)
|
||||
|
||||
DDS_FOURCC = DDPF.FOURCC
|
||||
DDS_RGB = DDPF.RGB
|
||||
DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS
|
||||
DDS_LUMINANCE = DDPF.LUMINANCE
|
||||
DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS
|
||||
DDS_ALPHA = DDPF.ALPHA
|
||||
DDS_PAL8 = DDPF.PALETTEINDEXED8
|
||||
|
||||
DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
|
||||
DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT
|
||||
DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH
|
||||
DDS_HEADER_FLAGS_PITCH = DDSD.PITCH
|
||||
DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE
|
||||
|
||||
DDS_HEIGHT = DDSD.HEIGHT
|
||||
DDS_WIDTH = DDSD.WIDTH
|
||||
|
||||
DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE
|
||||
DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP
|
||||
DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX
|
||||
|
||||
DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX
|
||||
DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX
|
||||
DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY
|
||||
DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY
|
||||
DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ
|
||||
DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ
|
||||
|
||||
DXT1_FOURCC = D3DFMT.DXT1
|
||||
DXT3_FOURCC = D3DFMT.DXT3
|
||||
DXT5_FOURCC = D3DFMT.DXT5
|
||||
|
||||
DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS
|
||||
DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM
|
||||
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB
|
||||
DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS
|
||||
DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM
|
||||
DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM
|
||||
DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16
|
||||
DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16
|
||||
DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS
|
||||
DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM
|
||||
DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB
|
||||
|
||||
|
||||
class DdsImageFile(ImageFile.ImageFile):
|
||||
format = "DDS"
|
||||
format_description = "DirectDraw Surface"
|
||||
|
||||
def _open(self):
|
||||
if not _accept(self.fp.read(4)):
|
||||
msg = "not a DDS file"
|
||||
raise SyntaxError(msg)
|
||||
(header_size,) = struct.unpack("<I", self.fp.read(4))
|
||||
if header_size != 124:
|
||||
msg = f"Unsupported header size {repr(header_size)}"
|
||||
raise OSError(msg)
|
||||
header_bytes = self.fp.read(header_size - 4)
|
||||
if len(header_bytes) != 120:
|
||||
msg = f"Incomplete header: {len(header_bytes)} bytes"
|
||||
raise OSError(msg)
|
||||
header = io.BytesIO(header_bytes)
|
||||
|
||||
flags, height, width = struct.unpack("<3I", header.read(12))
|
||||
self._size = (width, height)
|
||||
extents = (0, 0) + self.size
|
||||
|
||||
pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
|
||||
struct.unpack("<11I", header.read(44)) # reserved
|
||||
|
||||
# pixel format
|
||||
pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
|
||||
n = 0
|
||||
rawmode = None
|
||||
if pfflags & DDPF.RGB:
|
||||
# Texture contains uncompressed RGB data
|
||||
if pfflags & DDPF.ALPHAPIXELS:
|
||||
self._mode = "RGBA"
|
||||
mask_count = 4
|
||||
else:
|
||||
self._mode = "RGB"
|
||||
mask_count = 3
|
||||
|
||||
masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
|
||||
self.tile = [("dds_rgb", extents, 0, (bitcount, masks))]
|
||||
return
|
||||
elif pfflags & DDPF.LUMINANCE:
|
||||
if bitcount == 8:
|
||||
self._mode = "L"
|
||||
elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
|
||||
self._mode = "LA"
|
||||
else:
|
||||
msg = f"Unsupported bitcount {bitcount} for {pfflags}"
|
||||
raise OSError(msg)
|
||||
elif pfflags & DDPF.PALETTEINDEXED8:
|
||||
self._mode = "P"
|
||||
self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
|
||||
elif pfflags & DDPF.FOURCC:
|
||||
offset = header_size + 4
|
||||
if fourcc == D3DFMT.DXT1:
|
||||
self._mode = "RGBA"
|
||||
self.pixel_format = "DXT1"
|
||||
n = 1
|
||||
elif fourcc == D3DFMT.DXT3:
|
||||
self._mode = "RGBA"
|
||||
self.pixel_format = "DXT3"
|
||||
n = 2
|
||||
elif fourcc == D3DFMT.DXT5:
|
||||
self._mode = "RGBA"
|
||||
self.pixel_format = "DXT5"
|
||||
n = 3
|
||||
elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
|
||||
self._mode = "L"
|
||||
self.pixel_format = "BC4"
|
||||
n = 4
|
||||
elif fourcc == D3DFMT.BC5S:
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC5S"
|
||||
n = 5
|
||||
elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC5"
|
||||
n = 5
|
||||
elif fourcc == D3DFMT.DX10:
|
||||
offset += 20
|
||||
# ignoring flags which pertain to volume textures and cubemaps
|
||||
(dxgi_format,) = struct.unpack("<I", self.fp.read(4))
|
||||
self.fp.read(16)
|
||||
if dxgi_format in (
|
||||
DXGI_FORMAT.BC1_UNORM,
|
||||
DXGI_FORMAT.BC1_TYPELESS,
|
||||
):
|
||||
self._mode = "RGBA"
|
||||
self.pixel_format = "BC1"
|
||||
n = 1
|
||||
elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
|
||||
self._mode = "L"
|
||||
self.pixel_format = "BC4"
|
||||
n = 4
|
||||
elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC5"
|
||||
n = 5
|
||||
elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC5S"
|
||||
n = 5
|
||||
elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC6H"
|
||||
n = 6
|
||||
elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
|
||||
self._mode = "RGB"
|
||||
self.pixel_format = "BC6HS"
|
||||
n = 6
|
||||
elif dxgi_format in (
|
||||
DXGI_FORMAT.BC7_TYPELESS,
|
||||
DXGI_FORMAT.BC7_UNORM,
|
||||
DXGI_FORMAT.BC7_UNORM_SRGB,
|
||||
):
|
||||
self._mode = "RGBA"
|
||||
self.pixel_format = "BC7"
|
||||
n = 7
|
||||
if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
|
||||
self.info["gamma"] = 1 / 2.2
|
||||
elif dxgi_format in (
|
||||
DXGI_FORMAT.R8G8B8A8_TYPELESS,
|
||||
DXGI_FORMAT.R8G8B8A8_UNORM,
|
||||
DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
|
||||
):
|
||||
self._mode = "RGBA"
|
||||
if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
|
||||
self.info["gamma"] = 1 / 2.2
|
||||
else:
|
||||
msg = f"Unimplemented DXGI format {dxgi_format}"
|
||||
raise NotImplementedError(msg)
|
||||
else:
|
||||
msg = f"Unimplemented pixel format {repr(fourcc)}"
|
||||
raise NotImplementedError(msg)
|
||||
else:
|
||||
msg = f"Unknown pixel format flags {pfflags}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
if n:
|
||||
self.tile = [
|
||||
ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
|
||||
]
|
||||
else:
|
||||
self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]
|
||||
|
||||
def load_seek(self, pos):
|
||||
pass
|
||||
|
||||
|
||||
class DdsRgbDecoder(ImageFile.PyDecoder):
|
||||
_pulls_fd = True
|
||||
|
||||
def decode(self, buffer):
|
||||
bitcount, masks = self.args
|
||||
|
||||
# Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
|
||||
# Calculate how many zeros each mask is padded with
|
||||
mask_offsets = []
|
||||
# And the maximum value of each channel without the padding
|
||||
mask_totals = []
|
||||
for mask in masks:
|
||||
offset = 0
|
||||
if mask != 0:
|
||||
while mask >> (offset + 1) << (offset + 1) == mask:
|
||||
offset += 1
|
||||
mask_offsets.append(offset)
|
||||
mask_totals.append(mask >> offset)
|
||||
|
||||
data = bytearray()
|
||||
bytecount = bitcount // 8
|
||||
while len(data) < self.state.xsize * self.state.ysize * len(masks):
|
||||
value = int.from_bytes(self.fd.read(bytecount), "little")
|
||||
for i, mask in enumerate(masks):
|
||||
masked_value = value & mask
|
||||
# Remove the zero padding, and scale it to 8 bits
|
||||
data += o8(
|
||||
int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
|
||||
)
|
||||
self.set_as_raw(bytes(data))
|
||||
return -1, 0
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if im.mode not in ("RGB", "RGBA", "L", "LA"):
|
||||
msg = f"cannot write mode {im.mode} as DDS"
|
||||
raise OSError(msg)
|
||||
|
||||
alpha = im.mode[-1] == "A"
|
||||
if im.mode[0] == "L":
|
||||
pixel_flags = DDPF.LUMINANCE
|
||||
rawmode = im.mode
|
||||
if alpha:
|
||||
rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
|
||||
else:
|
||||
rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
|
||||
else:
|
||||
pixel_flags = DDPF.RGB
|
||||
rawmode = im.mode[::-1]
|
||||
rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]
|
||||
|
||||
if alpha:
|
||||
r, g, b, a = im.split()
|
||||
im = Image.merge("RGBA", (a, r, g, b))
|
||||
if alpha:
|
||||
pixel_flags |= DDPF.ALPHAPIXELS
|
||||
rgba_mask.append(0xFF000000 if alpha else 0)
|
||||
|
||||
flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PITCH | DDSD.PIXELFORMAT
|
||||
bitcount = len(im.getbands()) * 8
|
||||
pitch = (im.width * bitcount + 7) // 8
|
||||
|
||||
fp.write(
|
||||
o32(DDS_MAGIC)
|
||||
+ struct.pack(
|
||||
"<7I",
|
||||
124, # header size
|
||||
flags, # flags
|
||||
im.height,
|
||||
im.width,
|
||||
pitch,
|
||||
0, # depth
|
||||
0, # mipmaps
|
||||
)
|
||||
+ struct.pack("11I", *((0,) * 11)) # reserved
|
||||
# pfsize, pfflags, fourcc, bitcount
|
||||
+ struct.pack("<4I", 32, pixel_flags, 0, bitcount)
|
||||
+ struct.pack("<4I", *rgba_mask) # dwRGBABitMask
|
||||
+ struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
|
||||
)
|
||||
ImageFile._save(
|
||||
im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]
|
||||
)
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] == b"DDS "
|
||||
|
||||
|
||||
Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
|
||||
Image.register_decoder("dds_rgb", DdsRgbDecoder)
|
||||
Image.register_save(DdsImageFile.format, _save)
|
||||
Image.register_extension(DdsImageFile.format, ".dds")
|
@ -1,478 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# EPS file handling
|
||||
#
|
||||
# History:
|
||||
# 1995-09-01 fl Created (0.1)
|
||||
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
|
||||
# 1996-08-22 fl Don't choke on floating point BoundingBox values
|
||||
# 1996-08-23 fl Handle files from Macintosh (0.3)
|
||||
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
|
||||
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
|
||||
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
|
||||
# resizing
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import i32le as i32
|
||||
from ._deprecate import deprecate
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
|
||||
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
|
||||
|
||||
gs_binary = None
|
||||
gs_windows_binary = None
|
||||
|
||||
|
||||
def has_ghostscript():
|
||||
global gs_binary, gs_windows_binary
|
||||
    if gs_binary is None:
        if sys.platform.startswith("win"):
            if gs_windows_binary is None:
                import shutil

                for binary in ("gswin32c", "gswin64c", "gs"):
                    if shutil.which(binary) is not None:
                        gs_windows_binary = binary
                        break
                else:
                    gs_windows_binary = False
            gs_binary = gs_windows_binary
        else:
            try:
                subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
                gs_binary = "gs"
            except OSError:
                gs_binary = False
    return gs_binary is not False


def Ghostscript(tile, size, fp, scale=1, transparency=False):
    """Render an image using Ghostscript"""
    global gs_binary
    if not has_ghostscript():
        msg = "Unable to locate Ghostscript on paths"
        raise OSError(msg)

    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    width = size[0] * scale
    height = size[1] * scale
    # resolution is dependent on bbox and size
    res_x = 72.0 * width / (bbox[2] - bbox[0])
    res_y = 72.0 * height / (bbox[3] - bbox[1])

    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, "name") and os.path.exists(fp.name):
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # Ignore length and offset!
        # Ghostscript can read it
        # Copy whole file to read in Ghostscript
        with open(infile_temp, "wb") as f:
            # fetch length of fp
            fp.seek(0, io.SEEK_END)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    device = "pngalpha" if transparency else "ppmraw"

    # Build Ghostscript command
    command = [
        gs_binary,
        "-q",  # quiet mode
        f"-g{width:d}x{height:d}",  # set output geometry (pixels)
        f"-r{res_x:f}x{res_y:f}",  # set input DPI (dots per inch)
        "-dBATCH",  # exit after processing
        "-dNOPAUSE",  # don't pause between pages
        "-dSAFER",  # safe mode
        f"-sDEVICE={device}",
        f"-sOutputFile={outfile}",  # output file
        # adjust for image origin
        "-c",
        f"{-bbox[0]} {-bbox[1]} translate",
        "-f",
        infile,  # input file
        # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
        "-c",
        "showpage",
    ]

    # push data through Ghostscript
    try:
        startupinfo = None
        if sys.platform.startswith("win"):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.check_call(command, startupinfo=startupinfo)
        out_im = Image.open(outfile)
        out_im.load()
    finally:
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass

    im = out_im.im.copy()
    out_im.close()
    return im


class PSFile:
    """
    Wrapper for bytesio object that treats either CR or LF as end of line.
    This class is no longer used internally, but kept for backwards compatibility.
    """

    def __init__(self, fp):
        deprecate(
            "PSFile",
            11,
            action="If you need the functionality of this class "
            "you will need to implement it yourself.",
        )
        self.fp = fp
        self.char = None

    def seek(self, offset, whence=io.SEEK_SET):
        self.char = None
        self.fp.seek(offset, whence)

    def readline(self):
        s = [self.char or b""]
        self.char = None

        c = self.fp.read(1)
        while (c not in b"\r\n") and len(c):
            s.append(c)
            c = self.fp.read(1)

        self.char = self.fp.read(1)
        # line endings can be 1 or 2 of \r \n, in either order
        if self.char in b"\r\n":
            self.char = None

        return b"".join(s).decode("latin-1")


def _accept(prefix):
    return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)


##
# Image plugin for Encapsulated PostScript. This plugin supports only
# a few variants of this format.


class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}

    def _open(self):
        (length, offset) = self._find_offset(self.fp)

        # go to offset - start of "%!PS"
        self.fp.seek(offset)

        self._mode = "RGB"
        self._size = None

        byte_arr = bytearray(255)
        bytes_mv = memoryview(byte_arr)
        bytes_read = 0
        reading_header_comments = True
        reading_trailer_comments = False
        trailer_reached = False

        def check_required_header_comments():
            if "PS-Adobe" not in self.info:
                msg = 'EPS header missing "%!PS-Adobe" comment'
                raise SyntaxError(msg)
            if "BoundingBox" not in self.info:
                msg = 'EPS header missing "%%BoundingBox" comment'
                raise SyntaxError(msg)

        def _read_comment(s):
            nonlocal reading_trailer_comments
            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an EPS file"
                raise SyntaxError(msg) from e

            if m:
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    if v == "(atend)":
                        reading_trailer_comments = True
                    elif not self._size or (
                        trailer_reached and reading_trailer_comments
                    ):
                        try:
                            # Note: The DSC spec says that BoundingBox
                            # fields should be integers, but some drivers
                            # put floating point values there anyway.
                            box = [int(float(i)) for i in v.split()]
                            self._size = box[2] - box[0], box[3] - box[1]
                            self.tile = [
                                ("eps", (0, 0) + self.size, offset, (length, box))
                            ]
                        except Exception:
                            pass
                return True

        while True:
            byte = self.fp.read(1)
            if byte == b"":
                # if we didn't read a byte we must be at the end of the file
                if bytes_read == 0:
                    break
            elif byte in b"\r\n":
                # if we read a line ending character, ignore it and parse what
                # we have already read. if we haven't read any other characters,
                # continue reading
                if bytes_read == 0:
                    continue
            else:
                # ASCII/hexadecimal lines in an EPS file must not exceed
                # 255 characters, not including line ending characters
                if bytes_read >= 255:
                    # only enforce this for lines starting with a "%",
                    # otherwise assume it's binary data
                    if byte_arr[0] == ord("%"):
                        msg = "not an EPS file"
                        raise SyntaxError(msg)
                    else:
                        if reading_header_comments:
                            check_required_header_comments()
                            reading_header_comments = False
                        # reset bytes_read so we can keep reading
                        # data until the end of the line
                        bytes_read = 0
                byte_arr[bytes_read] = byte[0]
                bytes_read += 1
                continue

            if reading_header_comments:
                # Load EPS header

                # if this line doesn't start with a "%",
                # or does start with "%%EndComments",
                # then we've reached the end of the header/comments
                if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
                    check_required_header_comments()
                    reading_header_comments = False
                    continue

                s = str(bytes_mv[:bytes_read], "latin-1")
                if not _read_comment(s):
                    m = field.match(s)
                    if m:
                        k = m.group(1)
                        if k[:8] == "PS-Adobe":
                            self.info["PS-Adobe"] = k[9:]
                        else:
                            self.info[k] = ""
                    elif s[0] == "%":
                        # handle non-DSC PostScript comments that some
                        # tools mistakenly put in the Comments section
                        pass
                    else:
                        msg = "bad EPS header"
                        raise OSError(msg)
            elif bytes_mv[:11] == b"%ImageData:":
                # Check for an "ImageData" descriptor
                # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096

                # Values:
                # columns
                # rows
                # bit depth (1 or 8)
                # mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
                # number of padding channels
                # block size (number of bytes per row per channel)
                # binary/ascii (1: binary, 2: ascii)
                # data start identifier (the image data follows after a single line
                #   consisting only of this quoted value)
                image_data_values = byte_arr[11:bytes_read].split(None, 7)
                columns, rows, bit_depth, mode_id = (
                    int(value) for value in image_data_values[:4]
                )

                if bit_depth == 1:
                    self._mode = "1"
                elif bit_depth == 8:
                    try:
                        self._mode = self.mode_map[mode_id]
                    except ValueError:
                        break
                else:
                    break

                self._size = columns, rows
                return
            elif trailer_reached and reading_trailer_comments:
                # Load EPS trailer

                # if this line starts with "%%EOF",
                # then we've reached the end of the file
                if bytes_mv[:5] == b"%%EOF":
                    break

                s = str(bytes_mv[:bytes_read], "latin-1")
                _read_comment(s)
            elif bytes_mv[:9] == b"%%Trailer":
                trailer_reached = True
            bytes_read = 0

        check_required_header_comments()

        if not self._size:
            msg = "cannot determine EPS bounding box"
            raise OSError(msg)

    def _find_offset(self, fp):
        s = fp.read(4)

        if s == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, io.SEEK_END)
            length = fp.tell()
            offset = 0
        elif i32(s) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            s = fp.read(8)
            offset = i32(s)
            length = i32(s, 4)
        else:
            msg = "not an EPS file"
            raise SyntaxError(msg)

        return length, offset

    def load(self, scale=1, transparency=False):
        # Load EPS via Ghostscript
        if self.tile:
            self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
            self._mode = self.im.mode
            self._size = self.im.size
            self.tile = []
        return Image.Image.load(self)

    def load_seek(self, *args, **kwargs):
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass


# --------------------------------------------------------------------


def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library."""

    # make sure image data is available
    im.load()

    # determine PostScript image mode
    if im.mode == "L":
        operator = (8, 1, b"image")
    elif im.mode == "RGB":
        operator = (8, 3, b"false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, b"false 4 colorimage")
    else:
        msg = "image mode is not supported"
        raise ValueError(msg)

    if eps:
        # write EPS header
        fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write(b"%%Pages: 1\n")
        fp.write(b"%%EndComments\n")
        fp.write(b"%%Page: 1 1\n")
        fp.write(b"%%ImageData: %d %d " % im.size)
        fp.write(b'%d %d 0 1 1 "%s"\n' % operator)

    # image header
    fp.write(b"gsave\n")
    fp.write(b"10 dict begin\n")
    fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write(b"%d %d scale\n" % im.size)
    fp.write(b"%d %d 8\n" % im.size)  # <= bits
    fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write(b"{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + b"\n")
    if hasattr(fp, "flush"):
        fp.flush()

    ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)])

    fp.write(b"\n%%%%EndBinary\n")
    fp.write(b"grestore end\n")
    if hasattr(fp, "flush"):
        fp.flush()


# --------------------------------------------------------------------


Image.register_open(EpsImageFile.format, EpsImageFile, _accept)

Image.register_save(EpsImageFile.format, _save)

Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])

Image.register_mime(EpsImageFile.format, "application/postscript")
@ -1,381 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# EXIF tags
|
||||
#
|
||||
# Copyright (c) 2003 by Secret Labs AB
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
"""
|
||||
This module provides constants and clear-text names for various
|
||||
well-known EXIF tags.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import IntEnum
|
||||
|
||||
|
||||
class Base(IntEnum):
|
||||
# possibly incomplete
|
||||
InteropIndex = 0x0001
|
||||
ProcessingSoftware = 0x000B
|
||||
NewSubfileType = 0x00FE
|
||||
SubfileType = 0x00FF
|
||||
ImageWidth = 0x0100
|
||||
ImageLength = 0x0101
|
||||
BitsPerSample = 0x0102
|
||||
Compression = 0x0103
|
||||
PhotometricInterpretation = 0x0106
|
||||
Thresholding = 0x0107
|
||||
CellWidth = 0x0108
|
||||
CellLength = 0x0109
|
||||
FillOrder = 0x010A
|
||||
DocumentName = 0x010D
|
||||
ImageDescription = 0x010E
|
||||
Make = 0x010F
|
||||
Model = 0x0110
|
||||
StripOffsets = 0x0111
|
||||
Orientation = 0x0112
|
||||
SamplesPerPixel = 0x0115
|
||||
RowsPerStrip = 0x0116
|
||||
StripByteCounts = 0x0117
|
||||
MinSampleValue = 0x0118
|
||||
MaxSampleValue = 0x0119
|
||||
XResolution = 0x011A
|
||||
YResolution = 0x011B
|
||||
PlanarConfiguration = 0x011C
|
||||
PageName = 0x011D
|
||||
FreeOffsets = 0x0120
|
||||
FreeByteCounts = 0x0121
|
||||
GrayResponseUnit = 0x0122
|
||||
GrayResponseCurve = 0x0123
|
||||
T4Options = 0x0124
|
||||
T6Options = 0x0125
|
||||
ResolutionUnit = 0x0128
|
||||
PageNumber = 0x0129
|
||||
TransferFunction = 0x012D
|
||||
Software = 0x0131
|
||||
DateTime = 0x0132
|
||||
Artist = 0x013B
|
||||
HostComputer = 0x013C
|
||||
Predictor = 0x013D
|
||||
WhitePoint = 0x013E
|
||||
PrimaryChromaticities = 0x013F
|
||||
ColorMap = 0x0140
|
||||
HalftoneHints = 0x0141
|
||||
TileWidth = 0x0142
|
||||
TileLength = 0x0143
|
||||
TileOffsets = 0x0144
|
||||
TileByteCounts = 0x0145
|
||||
SubIFDs = 0x014A
|
||||
InkSet = 0x014C
|
||||
InkNames = 0x014D
|
||||
NumberOfInks = 0x014E
|
||||
DotRange = 0x0150
|
||||
TargetPrinter = 0x0151
|
||||
ExtraSamples = 0x0152
|
||||
SampleFormat = 0x0153
|
||||
SMinSampleValue = 0x0154
|
||||
SMaxSampleValue = 0x0155
|
||||
TransferRange = 0x0156
|
||||
ClipPath = 0x0157
|
||||
XClipPathUnits = 0x0158
|
||||
YClipPathUnits = 0x0159
|
||||
Indexed = 0x015A
|
||||
JPEGTables = 0x015B
|
||||
OPIProxy = 0x015F
|
||||
JPEGProc = 0x0200
|
||||
JpegIFOffset = 0x0201
|
||||
JpegIFByteCount = 0x0202
|
||||
JpegRestartInterval = 0x0203
|
||||
JpegLosslessPredictors = 0x0205
|
||||
JpegPointTransforms = 0x0206
|
||||
JpegQTables = 0x0207
|
||||
JpegDCTables = 0x0208
|
||||
JpegACTables = 0x0209
|
||||
YCbCrCoefficients = 0x0211
|
||||
YCbCrSubSampling = 0x0212
|
||||
YCbCrPositioning = 0x0213
|
||||
ReferenceBlackWhite = 0x0214
|
||||
XMLPacket = 0x02BC
|
||||
RelatedImageFileFormat = 0x1000
|
||||
RelatedImageWidth = 0x1001
|
||||
RelatedImageLength = 0x1002
|
||||
Rating = 0x4746
|
||||
RatingPercent = 0x4749
|
||||
ImageID = 0x800D
|
||||
CFARepeatPatternDim = 0x828D
|
||||
BatteryLevel = 0x828F
|
||||
Copyright = 0x8298
|
||||
ExposureTime = 0x829A
|
||||
FNumber = 0x829D
|
||||
IPTCNAA = 0x83BB
|
||||
ImageResources = 0x8649
|
||||
ExifOffset = 0x8769
|
||||
InterColorProfile = 0x8773
|
||||
ExposureProgram = 0x8822
|
||||
SpectralSensitivity = 0x8824
|
||||
GPSInfo = 0x8825
|
||||
ISOSpeedRatings = 0x8827
|
||||
OECF = 0x8828
|
||||
Interlace = 0x8829
|
||||
TimeZoneOffset = 0x882A
|
||||
SelfTimerMode = 0x882B
|
||||
SensitivityType = 0x8830
|
||||
StandardOutputSensitivity = 0x8831
|
||||
RecommendedExposureIndex = 0x8832
|
||||
ISOSpeed = 0x8833
|
||||
ISOSpeedLatitudeyyy = 0x8834
|
||||
ISOSpeedLatitudezzz = 0x8835
|
||||
ExifVersion = 0x9000
|
||||
DateTimeOriginal = 0x9003
|
||||
DateTimeDigitized = 0x9004
|
||||
OffsetTime = 0x9010
|
||||
OffsetTimeOriginal = 0x9011
|
||||
OffsetTimeDigitized = 0x9012
|
||||
ComponentsConfiguration = 0x9101
|
||||
CompressedBitsPerPixel = 0x9102
|
||||
ShutterSpeedValue = 0x9201
|
||||
ApertureValue = 0x9202
|
||||
BrightnessValue = 0x9203
|
||||
ExposureBiasValue = 0x9204
|
||||
MaxApertureValue = 0x9205
|
||||
SubjectDistance = 0x9206
|
||||
MeteringMode = 0x9207
|
||||
LightSource = 0x9208
|
||||
Flash = 0x9209
|
||||
FocalLength = 0x920A
|
||||
Noise = 0x920D
|
||||
ImageNumber = 0x9211
|
||||
SecurityClassification = 0x9212
|
||||
ImageHistory = 0x9213
|
||||
TIFFEPStandardID = 0x9216
|
||||
MakerNote = 0x927C
|
||||
UserComment = 0x9286
|
||||
SubsecTime = 0x9290
|
||||
SubsecTimeOriginal = 0x9291
|
||||
SubsecTimeDigitized = 0x9292
|
||||
AmbientTemperature = 0x9400
|
||||
Humidity = 0x9401
|
||||
Pressure = 0x9402
|
||||
WaterDepth = 0x9403
|
||||
Acceleration = 0x9404
|
||||
CameraElevationAngle = 0x9405
|
||||
XPTitle = 0x9C9B
|
||||
XPComment = 0x9C9C
|
||||
XPAuthor = 0x9C9D
|
||||
XPKeywords = 0x9C9E
|
||||
XPSubject = 0x9C9F
|
||||
FlashPixVersion = 0xA000
|
||||
ColorSpace = 0xA001
|
||||
ExifImageWidth = 0xA002
|
||||
ExifImageHeight = 0xA003
|
||||
RelatedSoundFile = 0xA004
|
||||
ExifInteroperabilityOffset = 0xA005
|
||||
FlashEnergy = 0xA20B
|
||||
SpatialFrequencyResponse = 0xA20C
|
||||
FocalPlaneXResolution = 0xA20E
|
||||
FocalPlaneYResolution = 0xA20F
|
||||
FocalPlaneResolutionUnit = 0xA210
|
||||
SubjectLocation = 0xA214
|
||||
ExposureIndex = 0xA215
|
||||
SensingMethod = 0xA217
|
||||
FileSource = 0xA300
|
||||
SceneType = 0xA301
|
||||
CFAPattern = 0xA302
|
||||
CustomRendered = 0xA401
|
||||
ExposureMode = 0xA402
|
||||
WhiteBalance = 0xA403
|
||||
DigitalZoomRatio = 0xA404
|
||||
FocalLengthIn35mmFilm = 0xA405
|
||||
SceneCaptureType = 0xA406
|
||||
GainControl = 0xA407
|
||||
Contrast = 0xA408
|
||||
Saturation = 0xA409
|
||||
Sharpness = 0xA40A
|
||||
DeviceSettingDescription = 0xA40B
|
||||
SubjectDistanceRange = 0xA40C
|
||||
ImageUniqueID = 0xA420
|
||||
CameraOwnerName = 0xA430
|
||||
BodySerialNumber = 0xA431
|
||||
LensSpecification = 0xA432
|
||||
LensMake = 0xA433
|
||||
LensModel = 0xA434
|
||||
LensSerialNumber = 0xA435
|
||||
CompositeImage = 0xA460
|
||||
CompositeImageCount = 0xA461
|
||||
CompositeImageExposureTimes = 0xA462
|
||||
Gamma = 0xA500
|
||||
PrintImageMatching = 0xC4A5
|
||||
DNGVersion = 0xC612
|
||||
DNGBackwardVersion = 0xC613
|
||||
UniqueCameraModel = 0xC614
|
||||
LocalizedCameraModel = 0xC615
|
||||
CFAPlaneColor = 0xC616
|
||||
CFALayout = 0xC617
|
||||
LinearizationTable = 0xC618
|
||||
BlackLevelRepeatDim = 0xC619
|
||||
BlackLevel = 0xC61A
|
||||
BlackLevelDeltaH = 0xC61B
|
||||
BlackLevelDeltaV = 0xC61C
|
||||
WhiteLevel = 0xC61D
|
||||
DefaultScale = 0xC61E
|
||||
DefaultCropOrigin = 0xC61F
|
||||
DefaultCropSize = 0xC620
|
||||
ColorMatrix1 = 0xC621
|
||||
ColorMatrix2 = 0xC622
|
||||
CameraCalibration1 = 0xC623
|
||||
CameraCalibration2 = 0xC624
|
||||
ReductionMatrix1 = 0xC625
|
||||
ReductionMatrix2 = 0xC626
|
||||
AnalogBalance = 0xC627
|
||||
AsShotNeutral = 0xC628
|
||||
AsShotWhiteXY = 0xC629
|
||||
BaselineExposure = 0xC62A
|
||||
BaselineNoise = 0xC62B
|
||||
BaselineSharpness = 0xC62C
|
||||
BayerGreenSplit = 0xC62D
|
||||
LinearResponseLimit = 0xC62E
|
||||
CameraSerialNumber = 0xC62F
|
||||
LensInfo = 0xC630
|
||||
ChromaBlurRadius = 0xC631
|
||||
AntiAliasStrength = 0xC632
|
||||
ShadowScale = 0xC633
|
||||
DNGPrivateData = 0xC634
|
||||
MakerNoteSafety = 0xC635
|
||||
CalibrationIlluminant1 = 0xC65A
|
||||
CalibrationIlluminant2 = 0xC65B
|
||||
BestQualityScale = 0xC65C
|
||||
RawDataUniqueID = 0xC65D
|
||||
OriginalRawFileName = 0xC68B
|
||||
OriginalRawFileData = 0xC68C
|
||||
ActiveArea = 0xC68D
|
||||
MaskedAreas = 0xC68E
|
||||
AsShotICCProfile = 0xC68F
|
||||
AsShotPreProfileMatrix = 0xC690
|
||||
CurrentICCProfile = 0xC691
|
||||
CurrentPreProfileMatrix = 0xC692
|
||||
ColorimetricReference = 0xC6BF
|
||||
CameraCalibrationSignature = 0xC6F3
|
||||
ProfileCalibrationSignature = 0xC6F4
|
||||
AsShotProfileName = 0xC6F6
|
||||
NoiseReductionApplied = 0xC6F7
|
||||
ProfileName = 0xC6F8
|
||||
ProfileHueSatMapDims = 0xC6F9
|
||||
ProfileHueSatMapData1 = 0xC6FA
|
||||
ProfileHueSatMapData2 = 0xC6FB
|
||||
ProfileToneCurve = 0xC6FC
|
||||
ProfileEmbedPolicy = 0xC6FD
|
||||
ProfileCopyright = 0xC6FE
|
||||
ForwardMatrix1 = 0xC714
|
||||
ForwardMatrix2 = 0xC715
|
||||
PreviewApplicationName = 0xC716
|
||||
PreviewApplicationVersion = 0xC717
|
||||
PreviewSettingsName = 0xC718
|
||||
PreviewSettingsDigest = 0xC719
|
||||
PreviewColorSpace = 0xC71A
|
||||
PreviewDateTime = 0xC71B
|
||||
RawImageDigest = 0xC71C
|
||||
OriginalRawFileDigest = 0xC71D
|
||||
SubTileBlockSize = 0xC71E
|
||||
RowInterleaveFactor = 0xC71F
|
||||
ProfileLookTableDims = 0xC725
|
||||
ProfileLookTableData = 0xC726
|
||||
OpcodeList1 = 0xC740
|
||||
OpcodeList2 = 0xC741
|
||||
OpcodeList3 = 0xC74E
|
||||
NoiseProfile = 0xC761
|
||||
|
||||
|
||||
"""Maps EXIF tags to tag names."""
|
||||
TAGS = {
|
||||
**{i.value: i.name for i in Base},
|
||||
0x920C: "SpatialFrequencyResponse",
|
||||
0x9214: "SubjectLocation",
|
||||
0x9215: "ExposureIndex",
|
||||
0x828E: "CFAPattern",
|
||||
0x920B: "FlashEnergy",
|
||||
0x9216: "TIFF/EPStandardID",
|
||||
}
|
||||
|
||||
|
||||
class GPS(IntEnum):
|
||||
GPSVersionID = 0
|
||||
GPSLatitudeRef = 1
|
||||
GPSLatitude = 2
|
||||
GPSLongitudeRef = 3
|
||||
GPSLongitude = 4
|
||||
GPSAltitudeRef = 5
|
||||
GPSAltitude = 6
|
||||
GPSTimeStamp = 7
|
||||
GPSSatellites = 8
|
||||
GPSStatus = 9
|
||||
GPSMeasureMode = 10
|
||||
GPSDOP = 11
|
||||
GPSSpeedRef = 12
|
||||
GPSSpeed = 13
|
||||
GPSTrackRef = 14
|
||||
GPSTrack = 15
|
||||
GPSImgDirectionRef = 16
|
||||
GPSImgDirection = 17
|
||||
GPSMapDatum = 18
|
||||
GPSDestLatitudeRef = 19
|
||||
GPSDestLatitude = 20
|
||||
GPSDestLongitudeRef = 21
|
||||
GPSDestLongitude = 22
|
||||
GPSDestBearingRef = 23
|
||||
GPSDestBearing = 24
|
||||
GPSDestDistanceRef = 25
|
||||
GPSDestDistance = 26
|
||||
GPSProcessingMethod = 27
|
||||
GPSAreaInformation = 28
|
||||
GPSDateStamp = 29
|
||||
GPSDifferential = 30
|
||||
GPSHPositioningError = 31
|
||||
|
||||
|
||||
"""Maps EXIF GPS tags to tag names."""
|
||||
GPSTAGS = {i.value: i.name for i in GPS}
|
||||
|
||||
|
||||
class Interop(IntEnum):
|
||||
InteropIndex = 1
|
||||
InteropVersion = 2
|
||||
RelatedImageFileFormat = 4096
|
||||
RelatedImageWidth = 4097
|
||||
RleatedImageHeight = 4098
|
||||
|
||||
|
||||
class IFD(IntEnum):
|
||||
Exif = 34665
|
||||
GPSInfo = 34853
|
||||
Makernote = 37500
|
||||
Interop = 40965
|
||||
IFD1 = -1
|
||||
|
||||
|
||||
class LightSource(IntEnum):
|
||||
Unknown = 0
|
||||
Daylight = 1
|
||||
Fluorescent = 2
|
||||
Tungsten = 3
|
||||
Flash = 4
|
||||
Fine = 9
|
||||
Cloudy = 10
|
||||
Shade = 11
|
||||
DaylightFluorescent = 12
|
||||
DayWhiteFluorescent = 13
|
||||
CoolWhiteFluorescent = 14
|
||||
WhiteFluorescent = 15
|
||||
StandardLightA = 17
|
||||
StandardLightB = 18
|
||||
StandardLightC = 19
|
||||
D55 = 20
|
||||
D65 = 21
|
||||
D75 = 22
|
||||
D50 = 23
|
||||
ISO = 24
|
||||
Other = 255
|
@ -1,72 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# FITS file handling
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import math

from . import Image, ImageFile


def _accept(prefix):
    return prefix[:6] == b"SIMPLE"


class FitsImageFile(ImageFile.ImageFile):
    format = "FITS"
    format_description = "FITS"

    def _open(self):
        headers = {}
        while True:
            header = self.fp.read(80)
            if not header:
                msg = "Truncated FITS file"
                raise OSError(msg)
            keyword = header[:8].strip()
            if keyword == b"END":
                break
            value = header[8:].split(b"/")[0].strip()
            if value.startswith(b"="):
                value = value[1:].strip()
            if not headers and (not _accept(keyword) or value != b"T"):
                msg = "Not a FITS file"
                raise SyntaxError(msg)
            headers[keyword] = value

        naxis = int(headers[b"NAXIS"])
        if naxis == 0:
            msg = "No image data"
            raise ValueError(msg)
        elif naxis == 1:
            self._size = 1, int(headers[b"NAXIS1"])
        else:
            self._size = int(headers[b"NAXIS1"]), int(headers[b"NAXIS2"])

        number_of_bits = int(headers[b"BITPIX"])
        if number_of_bits == 8:
            self._mode = "L"
        elif number_of_bits == 16:
            self._mode = "I"
        elif number_of_bits == 32:
            self._mode = "I"
        elif number_of_bits in (-32, -64):
            self._mode = "F"

        offset = math.ceil(self.fp.tell() / 2880) * 2880
        self.tile = [("raw", (0, 0) + self.size, offset, (self.mode, 0, -1))]


# --------------------------------------------------------------------
# Registry

Image.register_open(FitsImageFile.format, FitsImageFile, _accept)

Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
@ -1,173 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
#       95-09-01 fl     Created
#       97-01-03 fl     Fixed parser, setup decoder tile
#       98-07-15 fl     Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os

from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8

#
# decoder


def _accept(prefix):
    return (
        len(prefix) >= 6
        and i16(prefix, 4) in [0xAF11, 0xAF12]
        and i16(prefix, 14) in [0, 3]  # flags
    )


##
# Image plugin for the FLI/FLC animation format.  Use the <b>seek</b>
# method to load individual frames.


class FliImageFile(ImageFile.ImageFile):
    format = "FLI"
    format_description = "Autodesk FLI/FLC Animation"
    _close_exclusive_fp_after_loading = False

    def _open(self):
        # HEAD
        s = self.fp.read(128)
        if not (_accept(s) and s[20:22] == b"\x00\x00"):
            msg = "not an FLI/FLC file"
            raise SyntaxError(msg)

        # frames
        self.n_frames = i16(s, 6)
        self.is_animated = self.n_frames > 1

        # image characteristics
        self._mode = "P"
        self._size = i16(s, 8), i16(s, 10)

        # animation speed
        duration = i32(s, 16)
        magic = i16(s, 4)
        if magic == 0xAF11:
            duration = (duration * 1000) // 70
        self.info["duration"] = duration

        # look for palette
        palette = [(a, a, a) for a in range(256)]

        s = self.fp.read(16)

        self.__offset = 128

        if i16(s, 4) == 0xF100:
            # prefix chunk; ignore it
            self.__offset = self.__offset + i32(s)
            s = self.fp.read(16)

        if i16(s, 4) == 0xF1FA:
            # look for palette chunk
            number_of_subchunks = i16(s, 6)
            chunk_size = None
            for _ in range(number_of_subchunks):
                if chunk_size is not None:
                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
                s = self.fp.read(6)
                chunk_type = i16(s, 4)
                if chunk_type in (4, 11):
                    self._palette(palette, 2 if chunk_type == 11 else 0)
                    break
                chunk_size = i32(s)
                if not chunk_size:
                    break

        palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
        self.palette = ImagePalette.raw("RGB", b"".join(palette))

        # set things up to decode first frame
        self.__frame = -1
        self._fp = self.fp
        self.__rewind = self.fp.tell()
        self.seek(0)

    def _palette(self, palette, shift):
        # load palette

        i = 0
        for e in range(i16(self.fp.read(2))):
            s = self.fp.read(2)
            i = i + s[0]
            n = s[1]
            if n == 0:
                n = 256
            s = self.fp.read(n * 3)
            for n in range(0, len(s), 3):
                r = s[n] << shift
                g = s[n + 1] << shift
                b = s[n + 2] << shift
                palette[i] = (r, g, b)
                i += 1

    def seek(self, frame):
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            self._seek(0)

        for f in range(self.__frame + 1, frame + 1):
            self._seek(f)

    def _seek(self, frame):
        if frame == 0:
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128
        else:
            # ensure that the previous frame was loaded
            self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)
        self.__frame = frame

        # move to next frame
        self.fp = self._fp
        self.fp.seek(self.__offset)

        s = self.fp.read(4)
        if not s:
            msg = "missing frame size"
            raise EOFError(msg)

        framesize = i32(s)

        self.decodermaxblock = framesize
        self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]

        self.__offset += framesize

    def tell(self):
        return self.__frame


#
# registry

Image.register_open(FliImageFile.format, FliImageFile, _accept)

Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
@ -1,136 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl   created
# 1997-08-19 fl   restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os
from typing import BinaryIO

from . import Image, _binary

WIDTH = 800


def puti16(
    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
) -> None:
    """Write network order (big-endian) 16-bit sequence"""
    for v in values:
        if v < 0:
            v += 65536
        fp.write(_binary.o16be(v))


class FontFile:
    """Base class for raster font file handlers."""

    bitmap: Image.Image | None = None

    def __init__(self) -> None:
        self.info: dict[bytes, bytes | int] = {}
        self.glyph: list[
            tuple[
                tuple[int, int],
                tuple[int, int, int, int],
                tuple[int, int, int, int],
                Image.Image,
            ]
            | None
        ] = [None] * 256

    def __getitem__(
        self, ix: int
    ) -> (
        tuple[
            tuple[int, int],
            tuple[int, int, int, int],
            tuple[int, int, int, int],
            Image.Image,
        ]
        | None
    ):
        return self.glyph[ix]

    def compile(self) -> None:
        """Create metrics and bitmap"""

        if self.bitmap:
            return

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self.glyph:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            return

        self.ysize = h

        # paste glyphs into bitmap
        self.bitmap = Image.new("1", (xsize, ysize))
        self.metrics: list[
            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
            | None
        ] = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = d, dst, s

    def save(self, filename: str) -> None:
        """Save font"""

        self.compile()

        # font data
        if not self.bitmap:
            msg = "No bitmap created"
            raise ValueError(msg)
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
            fp.write(b"PILfont\n")
            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
            fp.write(b"DATA\n")
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    puti16(fp, (0,) * 10)
                else:
                    puti16(fp, m[0] + m[1] + m[2])
@ -1,255 +0,0 @@
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl   Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import olefile

from . import Image, ImageFile
from ._binary import i32le as i32

# we map from colour field tuples to (mode, rawmode) descriptors
MODES = {
    # opacity
    (0x00007FFE,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017FFE): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
}


#
# --------------------------------------------------------------------


def _accept(prefix):
    return prefix[:8] == olefile.MAGIC


##
# Image plugin for the FlashPix images.


class FpxImageFile(ImageFile.ImageFile):
    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an FPX file; invalid OLE file"
            raise SyntaxError(msg) from e

        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            msg = "not an FPX file; bad root CLSID"
            raise SyntaxError(msg)

        self._open_index(1)

    def _open_index(self, index=1):
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties(
            [f"Data Object Store {index:06d}", "\005Image Contents"]
        )

        # size (highest resolution)

        self._size = prop[0x1000002], prop[0x1000003]

        size = max(self.size)
        i = 1
        while size > 64:
            size = size / 2
            i += 1
        self.maxid = i - 1

        # mode.  instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match.  for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        bands = i32(s, 4)
        if bands > 4:
            msg = "Invalid number of bands"
            raise OSError(msg)

        # note: for now, we ignore the "uncalibrated" flag
        colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))

        self._mode, self.rawmode = MODES[colors]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index=1, subimage=0):
        #
        # setup tile descriptors for a given subimage

        stream = [
            f"Data Object Store {index:06d}",
            f"Resolution {subimage:04d}",
            "Subimage 0000 Header",
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        if size != self.size:
            msg = "subimage mismatch"
            raise OSError(msg)

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        for i in range(0, len(s), length):
            x1 = min(xsize, x + xtile)
            y1 = min(ysize, y + ytile)

            compression = i32(s, i + 8)

            if compression == 0:
                self.tile.append(
                    (
                        "raw",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode,),
                    )
                )

            elif compression == 1:
                # FIXME: the fill decoder is not implemented
                self.tile.append(
                    (
                        "fill",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode, s[12:16]),
                    )
                )

            elif compression == 2:
                internal_color_conversion = s[14]
                jpeg_tables = s[15]
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide

                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(
                    (
                        "jpeg",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (rawmode, jpegmode),
                    )
                )

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                msg = "unknown/invalid compression"
                raise OSError(msg)

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self._fp = self.fp
        self.fp = None

    def load(self):
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)

    def close(self):
        self.ole.close()
        super().close()

    def __exit__(self, *args):
        self.ole.close()
        super().__exit__()


#
# --------------------------------------------------------------------


Image.register_open(FpxImageFile.format, FpxImageFile, _accept)

Image.register_extension(FpxImageFile.format, ".fpx")
@ -1,114 +0,0 @@
"""
A Pillow loader for .ftc and .ftu files (FTEX)
Jerome Leclanche <jerome@leclan.ch>

The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
  https://creativecommons.org/publicdomain/zero/1.0/

Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001

The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
packed custom format called FTEX. This file format uses file extensions FTC
and FTU.
* FTC files are compressed textures (using standard texture compression).
* FTU files are not compressed.
Texture File Format
The FTC and FTU texture files both use the same format. This
has the following structure:
{header}
{format_directory}
{data}
Where:
{header} = {
    u32:magic,
    u32:version,
    u32:width,
    u32:height,
    u32:mipmap_count,
    u32:format_count
}

* The "magic" number is "FTEX".
* "width" and "height" are the dimensions of the texture.
* "mipmap_count" is the number of mipmaps in the texture.
* "format_count" is the number of texture formats (different versions of the
  same texture) in this file.

{format_directory} = format_count * { u32:format, u32:where }

The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
uncompressed textures.
The texture data for a format starts at the position "where" in the file.

Each set of texture data in the file has the following structure:
{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
* "mipmap_size" is the number of bytes in that mip level. For compressed
  textures this is the size of the texture data compressed with DXT1. For 24 bit
  uncompressed textures, this is 3 * width * height. Following this are the image
  bytes for that mipmap level.

Note: All data is stored in little-Endian (Intel) byte order.
"""
from __future__ import annotations

import struct
from enum import IntEnum
from io import BytesIO

from . import Image, ImageFile

MAGIC = b"FTEX"


class Format(IntEnum):
    DXT1 = 0
    UNCOMPRESSED = 1


class FtexImageFile(ImageFile.ImageFile):
    format = "FTEX"
    format_description = "Texture File Format (IW2:EOC)"

    def _open(self):
        if not _accept(self.fp.read(4)):
            msg = "not an FTEX file"
            raise SyntaxError(msg)
        struct.unpack("<i", self.fp.read(4))  # version
        self._size = struct.unpack("<2i", self.fp.read(8))
        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))

        self._mode = "RGB"

        # Only support single-format files.
        # I don't know of any multi-format file.
        assert format_count == 1

        format, where = struct.unpack("<2i", self.fp.read(8))
        self.fp.seek(where)
        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))

        data = self.fp.read(mipmap_size)

        if format == Format.DXT1:
            self._mode = "RGBA"
            self.tile = [("bcn", (0, 0) + self.size, 0, 1)]
        elif format == Format.UNCOMPRESSED:
            self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
        else:
            msg = f"Invalid texture compression format: {repr(format)}"
            raise ValueError(msg)

        self.fp.close()
        self.fp = BytesIO(data)

    def load_seek(self, pos):
        pass


def _accept(prefix):
    return prefix[:4] == MAGIC


Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
@ -1,103 +0,0 @@
#
# The Python Imaging Library
#
# load a GIMP brush file
#
# History:
#       96-03-14 fl     Created
#       16-01-08 es     Version 2
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
# Copyright (c) Eric Soroos 2016.
#
# See the README file for information on usage and redistribution.
#
#
# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
# format documentation.
#
# This code Interprets version 1 and 2 .gbr files.
# Version 1 files are obsolete, and should not be used for new
#   brushes.
# Version 2 files are saved by GIMP v2.8 (at least)
# Version 3 files have a format specifier of 18 for 16bit floats in
#   the color depth field. This is currently unsupported by Pillow.
from __future__ import annotations

from . import Image, ImageFile
from ._binary import i32be as i32


def _accept(prefix):
    return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)


##
# Image plugin for the GIMP brush format.


class GbrImageFile(ImageFile.ImageFile):
    format = "GBR"
    format_description = "GIMP brush file"

    def _open(self):
        header_size = i32(self.fp.read(4))
        if header_size < 20:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        version = i32(self.fp.read(4))
        if version not in (1, 2):
            msg = f"Unsupported GIMP brush version: {version}"
            raise SyntaxError(msg)

        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        color_depth = i32(self.fp.read(4))
        if width <= 0 or height <= 0:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        if color_depth not in (1, 4):
            msg = f"Unsupported GIMP brush color depth: {color_depth}"
            raise SyntaxError(msg)

        if version == 1:
            comment_length = header_size - 20
        else:
            comment_length = header_size - 28
            magic_number = self.fp.read(4)
            if magic_number != b"GIMP":
                msg = "not a GIMP brush, bad magic number"
                raise SyntaxError(msg)
            self.info["spacing"] = i32(self.fp.read(4))

        comment = self.fp.read(comment_length)[:-1]

        if color_depth == 1:
            self._mode = "L"
        else:
            self._mode = "RGBA"

        self._size = width, height

        self.info["comment"] = comment

        # Image might not be small
        Image._decompression_bomb_check(self.size)

        # Data is an uncompressed block of w * h * bytes/pixel
        self._data_size = width * height * color_depth

    def load(self):
        if not self.im:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self._data_size))
        return Image.Image.load(self)


#
# registry


Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
Image.register_extension(GbrImageFile.format, ".gbr")
@ -1,97 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# GD file handling
#
# History:
# 1996-04-12 fl   Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#


"""
.. note::
    This format cannot be automatically recognized, so the
    class is not registered for use with :py:func:`PIL.Image.open()`.  To open a
    gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.

.. warning::
    THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE.  This
    implementation is provided for convenience and demonstrational
    purposes only.
"""
from __future__ import annotations

from . import ImageFile, ImagePalette, UnidentifiedImageError
from ._binary import i16be as i16
from ._binary import i32be as i32


class GdImageFile(ImageFile.ImageFile):
    """
    Image plugin for the GD uncompressed format.  Note that this format
    is not supported by the standard :py:func:`PIL.Image.open()` function.  To use
    this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
    use the :py:func:`PIL.GdImageFile.open()` function.
    """

    format = "GD"
    format_description = "GD uncompressed images"

    def _open(self):
        # Header
        s = self.fp.read(1037)

        if i16(s) not in [65534, 65535]:
            msg = "Not a valid GD 2.x .gd file"
            raise SyntaxError(msg)

        self._mode = "L"  # FIXME: "P"
        self._size = i16(s, 2), i16(s, 4)

        true_color = s[6]
        true_color_offset = 2 if true_color else 0

        # transparency index
        tindex = i32(s, 7 + true_color_offset)
        if tindex < 256:
            self.info["transparency"] = tindex

        self.palette = ImagePalette.raw(
            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
        )

        self.tile = [
            (
                "raw",
                (0, 0) + self.size,
                7 + true_color_offset + 4 + 256 * 4,
                ("L", 0, 1),
            )
        ]


def open(fp, mode="r"):
    """
    Load texture from a GD image file.

    :param fp: GD file name, or an opened file handle.
    :param mode: Optional mode.  In this version, if the mode argument
        is given, it must be "r".
    :returns: An image instance.
    :raises OSError: If the image could not be read.
    """
    if mode != "r":
        msg = "bad mode"
        raise ValueError(msg)

    try:
        return GdImageFile(fp)
    except SyntaxError as e:
        msg = "cannot identify this image file"
        raise UnidentifiedImageError(msg) from e
File diff suppressed because it is too large
@ -1,137 +0,0 @@
#
# Python Imaging Library
# $Id$
#
# stuff to read (and render) GIMP gradient files
#
# History:
#       97-08-23 fl     Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#

"""
Stuff to translate curve segments to palette values (derived from
the corresponding code in GIMP, written by Federico Mena Quintero.
See the GIMP distribution for more information.)
"""
from __future__ import annotations

from math import log, pi, sin, sqrt

from ._binary import o8

EPSILON = 1e-10
""""""  # Enable auto-doc for data member


def linear(middle, pos):
    if pos <= middle:
        if middle < EPSILON:
            return 0.0
        else:
            return 0.5 * pos / middle
    else:
        pos = pos - middle
        middle = 1.0 - middle
        if middle < EPSILON:
            return 1.0
        else:
            return 0.5 + 0.5 * pos / middle


def curved(middle, pos):
    return pos ** (log(0.5) / log(max(middle, EPSILON)))


def sine(middle, pos):
    return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0


def sphere_increasing(middle, pos):
    return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2)


def sphere_decreasing(middle, pos):
    return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2)


SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing]
""""""  # Enable auto-doc for data member


class GradientFile:
    gradient = None

    def getpalette(self, entries=256):
        palette = []

        ix = 0
        x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

        for i in range(entries):
            x = i / (entries - 1)

            while x1 < x:
                ix += 1
                x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

            w = x1 - x0

            if w < EPSILON:
                scale = segment(0.5, 0.5)
            else:
                scale = segment((xm - x0) / w, (x - x0) / w)

            # expand to RGBA
            r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
            g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
            b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
            a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))

            # add to palette
            palette.append(r + g + b + a)

        return b"".join(palette), "RGBA"


class GimpGradientFile(GradientFile):
    """File handler for GIMP's gradient format."""

    def __init__(self, fp):
        if fp.readline()[:13] != b"GIMP Gradient":
            msg = "not a GIMP gradient file"
            raise SyntaxError(msg)

        line = fp.readline()

        # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do
        if line.startswith(b"Name: "):
            line = fp.readline().strip()

        count = int(line)

        gradient = []

        for i in range(count):
            s = fp.readline().split()
            w = [float(x) for x in s[:11]]

            x0, x1 = w[0], w[2]
            xm = w[1]
            rgb0 = w[3:7]
            rgb1 = w[7:11]

            segment = SEGMENTS[int(s[11])]
            cspace = int(s[12])

            if cspace != 0:
                msg = "cannot handle HSV colour space"
                raise OSError(msg)

            gradient.append((x0, x1, xm, rgb0, rgb1, segment))

        self.gradient = gradient
@ -1,57 +0,0 @@
#
# Python Imaging Library
# $Id$
#
# stuff to read GIMP palette files
#
# History:
# 1997-08-23 fl     Created
# 2004-09-07 fl     Support GIMP 2.0 palette files.
#
# Copyright (c) Secret Labs AB 1997-2004.  All rights reserved.
# Copyright (c) Fredrik Lundh 1997-2004.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import re

from ._binary import o8


class GimpPaletteFile:
    """File handler for GIMP's palette format."""

    rawmode = "RGB"

    def __init__(self, fp):
        self.palette = [o8(i) * 3 for i in range(256)]

        if fp.readline()[:12] != b"GIMP Palette":
            msg = "not a GIMP palette file"
            raise SyntaxError(msg)

        for i in range(256):
            s = fp.readline()
            if not s:
                break

            # skip fields and comment lines
            if re.match(rb"\w+:|#", s):
                continue
            if len(s) > 100:
                msg = "bad palette file"
                raise SyntaxError(msg)

            v = tuple(map(int, s.split()[:3]))
            if len(v) != 3:
                msg = "bad palette entry"
                raise ValueError(msg)

            self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2])

        self.palette = b"".join(self.palette)

    def getpalette(self):
        return self.palette, self.rawmode
@ -1,74 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# GRIB stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile

_handler = None


def register_handler(handler):
    """
    Install application-specific GRIB image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler


# --------------------------------------------------------------------
# Image adapter


def _accept(prefix):
    return prefix[:4] == b"GRIB" and prefix[7] == 1


class GribStubImageFile(ImageFile.StubImageFile):
    format = "GRIB"
    format_description = "GRIB"

    def _open(self):
        offset = self.fp.tell()

        if not _accept(self.fp.read(8)):
            msg = "Not a GRIB file"
            raise SyntaxError(msg)

        self.fp.seek(offset)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        return _handler


def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
        msg = "GRIB save handler not installed"
        raise OSError(msg)
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
Image.register_save(GribStubImageFile.format, _save)

Image.register_extension(GribStubImageFile.format, ".grib")
@ -1,74 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# HDF5 stub adapter
#
# Copyright (c) 2000-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile

_handler = None


def register_handler(handler):
    """
    Install application-specific HDF5 image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler


# --------------------------------------------------------------------
# Image adapter


def _accept(prefix):
    return prefix[:8] == b"\x89HDF\r\n\x1a\n"


class HDF5StubImageFile(ImageFile.StubImageFile):
    format = "HDF5"
    format_description = "HDF5"

    def _open(self):
        offset = self.fp.tell()

        if not _accept(self.fp.read(8)):
            msg = "Not an HDF file"
            raise SyntaxError(msg)

        self.fp.seek(offset)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        return _handler


def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
        msg = "HDF5 save handler not installed"
        raise OSError(msg)
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
Image.register_save(HDF5StubImageFile.format, _save)

Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"])
|
@ -1,400 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# macOS icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
# 2020-04-04    Allow saving on all operating systems.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
# Copyright (c) 2020 by Pan Jing.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import io
import os
import struct
import sys

from . import Image, ImageFile, PngImagePlugin, features

enable_jpeg2k = features.check_codec("jpg_2000")
if enable_jpeg2k:
    from . import Jpeg2KImagePlugin

MAGIC = b"icns"
HEADERSIZE = 8


def nextheader(fobj):
    return struct.unpack(">4sI", fobj.read(HEADERSIZE))


def read_32t(fobj, start_length, size):
    # The 128x128 icon seems to have an extra header for some reason.
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(4)
    if sig != b"\x00\x00\x00\x00":
        msg = "Unknown signature, expecting 0x00000000"
        raise SyntaxError(msg)
    return read_32(fobj, (start + 4, length - 4), size)


def read_32(fobj, start_length, size):
    """
    Read a 32bit RGB icon resource. Seems to be either uncompressed or
    an RLE packbits-like scheme.
    """
    (start, length) = start_length
    fobj.seek(start)
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image
        im = Image.new("RGB", pixel_size, None)
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    break
                byte = byte[0]
                if byte & 0x80:
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                msg = f"Error reading channel [{repr(bytesleft)} left]"
                raise SyntaxError(msg)
            band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
            im.im.putband(band.im, band_ix)
    return {"RGB": im}


def read_mk(fobj, start_length, size):
    # Alpha masks seem to be uncompressed
    start = start_length[0]
    fobj.seek(start)
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1)
    return {"A": band}


def read_png_or_jpeg2000(fobj, start_length, size):
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(12)
    if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a":
        fobj.seek(start)
        im = PngImagePlugin.PngImageFile(fobj)
        Image._decompression_bomb_check(im.size)
        return {"RGBA": im}
    elif (
        sig[:4] == b"\xff\x4f\xff\x51"
        or sig[:4] == b"\x0d\x0a\x87\x0a"
        or sig == b"\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a"
    ):
        if not enable_jpeg2k:
            msg = (
                "Unsupported icon subimage format (rebuild PIL "
                "with JPEG 2000 support to fix this)"
            )
            raise ValueError(msg)
        # j2k, jpc or j2c
        fobj.seek(start)
        jp2kstream = fobj.read(length)
        f = io.BytesIO(jp2kstream)
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
        Image._decompression_bomb_check(im.size)
        if im.mode != "RGBA":
            im = im.convert("RGBA")
        return {"RGBA": im}
    else:
        msg = "Unsupported icon subimage format"
        raise ValueError(msg)


class IcnsFile:
    SIZES = {
        (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)],
        (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)],
        (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)],
        (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)],
        (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)],
        (128, 128, 1): [
            (b"ic07", read_png_or_jpeg2000),
            (b"it32", read_32t),
            (b"t8mk", read_mk),
        ],
        (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)],
        (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)],
        (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)],
        (32, 32, 1): [
            (b"icp5", read_png_or_jpeg2000),
            (b"il32", read_32),
            (b"l8mk", read_mk),
        ],
        (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)],
        (16, 16, 1): [
            (b"icp4", read_png_or_jpeg2000),
            (b"is32", read_32),
            (b"s8mk", read_mk),
        ],
    }

    def __init__(self, fobj):
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if not _accept(sig):
            msg = "not an icns file"
            raise SyntaxError(msg)
        i = HEADERSIZE
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            if blocksize <= 0:
                msg = "invalid block header"
                raise SyntaxError(msg)
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            fobj.seek(blocksize, io.SEEK_CUR)
            i += blocksize

    def itersizes(self):
        sizes = []
        for size, fmts in self.SIZES.items():
            for fmt, reader in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self):
        sizes = self.itersizes()
        if not sizes:
            msg = "No 32bit icon resources found"
            raise SyntaxError(msg)
        return max(sizes)

    def dataforsize(self, size):
        """
        Get an icon resource as {channel: array}. Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(self, size=None):
        if size is None:
            size = self.bestsize()
        if len(size) == 2:
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)

        im = channels.get("RGBA", None)
        if im:
            return im

        im = channels.get("RGB").copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            pass
        return im


##
# Image plugin for Mac OS icons.


class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL image support for Mac OS .icns files.
    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self):
        self.icns = IcnsFile(self.fp)
        self._mode = "RGBA"
        self.info["sizes"] = self.icns.itersizes()
        self.best_size = self.icns.bestsize()
        self.size = (
            self.best_size[0] * self.best_size[2],
            self.best_size[1] * self.best_size[2],
        )

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        info_size = value
        if info_size not in self.info["sizes"] and len(info_size) == 2:
            info_size = (info_size[0], info_size[1], 1)
        if (
            info_size not in self.info["sizes"]
            and len(info_size) == 3
            and info_size[2] == 1
        ):
            simple_sizes = [
                (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"]
            ]
            if value in simple_sizes:
                info_size = self.info["sizes"][simple_sizes.index(value)]
        if info_size not in self.info["sizes"]:
            msg = "This is not one of the allowed sizes of this image"
            raise ValueError(msg)
        self._size = value

    def load(self):
        if len(self.size) == 3:
            self.best_size = self.size
            self.size = (
                self.best_size[0] * self.best_size[2],
                self.best_size[1] * self.best_size[2],
            )

        px = Image.Image.load(self)
        if self.im is not None and self.im.size == self.size:
            # Already loaded
            return px
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
        im = self.icns.getimage(self.best_size)

        # If this is a PNG or JPEG 2000, it won't be loaded yet
        px = im.load()

        self.im = im.im
        self._mode = im.mode
        self.size = im.size

        return px


def _save(im, fp, filename):
    """
    Saves the image as a series of PNG files,
    that are then combined into a .icns file.
    """
    if hasattr(fp, "flush"):
        fp.flush()

    sizes = {
        b"ic07": 128,
        b"ic08": 256,
        b"ic09": 512,
        b"ic10": 1024,
        b"ic11": 32,
        b"ic12": 64,
        b"ic13": 256,
        b"ic14": 512,
    }
    provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])}
    size_streams = {}
    for size in set(sizes.values()):
        image = (
            provided_images[size]
            if size in provided_images
            else im.resize((size, size))
        )

        temp = io.BytesIO()
        image.save(temp, "png")
        size_streams[size] = temp.getvalue()

    entries = []
    for type, size in sizes.items():
        stream = size_streams[size]
        entries.append(
            {"type": type, "size": HEADERSIZE + len(stream), "stream": stream}
        )

    # Header
    fp.write(MAGIC)
    file_length = HEADERSIZE  # Header
    file_length += HEADERSIZE + 8 * len(entries)  # TOC
    file_length += sum(entry["size"] for entry in entries)
    fp.write(struct.pack(">i", file_length))

    # TOC
    fp.write(b"TOC ")
    fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE))
    for entry in entries:
        fp.write(entry["type"])
        fp.write(struct.pack(">i", entry["size"]))

    # Data
    for entry in entries:
        fp.write(entry["type"])
        fp.write(struct.pack(">i", entry["size"]))
        fp.write(entry["stream"])

    if hasattr(fp, "flush"):
        fp.flush()


def _accept(prefix):
    return prefix[:4] == MAGIC


Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept)
Image.register_extension(IcnsImageFile.format, ".icns")

Image.register_save(IcnsImageFile.format, _save)
Image.register_mime(IcnsImageFile.format, "image/icns")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Syntax: python3 IcnsImagePlugin.py [file]")
        sys.exit()

    with open(sys.argv[1], "rb") as fp:
        imf = IcnsImageFile(fp)
        for size in imf.info["sizes"]:
            width, height, scale = imf.size = size
            imf.save(f"out-{width}-{height}-{scale}.png")
        with Image.open(sys.argv[1]) as im:
            im.save("out.png")
        if sys.platform == "windows":
            os.startfile("out.png")
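For reference, a minimal save sketch against the _save() path above (file names are hypothetical): sizes without a matching width in append_images are produced by resizing the base image.

    from PIL import Image

    im = Image.open("icon-1024.png").convert("RGBA")
    # 512 and 256 px variants are supplied explicitly; the rest are resized copies
    im.save("icon.icns", append_images=[im.resize((512, 512)), im.resize((256, 256))])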
@ -1,356 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# Windows Icon support for PIL
#
# History:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#

# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
# <casadebender@gmail.com>.
# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
#
# Icon format references:
#   * https://en.wikipedia.org/wiki/ICO_(file_format)
#   * https://msdn.microsoft.com/en-us/library/ms997538.aspx
from __future__ import annotations

import warnings
from io import BytesIO
from math import ceil, log

from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8
from ._binary import o16le as o16
from ._binary import o32le as o32

#
# --------------------------------------------------------------------

_MAGIC = b"\0\0\1\0"


def _save(im, fp, filename):
    fp.write(_MAGIC)  # (2+2)
    bmp = im.encoderinfo.get("bitmap_format") == "bmp"
    sizes = im.encoderinfo.get(
        "sizes",
        [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
    )
    frames = []
    provided_ims = [im] + im.encoderinfo.get("append_images", [])
    width, height = im.size
    for size in sorted(set(sizes)):
        if size[0] > width or size[1] > height or size[0] > 256 or size[1] > 256:
            continue

        for provided_im in provided_ims:
            if provided_im.size != size:
                continue
            frames.append(provided_im)
            if bmp:
                bits = BmpImagePlugin.SAVE[provided_im.mode][1]
                bits_used = [bits]
                for other_im in provided_ims:
                    if other_im.size != size:
                        continue
                    bits = BmpImagePlugin.SAVE[other_im.mode][1]
                    if bits not in bits_used:
                        # Another image has been supplied for this size
                        # with a different bit depth
                        frames.append(other_im)
                        bits_used.append(bits)
            break
        else:
            # TODO: invent a more convenient method for proportional scalings
            frame = provided_im.copy()
            frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None)
            frames.append(frame)
    fp.write(o16(len(frames)))  # idCount(2)
    offset = fp.tell() + len(frames) * 16
    for frame in frames:
        width, height = frame.size
        # 0 means 256
        fp.write(o8(width if width < 256 else 0))  # bWidth(1)
        fp.write(o8(height if height < 256 else 0))  # bHeight(1)

        bits, colors = BmpImagePlugin.SAVE[frame.mode][1:] if bmp else (32, 0)
        fp.write(o8(colors))  # bColorCount(1)
        fp.write(b"\0")  # bReserved(1)
        fp.write(b"\0\0")  # wPlanes(2)
        fp.write(o16(bits))  # wBitCount(2)

        image_io = BytesIO()
        if bmp:
            frame.save(image_io, "dib")

            if bits != 32:
                and_mask = Image.new("1", size)
                ImageFile._save(
                    and_mask, image_io, [("raw", (0, 0) + size, 0, ("1", 0, -1))]
                )
        else:
            frame.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        if bmp:
            image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:]
        bytes_len = len(image_bytes)
        fp.write(o32(bytes_len))  # dwBytesInRes(4)
        fp.write(o32(offset))  # dwImageOffset(4)
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)


def _accept(prefix):
    return prefix[:4] == _MAGIC


class IcoFile:
    def __init__(self, buf):
        """
        Parse image from file-like object containing ico file data
        """

        # check magic
        s = buf.read(6)
        if not _accept(s):
            msg = "not an ICO file"
            raise SyntaxError(msg)

        self.buf = buf
        self.entry = []

        # Number of items in file
        self.nb_items = i16(s, 4)

        # Get headers for each item
        for i in range(self.nb_items):
            s = buf.read(16)

            icon_header = {
                "width": s[0],
                "height": s[1],
                "nb_color": s[2],  # No. of colors in image (0 if >=8bpp)
                "reserved": s[3],
                "planes": i16(s, 4),
                "bpp": i16(s, 6),
                "size": i32(s, 8),
                "offset": i32(s, 12),
            }

            # See Wikipedia
            for j in ("width", "height"):
                if not icon_header[j]:
                    icon_header[j] = 256

            # See Wikipedia notes about color depth.
            # We need this just to differ images with equal sizes
            icon_header["color_depth"] = (
                icon_header["bpp"]
                or (
                    icon_header["nb_color"] != 0
                    and ceil(log(icon_header["nb_color"], 2))
                )
                or 256
            )

            icon_header["dim"] = (icon_header["width"], icon_header["height"])
            icon_header["square"] = icon_header["width"] * icon_header["height"]

            self.entry.append(icon_header)

        self.entry = sorted(self.entry, key=lambda x: x["color_depth"])
        # ICO images are usually squares
        self.entry = sorted(self.entry, key=lambda x: x["square"], reverse=True)

    def sizes(self):
        """
        Get a list of all available icon sizes and color depths.
        """
        return {(h["width"], h["height"]) for h in self.entry}

    def getentryindex(self, size, bpp=False):
        for i, h in enumerate(self.entry):
            if size == h["dim"] and (bpp is False or bpp == h["color_depth"]):
                return i
        return 0

    def getimage(self, size, bpp=False):
        """
        Get an image from the icon
        """
        return self.frame(self.getentryindex(size, bpp))

    def frame(self, idx):
        """
        Get an image from frame idx
        """

        header = self.entry[idx]

        self.buf.seek(header["offset"])
        data = self.buf.read(8)
        self.buf.seek(header["offset"])

        if data[:8] == PngImagePlugin._MAGIC:
            # png frame
            im = PngImagePlugin.PngImageFile(self.buf)
            Image._decompression_bomb_check(im.size)
        else:
            # XOR + AND mask bmp frame
            im = BmpImagePlugin.DibImageFile(self.buf)
            Image._decompression_bomb_check(im.size)

            # change tile dimension to only encompass XOR image
            im._size = (im.size[0], int(im.size[1] / 2))
            d, e, o, a = im.tile[0]
            im.tile[0] = d, (0, 0) + im.size, o, a

            # figure out where AND mask image starts
            bpp = header["bpp"]
            if 32 == bpp:
                # 32-bit color depth icon image allows semitransparent areas
                # PIL's DIB format ignores transparency bits, recover them.
                # The DIB is packed in BGRX byte order where X is the alpha
                # channel.

                # Back up to start of bmp data
                self.buf.seek(o)
                # extract every 4th byte (eg. 3,7,11,15,...)
                alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]

                # convert to an 8bpp grayscale image
                mask = Image.frombuffer(
                    "L",  # 8bpp
                    im.size,  # (w, h)
                    alpha_bytes,  # source chars
                    "raw",  # raw decoder
                    ("L", 0, -1),  # 8bpp inverted, unpadded, reversed
                )
            else:
                # get AND image from end of bitmap
                w = im.size[0]
                if (w % 32) > 0:
                    # bitmap row data is aligned to word boundaries
                    w += 32 - (im.size[0] % 32)

                # the total mask data is
                # padded row size * height / bits per char

                total_bytes = int((w * im.size[1]) / 8)
                and_mask_offset = header["offset"] + header["size"] - total_bytes

                self.buf.seek(and_mask_offset)
                mask_data = self.buf.read(total_bytes)

                # convert raw data to image
                mask = Image.frombuffer(
                    "1",  # 1 bpp
                    im.size,  # (w, h)
                    mask_data,  # source chars
                    "raw",  # raw decoder
                    ("1;I", int(w / 8), -1),  # 1bpp inverted, padded, reversed
                )

            # now we have two images, im is XOR image and mask is AND image

            # apply mask image as alpha channel
            im = im.convert("RGBA")
            im.putalpha(mask)

        return im


##
# Image plugin for Windows Icon files.


class IcoImageFile(ImageFile.ImageFile):
    """
    PIL read-only image support for Microsoft Windows .ico files.

    By default the largest resolution image in the file will be loaded. This
    can be changed by altering the 'size' attribute before calling 'load'.

    The info dictionary has a key 'sizes' that is a list of the sizes available
    in the icon file.

    Handles classic, XP and Vista icon formats.

    When saving, PNG compression is used. Support for this was only added in
    Windows Vista. If you are unable to view the icon in Windows, convert the
    image to "RGBA" mode before saving.

    This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
    <casadebender@gmail.com>.
    https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
    """

    format = "ICO"
    format_description = "Windows Icon"

    def _open(self):
        self.ico = IcoFile(self.fp)
        self.info["sizes"] = self.ico.sizes()
        self.size = self.ico.entry[0]["dim"]
        self.load()

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        if value not in self.info["sizes"]:
            msg = "This is not one of the allowed sizes of this image"
            raise ValueError(msg)
        self._size = value

    def load(self):
        if self.im is not None and self.im.size == self.size:
            # Already loaded
            return Image.Image.load(self)
        im = self.ico.getimage(self.size)
        # if tile is PNG, it won't really be loaded yet
        im.load()
        self.im = im.im
        self.pyaccess = None
        self._mode = im.mode
        if im.size != self.size:
            warnings.warn("Image was not the expected size")

            index = self.ico.getentryindex(self.size)
            sizes = list(self.info["sizes"])
            sizes[index] = im.size
            self.info["sizes"] = set(sizes)

            self.size = im.size

    def load_seek(self):
        # Flag the ImageFile.Parser so that it
        # just does all the decode at the end.
        pass


#
# --------------------------------------------------------------------


Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")

Image.register_mime(IcoImageFile.format, "image/x-icon")
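A short sketch of the save path implemented above (file names are hypothetical): entries larger than the source image, or larger than 256 pixels, are skipped by _save(), and bitmap_format="bmp" switches from PNG-compressed frames to classic DIB frames.

    from PIL import Image

    im = Image.open("logo-512.png").convert("RGBA")
    # PNG-compressed frames (default), one per requested size
    im.save("logo.ico", sizes=[(16, 16), (32, 32), (48, 48), (256, 256)])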
@ -1,371 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# IFUNC IM file handling for PIL
#
# history:
# 1995-09-01 fl Created.
# 1997-01-03 fl Save palette images
# 1997-01-08 fl Added sequence support
# 1997-01-23 fl Added P and RGB save support
# 1997-05-31 fl Read floating point images
# 1997-06-22 fl Save floating point images
# 1997-08-27 fl Read and save 1-bit images
# 1998-06-25 fl Added support for RGB+LUT images
# 1998-07-02 fl Added support for YCC images
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 1998-12-29 fl Added I;16 support
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
# 2003-09-26 fl Added LA/PA support
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os
import re

from . import Image, ImageFile, ImagePalette

# --------------------------------------------------------------------
# Standard tags

COMMENT = "Comment"
DATE = "Date"
EQUIPMENT = "Digitalization equipment"
FRAMES = "File size (no of images)"
LUT = "Lut"
NAME = "Name"
SCALE = "Scale (x,y)"
SIZE = "Image size (x*y)"
MODE = "Image type"

TAGS = {
    COMMENT: 0,
    DATE: 0,
    EQUIPMENT: 0,
    FRAMES: 0,
    LUT: 0,
    NAME: 0,
    SCALE: 0,
    SIZE: 0,
    MODE: 0,
}

OPEN = {
    # ifunc93/p3cfunc formats
    "0 1 image": ("1", "1"),
    "L 1 image": ("1", "1"),
    "Greyscale image": ("L", "L"),
    "Grayscale image": ("L", "L"),
    "RGB image": ("RGB", "RGB;L"),
    "RLB image": ("RGB", "RLB"),
    "RYB image": ("RGB", "RLB"),
    "B1 image": ("1", "1"),
    "B2 image": ("P", "P;2"),
    "B4 image": ("P", "P;4"),
    "X 24 image": ("RGB", "RGB"),
    "L 32 S image": ("I", "I;32"),
    "L 32 F image": ("F", "F;32"),
    # old p3cfunc formats
    "RGB3 image": ("RGB", "RGB;T"),
    "RYB3 image": ("RGB", "RYB;T"),
    # extensions
    "LA image": ("LA", "LA;L"),
    "PA image": ("LA", "PA;L"),
    "RGBA image": ("RGBA", "RGBA;L"),
    "RGBX image": ("RGBX", "RGBX;L"),
    "CMYK image": ("CMYK", "CMYK;L"),
    "YCC image": ("YCbCr", "YCbCr;L"),
}

# ifunc95 extensions
for i in ["8", "8S", "16", "16S", "32", "32F"]:
    OPEN[f"L {i} image"] = ("F", f"F;{i}")
    OPEN[f"L*{i} image"] = ("F", f"F;{i}")
for i in ["16", "16L", "16B"]:
    OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}")
    OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}")
for i in ["32S"]:
    OPEN[f"L {i} image"] = ("I", f"I;{i}")
    OPEN[f"L*{i} image"] = ("I", f"I;{i}")
for i in range(2, 33):
    OPEN[f"L*{i} image"] = ("F", f"F;{i}")


# --------------------------------------------------------------------
# Read IM directory

split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")


def number(s):
    try:
        return int(s)
    except ValueError:
        return float(s)


##
# Image plugin for the IFUNC IM file format.


class ImImageFile(ImageFile.ImageFile):
    format = "IM"
    format_description = "IFUNC Image Memory"
    _close_exclusive_fp_after_loading = False

    def _open(self):
        # Quick rejection: if there's not an LF among the first
        # 100 bytes, this is (probably) not a text header.

        if b"\n" not in self.fp.read(100):
            msg = "not an IM file"
            raise SyntaxError(msg)
        self.fp.seek(0)

        n = 0

        # Default values
        self.info[MODE] = "L"
        self.info[SIZE] = (512, 512)
        self.info[FRAMES] = 1

        self.rawmode = "L"

        while True:
            s = self.fp.read(1)

            # Some versions of IFUNC uses \n\r instead of \r\n...
            if s == b"\r":
                continue

            if not s or s == b"\0" or s == b"\x1A":
                break

            # FIXME: this may read whole file if not a text file
            s = s + self.fp.readline()

            if len(s) > 100:
                msg = "not an IM file"
                raise SyntaxError(msg)

            if s[-2:] == b"\r\n":
                s = s[:-2]
            elif s[-1:] == b"\n":
                s = s[:-1]

            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an IM file"
                raise SyntaxError(msg) from e

            if m:
                k, v = m.group(1, 2)

                # Don't know if this is the correct encoding,
                # but a decent guess (I guess)
                k = k.decode("latin-1", "replace")
                v = v.decode("latin-1", "replace")

                # Convert value as appropriate
                if k in [FRAMES, SCALE, SIZE]:
                    v = v.replace("*", ",")
                    v = tuple(map(number, v.split(",")))
                    if len(v) == 1:
                        v = v[0]
                elif k == MODE and v in OPEN:
                    v, self.rawmode = OPEN[v]

                # Add to dictionary. Note that COMMENT tags are
                # combined into a list of strings.
                if k == COMMENT:
                    if k in self.info:
                        self.info[k].append(v)
                    else:
                        self.info[k] = [v]
                else:
                    self.info[k] = v

                if k in TAGS:
                    n += 1

            else:
                msg = "Syntax error in IM header: " + s.decode("ascii", "replace")
                raise SyntaxError(msg)

        if not n:
            msg = "Not an IM file"
            raise SyntaxError(msg)

        # Basic attributes
        self._size = self.info[SIZE]
        self._mode = self.info[MODE]

        # Skip forward to start of image data
        while s and s[:1] != b"\x1A":
            s = self.fp.read(1)
        if not s:
            msg = "File truncated"
            raise SyntaxError(msg)

        if LUT in self.info:
            # convert lookup table to palette or lut attribute
            palette = self.fp.read(768)
            greyscale = 1  # greyscale palette
            linear = 1  # linear greyscale palette
            for i in range(256):
                if palette[i] == palette[i + 256] == palette[i + 512]:
                    if palette[i] != i:
                        linear = 0
                else:
                    greyscale = 0
            if self.mode in ["L", "LA", "P", "PA"]:
                if greyscale:
                    if not linear:
                        self.lut = list(palette[:256])
                else:
                    if self.mode in ["L", "P"]:
                        self._mode = self.rawmode = "P"
                    elif self.mode in ["LA", "PA"]:
                        self._mode = "PA"
                        self.rawmode = "PA;L"
                    self.palette = ImagePalette.raw("RGB;L", palette)
            elif self.mode == "RGB":
                if not greyscale or not linear:
                    self.lut = list(palette)

        self.frame = 0

        self.__offset = offs = self.fp.tell()

        self._fp = self.fp  # FIXME: hack

        if self.rawmode[:2] == "F;":
            # ifunc95 formats
            try:
                # use bit decoder (if necessary)
                bits = int(self.rawmode[2:])
                if bits not in [8, 16, 32]:
                    self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))]
                    return
            except ValueError:
                pass

        if self.rawmode in ["RGB;T", "RYB;T"]:
            # Old LabEye/3PC files. Would be very surprised if anyone
            # ever stumbled upon such a file ;-)
            size = self.size[0] * self.size[1]
            self.tile = [
                ("raw", (0, 0) + self.size, offs, ("G", 0, -1)),
                ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)),
                ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)),
            ]
        else:
            # LabEye/IFUNC files
            self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]

    @property
    def n_frames(self):
        return self.info[FRAMES]

    @property
    def is_animated(self):
        return self.info[FRAMES] > 1

    def seek(self, frame):
        if not self._seek_check(frame):
            return

        self.frame = frame

        if self.mode == "1":
            bits = 1
        else:
            bits = 8 * len(self.mode)

        size = ((self.size[0] * bits + 7) // 8) * self.size[1]
        offs = self.__offset + frame * size

        self.fp = self._fp

        self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]

    def tell(self):
        return self.frame


#
# --------------------------------------------------------------------
# Save IM files


SAVE = {
    # mode: (im type, raw mode)
    "1": ("0 1", "1"),
    "L": ("Greyscale", "L"),
    "LA": ("LA", "LA;L"),
    "P": ("Greyscale", "P"),
    "PA": ("LA", "PA;L"),
    "I": ("L 32S", "I;32S"),
    "I;16": ("L 16", "I;16"),
    "I;16L": ("L 16L", "I;16L"),
    "I;16B": ("L 16B", "I;16B"),
    "F": ("L 32F", "F;32F"),
    "RGB": ("RGB", "RGB;L"),
    "RGBA": ("RGBA", "RGBA;L"),
    "RGBX": ("RGBX", "RGBX;L"),
    "CMYK": ("CMYK", "CMYK;L"),
    "YCbCr": ("YCC", "YCbCr;L"),
}


def _save(im, fp, filename):
    try:
        image_type, rawmode = SAVE[im.mode]
    except KeyError as e:
        msg = f"Cannot save {im.mode} images as IM"
        raise ValueError(msg) from e

    frames = im.encoderinfo.get("frames", 1)

    fp.write(f"Image type: {image_type} image\r\n".encode("ascii"))
    if filename:
        # Each line must be 100 characters or less,
        # or: SyntaxError("not an IM file")
        # 8 characters are used for "Name: " and "\r\n"
        # Keep just the filename, ditch the potentially overlong path
        name, ext = os.path.splitext(os.path.basename(filename))
        name = "".join([name[: 92 - len(ext)], ext])

        fp.write(f"Name: {name}\r\n".encode("ascii"))
    fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii"))
    fp.write(f"File size (no of images): {frames}\r\n".encode("ascii"))
    if im.mode in ["P", "PA"]:
        fp.write(b"Lut: 1\r\n")
    fp.write(b"\000" * (511 - fp.tell()) + b"\032")
    if im.mode in ["P", "PA"]:
        im_palette = im.im.getpalette("RGB", "RGB;L")
        colors = len(im_palette) // 3
        palette = b""
        for i in range(3):
            palette += im_palette[colors * i : colors * (i + 1)]
            palette += b"\x00" * (256 - colors)
        fp.write(palette)  # 768 bytes
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))])


#
# --------------------------------------------------------------------
# Registry


Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)

Image.register_extension(ImImageFile.format, ".im")
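The reader above exposes multi-frame .im files through the usual n_frames/seek interface; a small sketch (the file name is hypothetical):

    from PIL import Image

    with Image.open("stack.im") as im:
        print(im.format, im.mode, im.size, im.n_frames)
        for i in range(im.n_frames):
            im.seek(i)
            im.convert("L").save(f"slice-{i}.png")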
File diff suppressed because it is too large
Load Diff
@ -1,311 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

from __future__ import annotations

from . import Image


def constant(image: Image.Image, value: int) -> Image.Image:
    """Fill a channel with a given gray level.

    :rtype: :py:class:`~PIL.Image.Image`
    """

    return Image.new("L", image.size, value)


def duplicate(image: Image.Image) -> Image.Image:
    """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`.

    :rtype: :py:class:`~PIL.Image.Image`
    """

    return image.copy()


def invert(image: Image.Image) -> Image.Image:
    """
    Invert an image (channel). ::

        out = MAX - image

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image.load()
    return image._new(image.im.chop_invert())


def lighter(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Compares the two images, pixel by pixel, and returns a new image containing
    the lighter values. ::

        out = max(image1, image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_lighter(image2.im))


def darker(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Compares the two images, pixel by pixel, and returns a new image containing
    the darker values. ::

        out = min(image1, image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_darker(image2.im))


def difference(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Returns the absolute value of the pixel-by-pixel difference between the two
    images. ::

        out = abs(image1 - image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_difference(image2.im))


def multiply(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimposes two images on top of each other.

    If you multiply an image with a solid black image, the result is black. If
    you multiply with a solid white image, the image is unaffected. ::

        out = image1 * image2 / MAX

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_multiply(image2.im))


def screen(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimposes two inverted images on top of each other. ::

        out = MAX - ((MAX - image1) * (MAX - image2) / MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_screen(image2.im))


def soft_light(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimposes two images on top of each other using the Soft Light algorithm

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_soft_light(image2.im))


def hard_light(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimposes two images on top of each other using the Hard Light algorithm

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_hard_light(image2.im))


def overlay(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimposes two images on top of each other using the Overlay algorithm

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_overlay(image2.im))


def add(
    image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0
) -> Image.Image:
    """
    Adds two images, dividing the result by scale and adding the
    offset. If omitted, scale defaults to 1.0, and offset to 0.0. ::

        out = ((image1 + image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_add(image2.im, scale, offset))


def subtract(
    image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0
) -> Image.Image:
    """
    Subtracts two images, dividing the result by scale and adding the offset.
    If omitted, scale defaults to 1.0, and offset to 0.0. ::

        out = ((image1 - image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_subtract(image2.im, scale, offset))


def add_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Add two images, without clipping the result. ::

        out = ((image1 + image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_add_modulo(image2.im))


def subtract_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Subtract two images, without clipping the result. ::

        out = ((image1 - image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_subtract_modulo(image2.im))


def logical_and(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical AND between two images.

    Both of the images must have mode "1". If you would like to perform a
    logical AND on an image with a mode other than "1", try
    :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask
    as the second image. ::

        out = ((image1 and image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_and(image2.im))


def logical_or(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical OR between two images.

    Both of the images must have mode "1". ::

        out = ((image1 or image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_or(image2.im))


def logical_xor(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical XOR between two images.

    Both of the images must have mode "1". ::

        out = ((bool(image1) != bool(image2)) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """

    image1.load()
    image2.load()
    return image1._new(image1.im.chop_xor(image2.im))


def blend(image1: Image.Image, image2: Image.Image, alpha: float) -> Image.Image:
    """Blend images using constant transparency weight. Alias for
    :py:func:`PIL.Image.blend`.

    :rtype: :py:class:`~PIL.Image.Image`
    """

    return Image.blend(image1, image2, alpha)


def composite(
    image1: Image.Image, image2: Image.Image, mask: Image.Image
) -> Image.Image:
    """Create composite using transparency mask. Alias for
    :py:func:`PIL.Image.composite`.

    :rtype: :py:class:`~PIL.Image.Image`
    """

    return Image.composite(image1, image2, mask)


def offset(image: Image.Image, xoffset: int, yoffset: int | None = None) -> Image.Image:
    """Returns a copy of the image where data has been offset by the given
    distances. Data wraps around the edges. If ``yoffset`` is omitted, it
    is assumed to be equal to ``xoffset``.

    :param image: Input image.
    :param xoffset: The horizontal distance.
    :param yoffset: The vertical distance. If omitted, both
        distances are set to the same value.
    :rtype: :py:class:`~PIL.Image.Image`
    """

    if yoffset is None:
        yoffset = xoffset
    image.load()
    return image._new(image.im.offset(xoffset, yoffset))
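A small example of the channel operations defined above, e.g. for comparing two renders (file names are hypothetical):

    from PIL import Image, ImageChops

    a = Image.open("before.png").convert("RGB")
    b = Image.open("after.png").convert("RGB")

    diff = ImageChops.difference(a, b)  # abs(a - b), per the docstring above
    if diff.getbbox() is None:
        print("images are pixel-identical")
    else:
        # darken the reference wherever the images disagree
        ImageChops.multiply(a, ImageChops.invert(diff)).save("diff-highlight.png")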
File diff suppressed because it is too large
Load Diff
@ -1,317 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import re
from functools import lru_cache

from . import Image


@lru_cache
def getrgb(color):
    """
    Convert a color string to an RGB or RGBA tuple. If the string cannot be
    parsed, this function raises a :py:exc:`ValueError` exception.

    .. versionadded:: 1.1.4

    :param color: A color string
    :return: ``(red, green, blue[, alpha])``
    """
    if len(color) > 100:
        msg = "color specifier is too long"
        raise ValueError(msg)
    color = color.lower()

    rgb = colormap.get(color, None)
    if rgb:
        if isinstance(rgb, tuple):
            return rgb
        colormap[color] = rgb = getrgb(rgb)
        return rgb

    # check for known string formats
    if re.match("#[a-f0-9]{3}$", color):
        return int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16)

    if re.match("#[a-f0-9]{4}$", color):
        return (
            int(color[1] * 2, 16),
            int(color[2] * 2, 16),
            int(color[3] * 2, 16),
            int(color[4] * 2, 16),
        )

    if re.match("#[a-f0-9]{6}$", color):
        return int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)

    if re.match("#[a-f0-9]{8}$", color):
        return (
            int(color[1:3], 16),
            int(color[3:5], 16),
            int(color[5:7], 16),
            int(color[7:9], 16),
        )

    m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
    if m:
        return int(m.group(1)), int(m.group(2)), int(m.group(3))

    m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
    if m:
        return (
            int((int(m.group(1)) * 255) / 100.0 + 0.5),
            int((int(m.group(2)) * 255) / 100.0 + 0.5),
            int((int(m.group(3)) * 255) / 100.0 + 0.5),
        )

    m = re.match(
        r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
    )
    if m:
        from colorsys import hls_to_rgb

        rgb = hls_to_rgb(
            float(m.group(1)) / 360.0,
            float(m.group(3)) / 100.0,
            float(m.group(2)) / 100.0,
        )
        return (
            int(rgb[0] * 255 + 0.5),
            int(rgb[1] * 255 + 0.5),
            int(rgb[2] * 255 + 0.5),
        )

    m = re.match(
        r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
    )
    if m:
        from colorsys import hsv_to_rgb

        rgb = hsv_to_rgb(
            float(m.group(1)) / 360.0,
            float(m.group(2)) / 100.0,
            float(m.group(3)) / 100.0,
        )
        return (
            int(rgb[0] * 255 + 0.5),
            int(rgb[1] * 255 + 0.5),
            int(rgb[2] * 255 + 0.5),
        )

    m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
    if m:
        return int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
    msg = f"unknown color specifier: {repr(color)}"
    raise ValueError(msg)


@lru_cache
def getcolor(color, mode):
    """
    Same as :py:func:`~PIL.ImageColor.getrgb` for most modes. However, if
    ``mode`` is HSV, converts the RGB value to a HSV value, or if ``mode`` is
    not color or a palette image, converts the RGB value to a grayscale value.
    If the string cannot be parsed, this function raises a :py:exc:`ValueError`
    exception.

    .. versionadded:: 1.1.4

    :param color: A color string
    :param mode: Convert result to this mode
    :return: ``(graylevel[, alpha]) or (red, green, blue[, alpha])``
    """
    # same as getrgb, but converts the result to the given mode
    color, alpha = getrgb(color), 255
    if len(color) == 4:
        color, alpha = color[:3], color[3]

    if mode == "HSV":
        from colorsys import rgb_to_hsv

        r, g, b = color
        h, s, v = rgb_to_hsv(r / 255, g / 255, b / 255)
        return int(h * 255), int(s * 255), int(v * 255)
    elif Image.getmodebase(mode) == "L":
        r, g, b = color
        # ITU-R Recommendation 601-2 for nonlinear RGB
        # scaled to 24 bits to match the convert's implementation.
        color = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16
        if mode[-1] == "A":
            return color, alpha
    else:
        if mode[-1] == "A":
            return color + (alpha,)
    return color


colormap = {
    # X11 colour table from https://drafts.csswg.org/css-color-4/, with
    # gray/grey spelling issues fixed. This is a superset of HTML 4.0
    # colour names used in CSS 1.
    "aliceblue": "#f0f8ff",
    "antiquewhite": "#faebd7",
    "aqua": "#00ffff",
    "aquamarine": "#7fffd4",
    "azure": "#f0ffff",
    "beige": "#f5f5dc",
    "bisque": "#ffe4c4",
    "black": "#000000",
    "blanchedalmond": "#ffebcd",
    "blue": "#0000ff",
    "blueviolet": "#8a2be2",
    "brown": "#a52a2a",
    "burlywood": "#deb887",
    "cadetblue": "#5f9ea0",
    "chartreuse": "#7fff00",
    "chocolate": "#d2691e",
    "coral": "#ff7f50",
    "cornflowerblue": "#6495ed",
    "cornsilk": "#fff8dc",
    "crimson": "#dc143c",
    "cyan": "#00ffff",
    "darkblue": "#00008b",
    "darkcyan": "#008b8b",
    "darkgoldenrod": "#b8860b",
    "darkgray": "#a9a9a9",
    "darkgrey": "#a9a9a9",
    "darkgreen": "#006400",
    "darkkhaki": "#bdb76b",
    "darkmagenta": "#8b008b",
    "darkolivegreen": "#556b2f",
    "darkorange": "#ff8c00",
    "darkorchid": "#9932cc",
    "darkred": "#8b0000",
    "darksalmon": "#e9967a",
    "darkseagreen": "#8fbc8f",
    "darkslateblue": "#483d8b",
    "darkslategray": "#2f4f4f",
    "darkslategrey": "#2f4f4f",
    "darkturquoise": "#00ced1",
    "darkviolet": "#9400d3",
    "deeppink": "#ff1493",
    "deepskyblue": "#00bfff",
    "dimgray": "#696969",
    "dimgrey": "#696969",
    "dodgerblue": "#1e90ff",
    "firebrick": "#b22222",
    "floralwhite": "#fffaf0",
    "forestgreen": "#228b22",
    "fuchsia": "#ff00ff",
    "gainsboro": "#dcdcdc",
    "ghostwhite": "#f8f8ff",
    "gold": "#ffd700",
    "goldenrod": "#daa520",
    "gray": "#808080",
    "grey": "#808080",
    "green": "#008000",
    "greenyellow": "#adff2f",
    "honeydew": "#f0fff0",
    "hotpink": "#ff69b4",
    "indianred": "#cd5c5c",
    "indigo": "#4b0082",
    "ivory": "#fffff0",
    "khaki": "#f0e68c",
    "lavender": "#e6e6fa",
    "lavenderblush": "#fff0f5",
    "lawngreen": "#7cfc00",
    "lemonchiffon": "#fffacd",
    "lightblue": "#add8e6",
    "lightcoral": "#f08080",
    "lightcyan": "#e0ffff",
    "lightgoldenrodyellow": "#fafad2",
    "lightgreen": "#90ee90",
    "lightgray": "#d3d3d3",
    "lightgrey": "#d3d3d3",
    "lightpink": "#ffb6c1",
    "lightsalmon": "#ffa07a",
    "lightseagreen": "#20b2aa",
    "lightskyblue": "#87cefa",
    "lightslategray": "#778899",
    "lightslategrey": "#778899",
    "lightsteelblue": "#b0c4de",
    "lightyellow": "#ffffe0",
    "lime": "#00ff00",
    "limegreen": "#32cd32",
    "linen": "#faf0e6",
    "magenta": "#ff00ff",
    "maroon": "#800000",
    "mediumaquamarine": "#66cdaa",
    "mediumblue": "#0000cd",
    "mediumorchid": "#ba55d3",
    "mediumpurple": "#9370db",
    "mediumseagreen": "#3cb371",
    "mediumslateblue": "#7b68ee",
    "mediumspringgreen": "#00fa9a",
    "mediumturquoise": "#48d1cc",
    "mediumvioletred": "#c71585",
    "midnightblue": "#191970",
    "mintcream": "#f5fffa",
    "mistyrose": "#ffe4e1",
    "moccasin": "#ffe4b5",
    "navajowhite": "#ffdead",
    "navy": "#000080",
    "oldlace": "#fdf5e6",
    "olive": "#808000",
    "olivedrab": "#6b8e23",
    "orange": "#ffa500",
    "orangered": "#ff4500",
    "orchid": "#da70d6",
    "palegoldenrod": "#eee8aa",
    "palegreen": "#98fb98",
    "paleturquoise": "#afeeee",
    "palevioletred": "#db7093",
    "papayawhip": "#ffefd5",
    "peachpuff": "#ffdab9",
    "peru": "#cd853f",
    "pink": "#ffc0cb",
    "plum": "#dda0dd",
    "powderblue": "#b0e0e6",
    "purple": "#800080",
    "rebeccapurple": "#663399",
    "red": "#ff0000",
    "rosybrown": "#bc8f8f",
    "royalblue": "#4169e1",
    "saddlebrown": "#8b4513",
    "salmon": "#fa8072",
    "sandybrown": "#f4a460",
    "seagreen": "#2e8b57",
    "seashell": "#fff5ee",
    "sienna": "#a0522d",
    "silver": "#c0c0c0",
    "skyblue": "#87ceeb",
    "slateblue": "#6a5acd",
    "slategray": "#708090",
    "slategrey": "#708090",
    "snow": "#fffafa",
    "springgreen": "#00ff7f",
    "steelblue": "#4682b4",
    "tan": "#d2b48c",
    "teal": "#008080",
    "thistle": "#d8bfd8",
    "tomato": "#ff6347",
    "turquoise": "#40e0d0",
    "violet": "#ee82ee",
    "wheat": "#f5deb3",
    "white": "#ffffff",
    "whitesmoke": "#f5f5f5",
    "yellow": "#ffff00",
    "yellowgreen": "#9acd32",
}
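A few calls against the parser above, with the values they should produce:

    from PIL import ImageColor

    ImageColor.getrgb("#0f0")               # (0, 255, 0)
    ImageColor.getrgb("rgb(255, 0, 0)")     # (255, 0, 0)
    ImageColor.getrgb("hsl(0, 100%, 50%)")  # (255, 0, 0)
    ImageColor.getcolor("red", "L")         # 76, via the ITU-R 601-2 weights above
    ImageColor.getcolor("red", "RGBA")      # (255, 0, 0, 255)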
File diff suppressed because it is too large
Load Diff
@ -1,193 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# WCK-style drawing interface operations
|
||||
#
|
||||
# History:
|
||||
# 2003-12-07 fl created
|
||||
# 2005-05-15 fl updated; added to PIL as ImageDraw2
|
||||
# 2005-05-15 fl added text support
|
||||
# 2005-05-20 fl added arc/chord/pieslice support
|
||||
#
|
||||
# Copyright (c) 2003-2005 by Secret Labs AB
|
||||
# Copyright (c) 2003-2005 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
"""
|
||||
(Experimental) WCK-style drawing interface operations
|
||||
|
||||
.. seealso:: :py:mod:`PIL.ImageDraw`
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath
|
||||
|
||||
|
||||
class Pen:
|
||||
"""Stores an outline color and width."""
|
||||
|
||||
def __init__(self, color, width=1, opacity=255):
|
||||
self.color = ImageColor.getrgb(color)
|
||||
self.width = width
|
||||
|
||||
|
||||
class Brush:
|
||||
"""Stores a fill color"""
|
||||
|
||||
def __init__(self, color, opacity=255):
|
||||
self.color = ImageColor.getrgb(color)
|
||||
|
||||
|
||||
class Font:
|
||||
"""Stores a TrueType font and color"""
|
||||
|
||||
def __init__(self, color, file, size=12):
|
||||
# FIXME: add support for bitmap fonts
|
||||
self.color = ImageColor.getrgb(color)
|
||||
self.font = ImageFont.truetype(file, size)
|
||||
|
||||
|
||||
class Draw:
|
||||
"""
|
||||
(Experimental) WCK-style drawing interface
|
||||
"""
|
||||
|
||||
def __init__(self, image, size=None, color=None):
|
||||
if not hasattr(image, "im"):
|
||||
image = Image.new(image, size, color)
|
||||
self.draw = ImageDraw.Draw(image)
|
||||
self.image = image
|
||||
self.transform = None
|
||||
|
||||
def flush(self):
|
||||
return self.image
|
||||
|
||||
def render(self, op, xy, pen, brush=None):
|
||||
# handle color arguments
|
||||
outline = fill = None
|
||||
width = 1
|
||||
if isinstance(pen, Pen):
|
||||
outline = pen.color
|
||||
width = pen.width
|
||||
elif isinstance(brush, Pen):
|
||||
outline = brush.color
|
||||
width = brush.width
|
||||
if isinstance(brush, Brush):
|
||||
fill = brush.color
|
||||
elif isinstance(pen, Brush):
|
||||
fill = pen.color
|
||||
# handle transformation
|
||||
if self.transform:
|
||||
xy = ImagePath.Path(xy)
|
||||
xy.transform(self.transform)
|
||||
# render the item
|
||||
if op == "line":
|
||||
self.draw.line(xy, fill=outline, width=width)
|
||||
else:
|
||||
getattr(self.draw, op)(xy, fill=fill, outline=outline)
|
||||
|
||||
def settransform(self, offset):
|
||||
"""Sets a transformation offset."""
|
||||
(xoffset, yoffset) = offset
|
||||
self.transform = (1, 0, xoffset, 0, 1, yoffset)
|
||||
|
||||
def arc(self, xy, start, end, *options):
|
||||
"""
|
||||
Draws an arc (a portion of a circle outline) between the start and end
|
||||
angles, inside the given bounding box.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc`
|
||||
"""
|
||||
self.render("arc", xy, start, end, *options)
|
||||
|
||||
def chord(self, xy, start, end, *options):
|
||||
"""
|
||||
Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points
|
||||
with a straight line.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord`
|
||||
"""
|
||||
self.render("chord", xy, start, end, *options)
|
||||
|
||||
def ellipse(self, xy, *options):
|
||||
"""
|
||||
Draws an ellipse inside the given bounding box.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse`
|
||||
"""
|
||||
self.render("ellipse", xy, *options)
|
||||
|
||||
def line(self, xy, *options):
|
||||
"""
|
||||
Draws a line between the coordinates in the ``xy`` list.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line`
|
||||
"""
|
||||
self.render("line", xy, *options)
|
||||
|
||||
def pieslice(self, xy, start, end, *options):
|
||||
"""
|
||||
Same as arc, but also draws straight lines between the end points and the
|
||||
center of the bounding box.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice`
|
||||
"""
|
||||
self.render("pieslice", xy, start, end, *options)
|
||||
|
||||
def polygon(self, xy, *options):
|
||||
"""
|
||||
Draws a polygon.
|
||||
|
||||
The polygon outline consists of straight lines between the given
|
||||
coordinates, plus a straight line between the last and the first
|
||||
coordinate.
|
||||
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon`
|
||||
"""
|
||||
self.render("polygon", xy, *options)
|
||||
|
||||
def rectangle(self, xy, *options):
|
||||
"""
|
||||
Draws a rectangle.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle`
|
||||
"""
|
||||
self.render("rectangle", xy, *options)
|
||||
|
||||
def text(self, xy, text, font):
|
||||
"""
|
||||
Draws the string at the given position.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text`
|
||||
"""
|
||||
if self.transform:
|
||||
xy = ImagePath.Path(xy)
|
||||
xy.transform(self.transform)
|
||||
self.draw.text(xy, text, font=font.font, fill=font.color)
|
||||
|
||||
def textbbox(self, xy, text, font):
|
||||
"""
|
||||
Returns bounding box (in pixels) of given text.
|
||||
|
||||
:return: ``(left, top, right, bottom)`` bounding box
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox`
|
||||
"""
|
||||
if self.transform:
|
||||
xy = ImagePath.Path(xy)
|
||||
xy.transform(self.transform)
|
||||
return self.draw.textbbox(xy, text, font=font.font)
|
||||
|
||||
def textlength(self, text, font):
|
||||
"""
|
||||
Returns length (in pixels) of given text.
|
||||
This is the amount by which following text should be offset.
|
||||
|
||||
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength`
|
||||
"""
|
||||
return self.draw.textlength(text, font=font.font)
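For orientation, a minimal usage sketch of the ImageDraw2 interface removed above (not part of the original file; the colors, sizes and coordinates are arbitrary):

from PIL import Image, ImageDraw2

im = Image.new("RGB", (200, 100), "white")
d = ImageDraw2.Draw(im)
pen = ImageDraw2.Pen("blue", width=3)      # outline color and width
brush = ImageDraw2.Brush("yellow")         # fill color
d.settransform((10, 10))                   # offset applied to later coordinates
d.rectangle([0, 0, 80, 60], pen, brush)    # outlined, filled rectangle
d.line([0, 0, 80, 60], pen)
im = d.flush()                             # returns the underlying Image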
|
@ -1,104 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# image enhancement classes
|
||||
#
|
||||
# For a background, see "Image Processing By Interpolation and
|
||||
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
|
||||
# at http://www.graficaobscura.com/interp/index.html
|
||||
#
|
||||
# History:
|
||||
# 1996-03-23 fl Created
|
||||
# 2009-06-16 fl Fixed mean calculation
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image, ImageFilter, ImageStat
|
||||
|
||||
|
||||
class _Enhance:
|
||||
def enhance(self, factor):
|
||||
"""
|
||||
Returns an enhanced image.
|
||||
|
||||
:param factor: A floating point value controlling the enhancement.
|
||||
Factor 1.0 always returns a copy of the original image,
|
||||
lower factors mean less color (brightness, contrast,
|
||||
etc), and higher values more. There are no restrictions
|
||||
on this value.
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
return Image.blend(self.degenerate, self.image, factor)
|
||||
|
||||
|
||||
class Color(_Enhance):
|
||||
"""Adjust image color balance.
|
||||
|
||||
This class can be used to adjust the colour balance of an image, in
|
||||
a manner similar to the controls on a colour TV set. An enhancement
|
||||
factor of 0.0 gives a black and white image. A factor of 1.0 gives
|
||||
the original image.
|
||||
"""
|
||||
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.intermediate_mode = "L"
|
||||
if "A" in image.getbands():
|
||||
self.intermediate_mode = "LA"
|
||||
|
||||
self.degenerate = image.convert(self.intermediate_mode).convert(image.mode)
|
||||
|
||||
|
||||
class Contrast(_Enhance):
|
||||
"""Adjust image contrast.
|
||||
|
||||
This class can be used to control the contrast of an image, similar
|
||||
to the contrast control on a TV set. An enhancement factor of 0.0
|
||||
gives a solid gray image. A factor of 1.0 gives the original image.
|
||||
"""
|
||||
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
|
||||
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
|
||||
|
||||
if "A" in image.getbands():
|
||||
self.degenerate.putalpha(image.getchannel("A"))
|
||||
|
||||
|
||||
class Brightness(_Enhance):
|
||||
"""Adjust image brightness.
|
||||
|
||||
This class can be used to control the brightness of an image. An
|
||||
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
|
||||
original image.
|
||||
"""
|
||||
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.degenerate = Image.new(image.mode, image.size, 0)
|
||||
|
||||
if "A" in image.getbands():
|
||||
self.degenerate.putalpha(image.getchannel("A"))
|
||||
|
||||
|
||||
class Sharpness(_Enhance):
|
||||
"""Adjust image sharpness.
|
||||
|
||||
This class can be used to adjust the sharpness of an image. An
|
||||
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
|
||||
original image, and a factor of 2.0 gives a sharpened image.
|
||||
"""
|
||||
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.degenerate = image.filter(ImageFilter.SMOOTH)
|
||||
|
||||
if "A" in image.getbands():
|
||||
self.degenerate.putalpha(image.getchannel("A"))
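A short usage sketch for the enhancement classes deleted above (illustrative only; the input filename is hypothetical):

from PIL import Image, ImageEnhance

with Image.open("photo.jpg") as im:
    enhancer = ImageEnhance.Contrast(im)
    flat = enhancer.enhance(0.5)    # halfway towards a solid gray image
    punchy = enhancer.enhance(1.5)  # extrapolate beyond the original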
|
@ -1,795 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# base class for image file handlers
|
||||
#
|
||||
# history:
|
||||
# 1995-09-09 fl Created
|
||||
# 1996-03-11 fl Fixed load mechanism.
|
||||
# 1996-04-15 fl Added pcx/xbm decoders.
|
||||
# 1996-04-30 fl Added encoders.
|
||||
# 1996-12-14 fl Added load helpers
|
||||
# 1997-01-11 fl Use encode_to_file where possible
|
||||
# 1997-08-27 fl Flush output in _save
|
||||
# 1998-03-05 fl Use memory mapping for some modes
|
||||
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
|
||||
# 1999-05-31 fl Added image parser
|
||||
# 2000-10-12 fl Set readonly flag on memory-mapped images
|
||||
# 2002-03-20 fl Use better messages for common decoder errors
|
||||
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
|
||||
# 2003-10-30 fl Added StubImageFile class
|
||||
# 2004-02-25 fl Made incremental parser more robust
|
||||
#
|
||||
# Copyright (c) 1997-2004 by Secret Labs AB
|
||||
# Copyright (c) 1995-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import itertools
|
||||
import struct
|
||||
import sys
|
||||
from typing import Any, NamedTuple
|
||||
|
||||
from . import Image
|
||||
from ._deprecate import deprecate
|
||||
from ._util import is_path
|
||||
|
||||
MAXBLOCK = 65536
|
||||
|
||||
SAFEBLOCK = 1024 * 1024
|
||||
|
||||
LOAD_TRUNCATED_IMAGES = False
|
||||
"""Whether or not to load truncated image files. User code may change this."""
|
||||
|
||||
ERRORS = {
|
||||
-1: "image buffer overrun error",
|
||||
-2: "decoding error",
|
||||
-3: "unknown error",
|
||||
-8: "bad configuration",
|
||||
-9: "out of memory error",
|
||||
}
|
||||
"""
|
||||
Dict of known error codes returned from :meth:`.PyDecoder.decode`,
|
||||
:meth:`.PyEncoder.encode` :meth:`.PyEncoder.encode_to_pyfd` and
|
||||
:meth:`.PyEncoder.encode_to_file`.
|
||||
"""
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Helpers
|
||||
|
||||
|
||||
def _get_oserror(error, *, encoder):
|
||||
try:
|
||||
msg = Image.core.getcodecstatus(error)
|
||||
except AttributeError:
|
||||
msg = ERRORS.get(error)
|
||||
if not msg:
|
||||
msg = f"{'encoder' if encoder else 'decoder'} error {error}"
|
||||
msg += f" when {'writing' if encoder else 'reading'} image file"
|
||||
return OSError(msg)
|
||||
|
||||
|
||||
def raise_oserror(error):
|
||||
deprecate(
|
||||
"raise_oserror",
|
||||
12,
|
||||
action="It is only useful for translating error codes returned by a codec's "
|
||||
"decode() method, which ImageFile already does automatically.",
|
||||
)
|
||||
raise _get_oserror(error, encoder=False)
|
||||
|
||||
|
||||
def _tilesort(t):
|
||||
# sort on offset
|
||||
return t[2]
|
||||
|
||||
|
||||
class _Tile(NamedTuple):
|
||||
encoder_name: str
|
||||
extents: tuple[int, int, int, int]
|
||||
offset: int
|
||||
args: tuple[Any, ...] | str | None
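# Illustrative note (added; not part of the original module): a typical tile
# entry for an uncompressed greyscale image might look like
#   _Tile("raw", (0, 0, 640, 480), 128, ("L", 0, 1))
# i.e. decoder name, target rectangle, byte offset of the pixel data, and the
# decoder-specific args (rawmode, stride, orientation).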
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# ImageFile base class
|
||||
|
||||
|
||||
class ImageFile(Image.Image):
|
||||
"""Base class for image file format handlers."""
|
||||
|
||||
def __init__(self, fp=None, filename=None):
|
||||
super().__init__()
|
||||
|
||||
self._min_frame = 0
|
||||
|
||||
self.custom_mimetype = None
|
||||
|
||||
self.tile = None
|
||||
""" A list of tile descriptors, or ``None`` """
|
||||
|
||||
self.readonly = 1 # until we know better
|
||||
|
||||
self.decoderconfig = ()
|
||||
self.decodermaxblock = MAXBLOCK
|
||||
|
||||
if is_path(fp):
|
||||
# filename
|
||||
self.fp = open(fp, "rb")
|
||||
self.filename = fp
|
||||
self._exclusive_fp = True
|
||||
else:
|
||||
# stream
|
||||
self.fp = fp
|
||||
self.filename = filename
|
||||
# can be overridden
|
||||
self._exclusive_fp = None
|
||||
|
||||
try:
|
||||
try:
|
||||
self._open()
|
||||
except (
|
||||
IndexError, # end of data
|
||||
TypeError, # end of data (ord)
|
||||
KeyError, # unsupported mode
|
||||
EOFError, # got header but not the first frame
|
||||
struct.error,
|
||||
) as v:
|
||||
raise SyntaxError(v) from v
|
||||
|
||||
if not self.mode or self.size[0] <= 0 or self.size[1] <= 0:
|
||||
msg = "not identified by this driver"
|
||||
raise SyntaxError(msg)
|
||||
except BaseException:
|
||||
# close the file only if we have opened it in this constructor
|
||||
if self._exclusive_fp:
|
||||
self.fp.close()
|
||||
raise
|
||||
|
||||
def get_format_mimetype(self):
|
||||
if self.custom_mimetype:
|
||||
return self.custom_mimetype
|
||||
if self.format is not None:
|
||||
return Image.MIME.get(self.format.upper())
|
||||
|
||||
def __setstate__(self, state):
|
||||
self.tile = []
|
||||
super().__setstate__(state)
|
||||
|
||||
def verify(self):
|
||||
"""Check file integrity"""
|
||||
|
||||
# raise exception if something's wrong. must be called
|
||||
# directly after open, and closes file when finished.
|
||||
if self._exclusive_fp:
|
||||
self.fp.close()
|
||||
self.fp = None
|
||||
|
||||
def load(self):
|
||||
"""Load image data based on tile list"""
|
||||
|
||||
if self.tile is None:
|
||||
msg = "cannot load this image"
|
||||
raise OSError(msg)
|
||||
|
||||
pixel = Image.Image.load(self)
|
||||
if not self.tile:
|
||||
return pixel
|
||||
|
||||
self.map = None
|
||||
use_mmap = self.filename and len(self.tile) == 1
|
||||
# As of pypy 2.1.0, memory mapping was failing here.
|
||||
use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")
|
||||
|
||||
readonly = 0
|
||||
|
||||
# look for read/seek overrides
|
||||
try:
|
||||
read = self.load_read
|
||||
# don't use mmap if there are custom read/seek functions
|
||||
use_mmap = False
|
||||
except AttributeError:
|
||||
read = self.fp.read
|
||||
|
||||
try:
|
||||
seek = self.load_seek
|
||||
use_mmap = False
|
||||
except AttributeError:
|
||||
seek = self.fp.seek
|
||||
|
||||
if use_mmap:
|
||||
# try memory mapping
|
||||
decoder_name, extents, offset, args = self.tile[0]
|
||||
if isinstance(args, str):
|
||||
args = (args, 0, 1)
|
||||
if (
|
||||
decoder_name == "raw"
|
||||
and len(args) >= 3
|
||||
and args[0] == self.mode
|
||||
and args[0] in Image._MAPMODES
|
||||
):
|
||||
try:
|
||||
# use mmap, if possible
|
||||
import mmap
|
||||
|
||||
with open(self.filename) as fp:
|
||||
self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
|
||||
if offset + self.size[1] * args[1] > self.map.size():
|
||||
msg = "buffer is not large enough"
|
||||
raise OSError(msg)
|
||||
self.im = Image.core.map_buffer(
|
||||
self.map, self.size, decoder_name, offset, args
|
||||
)
|
||||
readonly = 1
|
||||
# After trashing self.im,
|
||||
# we might need to reload the palette data.
|
||||
if self.palette:
|
||||
self.palette.dirty = 1
|
||||
except (AttributeError, OSError, ImportError):
|
||||
self.map = None
|
||||
|
||||
self.load_prepare()
|
||||
err_code = -3 # initialize to unknown error
|
||||
if not self.map:
|
||||
# sort tiles in file order
|
||||
self.tile.sort(key=_tilesort)
|
||||
|
||||
try:
|
||||
# FIXME: This is a hack to handle TIFF's JpegTables tag.
|
||||
prefix = self.tile_prefix
|
||||
except AttributeError:
|
||||
prefix = b""
|
||||
|
||||
# Remove consecutive duplicates that only differ by their offset
|
||||
self.tile = [
|
||||
list(tiles)[-1]
|
||||
for _, tiles in itertools.groupby(
|
||||
self.tile, lambda tile: (tile[0], tile[1], tile[3])
|
||||
)
|
||||
]
|
||||
for decoder_name, extents, offset, args in self.tile:
|
||||
seek(offset)
|
||||
decoder = Image._getdecoder(
|
||||
self.mode, decoder_name, args, self.decoderconfig
|
||||
)
|
||||
try:
|
||||
decoder.setimage(self.im, extents)
|
||||
if decoder.pulls_fd:
|
||||
decoder.setfd(self.fp)
|
||||
err_code = decoder.decode(b"")[1]
|
||||
else:
|
||||
b = prefix
|
||||
while True:
|
||||
try:
|
||||
s = read(self.decodermaxblock)
|
||||
except (IndexError, struct.error) as e:
|
||||
# truncated png/gif
|
||||
if LOAD_TRUNCATED_IMAGES:
|
||||
break
|
||||
else:
|
||||
msg = "image file is truncated"
|
||||
raise OSError(msg) from e
|
||||
|
||||
if not s: # truncated jpeg
|
||||
if LOAD_TRUNCATED_IMAGES:
|
||||
break
|
||||
else:
|
||||
msg = (
|
||||
"image file is truncated "
|
||||
f"({len(b)} bytes not processed)"
|
||||
)
|
||||
raise OSError(msg)
|
||||
|
||||
b = b + s
|
||||
n, err_code = decoder.decode(b)
|
||||
if n < 0:
|
||||
break
|
||||
b = b[n:]
|
||||
finally:
|
||||
# Need to cleanup here to prevent leaks
|
||||
decoder.cleanup()
|
||||
|
||||
self.tile = []
|
||||
self.readonly = readonly
|
||||
|
||||
self.load_end()
|
||||
|
||||
if self._exclusive_fp and self._close_exclusive_fp_after_loading:
|
||||
self.fp.close()
|
||||
self.fp = None
|
||||
|
||||
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
|
||||
# still raised if decoder fails to return anything
|
||||
raise _get_oserror(err_code, encoder=False)
|
||||
|
||||
return Image.Image.load(self)
|
||||
|
||||
def load_prepare(self):
|
||||
# create image memory if necessary
|
||||
if not self.im or self.im.mode != self.mode or self.im.size != self.size:
|
||||
self.im = Image.core.new(self.mode, self.size)
|
||||
# create palette (optional)
|
||||
if self.mode == "P":
|
||||
Image.Image.load(self)
|
||||
|
||||
def load_end(self):
|
||||
# may be overridden
|
||||
pass
|
||||
|
||||
# may be defined for contained formats
|
||||
# def load_seek(self, pos):
|
||||
# pass
|
||||
|
||||
# may be defined for blocked formats (e.g. PNG)
|
||||
# def load_read(self, bytes):
|
||||
# pass
|
||||
|
||||
def _seek_check(self, frame):
|
||||
if (
|
||||
frame < self._min_frame
|
||||
# Only check upper limit on frames if additional seek operations
|
||||
# are not required to do so
|
||||
or (
|
||||
not (hasattr(self, "_n_frames") and self._n_frames is None)
|
||||
and frame >= self.n_frames + self._min_frame
|
||||
)
|
||||
):
|
||||
msg = "attempt to seek outside sequence"
|
||||
raise EOFError(msg)
|
||||
|
||||
return self.tell() != frame
|
||||
|
||||
|
||||
class StubImageFile(ImageFile):
|
||||
"""
|
||||
Base class for stub image loaders.
|
||||
|
||||
A stub loader is an image loader that can identify files of a
|
||||
certain format, but relies on external code to load the file.
|
||||
"""
|
||||
|
||||
def _open(self):
|
||||
msg = "StubImageFile subclass must implement _open"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def load(self):
|
||||
loader = self._load()
|
||||
if loader is None:
|
||||
msg = f"cannot find loader for this {self.format} file"
|
||||
raise OSError(msg)
|
||||
image = loader.load(self)
|
||||
assert image is not None
|
||||
# become the other object (!)
|
||||
self.__class__ = image.__class__
|
||||
self.__dict__ = image.__dict__
|
||||
return image.load()
|
||||
|
||||
def _load(self):
|
||||
"""(Hook) Find actual image loader."""
|
||||
msg = "StubImageFile subclass must implement _load"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
class Parser:
|
||||
"""
|
||||
Incremental image parser. This class implements the standard
|
||||
feed/close consumer interface.
|
||||
"""
|
||||
|
||||
incremental = None
|
||||
image = None
|
||||
data = None
|
||||
decoder = None
|
||||
offset = 0
|
||||
finished = 0
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
(Consumer) Reset the parser. Note that you can only call this
|
||||
method immediately after you've created a parser; parser
|
||||
instances cannot be reused.
|
||||
"""
|
||||
assert self.data is None, "cannot reuse parsers"
|
||||
|
||||
def feed(self, data):
|
||||
"""
|
||||
(Consumer) Feed data to the parser.
|
||||
|
||||
:param data: A string buffer.
|
||||
:exception OSError: If the parser failed to parse the image file.
|
||||
"""
|
||||
# collect data
|
||||
|
||||
if self.finished:
|
||||
return
|
||||
|
||||
if self.data is None:
|
||||
self.data = data
|
||||
else:
|
||||
self.data = self.data + data
|
||||
|
||||
# parse what we have
|
||||
if self.decoder:
|
||||
if self.offset > 0:
|
||||
# skip header
|
||||
skip = min(len(self.data), self.offset)
|
||||
self.data = self.data[skip:]
|
||||
self.offset = self.offset - skip
|
||||
if self.offset > 0 or not self.data:
|
||||
return
|
||||
|
||||
n, e = self.decoder.decode(self.data)
|
||||
|
||||
if n < 0:
|
||||
# end of stream
|
||||
self.data = None
|
||||
self.finished = 1
|
||||
if e < 0:
|
||||
# decoding error
|
||||
self.image = None
|
||||
raise _get_oserror(e, encoder=False)
|
||||
else:
|
||||
# end of image
|
||||
return
|
||||
self.data = self.data[n:]
|
||||
|
||||
elif self.image:
|
||||
# if we end up here with no decoder, this file cannot
|
||||
# be incrementally parsed. wait until we've gotten all
|
||||
# available data
|
||||
pass
|
||||
|
||||
else:
|
||||
# attempt to open this file
|
||||
try:
|
||||
with io.BytesIO(self.data) as fp:
|
||||
im = Image.open(fp)
|
||||
except OSError:
|
||||
pass # not enough data
|
||||
else:
|
||||
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
|
||||
if flag or len(im.tile) != 1:
|
||||
# custom load code, or multiple tiles
|
||||
self.decode = None
|
||||
else:
|
||||
# initialize decoder
|
||||
im.load_prepare()
|
||||
d, e, o, a = im.tile[0]
|
||||
im.tile = []
|
||||
self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig)
|
||||
self.decoder.setimage(im.im, e)
|
||||
|
||||
# calculate decoder offset
|
||||
self.offset = o
|
||||
if self.offset <= len(self.data):
|
||||
self.data = self.data[self.offset :]
|
||||
self.offset = 0
|
||||
|
||||
self.image = im
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.close()
|
||||
|
||||
def close(self):
|
||||
"""
|
||||
(Consumer) Close the stream.
|
||||
|
||||
:returns: An image object.
|
||||
:exception OSError: If the parser failed to parse the image file either
|
||||
because it cannot be identified or cannot be
|
||||
decoded.
|
||||
"""
|
||||
# finish decoding
|
||||
if self.decoder:
|
||||
# get rid of what's left in the buffers
|
||||
self.feed(b"")
|
||||
self.data = self.decoder = None
|
||||
if not self.finished:
|
||||
msg = "image was incomplete"
|
||||
raise OSError(msg)
|
||||
if not self.image:
|
||||
msg = "cannot parse this image"
|
||||
raise OSError(msg)
|
||||
if self.data:
|
||||
# incremental parsing not possible; reopen the file
|
||||
# now that we have all data
|
||||
with io.BytesIO(self.data) as fp:
|
||||
try:
|
||||
self.image = Image.open(fp)
|
||||
finally:
|
||||
self.image.load()
|
||||
return self.image
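# Illustrative sketch (added; not part of the original module): typical
# consumer use of the incremental parser defined above.
#
#   from PIL import ImageFile
#   parser = ImageFile.Parser()
#   with open("picture.png", "rb") as f:   # hypothetical filename
#       while chunk := f.read(1024):
#           parser.feed(chunk)
#   image = parser.close()                 # raises OSError if parsing failed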
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
def _save(im, fp, tile, bufsize=0):
|
||||
"""Helper to save image based on tile list
|
||||
|
||||
:param im: Image object.
|
||||
:param fp: File object.
|
||||
:param tile: Tile list.
|
||||
:param bufsize: Optional buffer size
|
||||
"""
|
||||
|
||||
im.load()
|
||||
if not hasattr(im, "encoderconfig"):
|
||||
im.encoderconfig = ()
|
||||
tile.sort(key=_tilesort)
|
||||
# FIXME: make MAXBLOCK a configuration parameter
|
||||
# It would be great if we could have the encoder specify what it needs
|
||||
# But, it would need at least the image size in most cases. RawEncode is
|
||||
# a tricky case.
|
||||
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
|
||||
try:
|
||||
fh = fp.fileno()
|
||||
fp.flush()
|
||||
_encode_tile(im, fp, tile, bufsize, fh)
|
||||
except (AttributeError, io.UnsupportedOperation) as exc:
|
||||
_encode_tile(im, fp, tile, bufsize, None, exc)
|
||||
if hasattr(fp, "flush"):
|
||||
fp.flush()
|
||||
|
||||
|
||||
def _encode_tile(im, fp, tile: list[_Tile], bufsize, fh, exc=None):
|
||||
for encoder_name, extents, offset, args in tile:
|
||||
if offset > 0:
|
||||
fp.seek(offset)
|
||||
encoder = Image._getencoder(im.mode, encoder_name, args, im.encoderconfig)
|
||||
try:
|
||||
encoder.setimage(im.im, extents)
|
||||
if encoder.pushes_fd:
|
||||
encoder.setfd(fp)
|
||||
errcode = encoder.encode_to_pyfd()[1]
|
||||
else:
|
||||
if exc:
|
||||
# compress to Python file-compatible object
|
||||
while True:
|
||||
errcode, data = encoder.encode(bufsize)[1:]
|
||||
fp.write(data)
|
||||
if errcode:
|
||||
break
|
||||
else:
|
||||
# slight speedup: compress to real file object
|
||||
errcode = encoder.encode_to_file(fh, bufsize)
|
||||
if errcode < 0:
|
||||
raise _get_oserror(errcode, encoder=True) from exc
|
||||
finally:
|
||||
encoder.cleanup()
|
||||
|
||||
|
||||
def _safe_read(fp, size):
|
||||
"""
|
||||
Reads large blocks in a safe way. Unlike fp.read(n), this function
|
||||
doesn't trust the user. If the requested size is larger than
|
||||
SAFEBLOCK, the file is read block by block.
|
||||
|
||||
:param fp: File handle. Must implement a ``read`` method.
:param size: Number of bytes to read.
:returns: A bytes object containing ``size`` bytes of data.
|
||||
|
||||
Raises an OSError if the file is truncated and the read cannot be completed
|
||||
|
||||
"""
|
||||
if size <= 0:
|
||||
return b""
|
||||
if size <= SAFEBLOCK:
|
||||
data = fp.read(size)
|
||||
if len(data) < size:
|
||||
msg = "Truncated File Read"
|
||||
raise OSError(msg)
|
||||
return data
|
||||
data = []
|
||||
remaining_size = size
|
||||
while remaining_size > 0:
|
||||
block = fp.read(min(remaining_size, SAFEBLOCK))
|
||||
if not block:
|
||||
break
|
||||
data.append(block)
|
||||
remaining_size -= len(block)
|
||||
if sum(len(d) for d in data) < size:
|
||||
msg = "Truncated File Read"
|
||||
raise OSError(msg)
|
||||
return b"".join(data)
|
||||
|
||||
|
||||
class PyCodecState:
|
||||
def __init__(self):
|
||||
self.xsize = 0
|
||||
self.ysize = 0
|
||||
self.xoff = 0
|
||||
self.yoff = 0
|
||||
|
||||
def extents(self):
|
||||
return self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize
|
||||
|
||||
|
||||
class PyCodec:
|
||||
def __init__(self, mode, *args):
|
||||
self.im = None
|
||||
self.state = PyCodecState()
|
||||
self.fd = None
|
||||
self.mode = mode
|
||||
self.init(args)
|
||||
|
||||
def init(self, args):
|
||||
"""
|
||||
Override to perform codec specific initialization
|
||||
|
||||
:param args: Array of args items from the tile entry
|
||||
:returns: None
|
||||
"""
|
||||
self.args = args
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
Override to perform codec specific cleanup
|
||||
|
||||
:returns: None
|
||||
"""
|
||||
pass
|
||||
|
||||
def setfd(self, fd):
|
||||
"""
|
||||
Called from ImageFile to set the Python file-like object
|
||||
|
||||
:param fd: A Python file-like object
|
||||
:returns: None
|
||||
"""
|
||||
self.fd = fd
|
||||
|
||||
def setimage(self, im, extents=None):
|
||||
"""
|
||||
Called from ImageFile to set the core output image for the codec
|
||||
|
||||
:param im: A core image object
|
||||
:param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
|
||||
for this tile
|
||||
:returns: None
|
||||
"""
|
||||
|
||||
# following c code
|
||||
self.im = im
|
||||
|
||||
if extents:
|
||||
(x0, y0, x1, y1) = extents
|
||||
else:
|
||||
(x0, y0, x1, y1) = (0, 0, 0, 0)
|
||||
|
||||
if x0 == 0 and x1 == 0:
|
||||
self.state.xsize, self.state.ysize = self.im.size
|
||||
else:
|
||||
self.state.xoff = x0
|
||||
self.state.yoff = y0
|
||||
self.state.xsize = x1 - x0
|
||||
self.state.ysize = y1 - y0
|
||||
|
||||
if self.state.xsize <= 0 or self.state.ysize <= 0:
|
||||
msg = "Size cannot be negative"
|
||||
raise ValueError(msg)
|
||||
|
||||
if (
|
||||
self.state.xsize + self.state.xoff > self.im.size[0]
|
||||
or self.state.ysize + self.state.yoff > self.im.size[1]
|
||||
):
|
||||
msg = "Tile cannot extend outside image"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
class PyDecoder(PyCodec):
|
||||
"""
|
||||
Python implementation of a format decoder. Override this class and
|
||||
add the decoding logic in the :meth:`decode` method.
|
||||
|
||||
See :ref:`Writing Your Own File Codec in Python<file-codecs-py>`
|
||||
"""
|
||||
|
||||
_pulls_fd = False
|
||||
|
||||
@property
|
||||
def pulls_fd(self):
|
||||
return self._pulls_fd
|
||||
|
||||
def decode(self, buffer):
|
||||
"""
|
||||
Override to perform the decoding process.
|
||||
|
||||
:param buffer: A bytes object with the data to be decoded.
|
||||
:returns: A tuple of ``(bytes consumed, errcode)``.
|
||||
If finished with decoding return -1 for the bytes consumed.
|
||||
Err codes are from :data:`.ImageFile.ERRORS`.
|
||||
"""
|
||||
msg = "unavailable in base decoder"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def set_as_raw(self, data, rawmode=None):
|
||||
"""
|
||||
Convenience method to set the internal image from a stream of raw data
|
||||
|
||||
:param data: Bytes to be set
|
||||
:param rawmode: The rawmode to be used for the decoder.
|
||||
If not specified, it will default to the mode of the image
|
||||
:returns: None
|
||||
"""
|
||||
|
||||
if not rawmode:
|
||||
rawmode = self.mode
|
||||
d = Image._getdecoder(self.mode, "raw", rawmode)
|
||||
d.setimage(self.im, self.state.extents())
|
||||
s = d.decode(data)
|
||||
|
||||
if s[0] >= 0:
|
||||
msg = "not enough image data"
|
||||
raise ValueError(msg)
|
||||
if s[1] != 0:
|
||||
msg = "cannot decode image data"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
class PyEncoder(PyCodec):
|
||||
"""
|
||||
Python implementation of a format encoder. Override this class and
add the encoding logic in the :meth:`encode` method.
|
||||
|
||||
See :ref:`Writing Your Own File Codec in Python<file-codecs-py>`
|
||||
"""
|
||||
|
||||
_pushes_fd = False
|
||||
|
||||
@property
|
||||
def pushes_fd(self):
|
||||
return self._pushes_fd
|
||||
|
||||
def encode(self, bufsize):
|
||||
"""
|
||||
Override to perform the encoding process.
|
||||
|
||||
:param bufsize: Buffer size.
|
||||
:returns: A tuple of ``(bytes encoded, errcode, bytes)``.
|
||||
If finished with encoding return 1 for the error code.
|
||||
Err codes are from :data:`.ImageFile.ERRORS`.
|
||||
"""
|
||||
msg = "unavailable in base encoder"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def encode_to_pyfd(self):
|
||||
"""
|
||||
If ``pushes_fd`` is ``True``, then this method will be used,
|
||||
and ``encode()`` will only be called once.
|
||||
|
||||
:returns: A tuple of ``(bytes consumed, errcode)``.
|
||||
Err codes are from :data:`.ImageFile.ERRORS`.
|
||||
"""
|
||||
if not self.pushes_fd:
|
||||
return 0, -8 # bad configuration
|
||||
bytes_consumed, errcode, data = self.encode(0)
|
||||
if data:
|
||||
self.fd.write(data)
|
||||
return bytes_consumed, errcode
|
||||
|
||||
def encode_to_file(self, fh, bufsize):
|
||||
"""
|
||||
:param fh: File handle.
|
||||
:param bufsize: Buffer size.
|
||||
|
||||
:returns: If finished successfully, return 0.
|
||||
Otherwise, return an error code. Err codes are from
|
||||
:data:`.ImageFile.ERRORS`.
|
||||
"""
|
||||
errcode = 0
|
||||
while errcode == 0:
|
||||
status, errcode, buf = self.encode(bufsize)
|
||||
if status > 0:
|
||||
fh.write(buf[status:])
|
||||
return errcode
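To make the codec plumbing above concrete, here is a minimal sketch of a custom decoder built on PyDecoder; the registered format name and the assumption that the payload is raw greyscale bytes are invented for illustration:

from PIL import Image, ImageFile

class RawLDecoder(ImageFile.PyDecoder):
    # Pull mode: Pillow hands us the file object instead of buffered chunks.
    _pulls_fd = True

    def decode(self, buffer):
        data = self.fd.read()          # read the whole payload
        self.set_as_raw(data, "L")     # hand it to the built-in raw decoder
        return -1, 0                   # -1: finished, 0: no error

Image.register_decoder("raw_l_example", RawLDecoder)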
|
@ -1,568 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard filters
|
||||
#
|
||||
# History:
|
||||
# 1995-11-27 fl Created
|
||||
# 2002-06-08 fl Added rank and mode filters
|
||||
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2002 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
|
||||
|
||||
class Filter:
|
||||
pass
|
||||
|
||||
|
||||
class MultibandFilter(Filter):
|
||||
pass
|
||||
|
||||
|
||||
class BuiltinFilter(MultibandFilter):
|
||||
def filter(self, image):
|
||||
if image.mode == "P":
|
||||
msg = "cannot filter palette images"
|
||||
raise ValueError(msg)
|
||||
return image.filter(*self.filterargs)
|
||||
|
||||
|
||||
class Kernel(BuiltinFilter):
|
||||
"""
|
||||
Create a convolution kernel. The current version only
|
||||
supports 3x3 and 5x5 integer and floating point kernels.
|
||||
|
||||
In the current version, kernels can only be applied to
|
||||
"L" and "RGB" images.
|
||||
|
||||
:param size: Kernel size, given as (width, height). In the current
|
||||
version, this must be (3,3) or (5,5).
|
||||
:param kernel: A sequence containing kernel weights. The kernel will
|
||||
be flipped vertically before being applied to the image.
|
||||
:param scale: Scale factor. If given, the result for each pixel is
|
||||
divided by this value. The default is the sum of the
|
||||
kernel weights.
|
||||
:param offset: Offset. If given, this value is added to the result,
|
||||
after it has been divided by the scale factor.
|
||||
"""
|
||||
|
||||
name = "Kernel"
|
||||
|
||||
def __init__(self, size, kernel, scale=None, offset=0):
|
||||
if scale is None:
|
||||
# default scale is sum of kernel
|
||||
scale = functools.reduce(lambda a, b: a + b, kernel)
|
||||
if size[0] * size[1] != len(kernel):
|
||||
msg = "not enough coefficients in kernel"
|
||||
raise ValueError(msg)
|
||||
self.filterargs = size, scale, offset, kernel
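# Illustrative sketch (added; not part of the original module): a 3x3
# sharpening convolution built with the Kernel filter defined above.
#
#   from PIL import Image, ImageFilter
#   sharpen = ImageFilter.Kernel((3, 3), (0, -1, 0,
#                                         -1, 5, -1,
#                                         0, -1, 0))    # weights sum to 1
#   out = Image.open("photo.jpg").filter(sharpen)       # hypothetical file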
|
||||
|
||||
|
||||
class RankFilter(Filter):
|
||||
"""
|
||||
Create a rank filter. The rank filter sorts all pixels in
|
||||
a window of the given size, and returns the ``rank``'th value.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
:param rank: What pixel value to pick. Use 0 for a min filter,
|
||||
``size * size / 2`` for a median filter, ``size * size - 1``
|
||||
for a max filter, etc.
|
||||
"""
|
||||
|
||||
name = "Rank"
|
||||
|
||||
def __init__(self, size, rank):
|
||||
self.size = size
|
||||
self.rank = rank
|
||||
|
||||
def filter(self, image):
|
||||
if image.mode == "P":
|
||||
msg = "cannot filter palette images"
|
||||
raise ValueError(msg)
|
||||
image = image.expand(self.size // 2, self.size // 2)
|
||||
return image.rankfilter(self.size, self.rank)
|
||||
|
||||
|
||||
class MedianFilter(RankFilter):
|
||||
"""
|
||||
Create a median filter. Picks the median pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
|
||||
name = "Median"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = size * size // 2
|
||||
|
||||
|
||||
class MinFilter(RankFilter):
|
||||
"""
|
||||
Create a min filter. Picks the lowest pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
|
||||
name = "Min"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = 0
|
||||
|
||||
|
||||
class MaxFilter(RankFilter):
|
||||
"""
|
||||
Create a max filter. Picks the largest pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
|
||||
name = "Max"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = size * size - 1
|
||||
|
||||
|
||||
class ModeFilter(Filter):
|
||||
"""
|
||||
Create a mode filter. Picks the most frequent pixel value in a box with the
|
||||
given size. Pixel values that occur only once or twice are ignored; if no
|
||||
pixel value occurs more than twice, the original pixel value is preserved.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
|
||||
name = "Mode"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
|
||||
def filter(self, image):
|
||||
return image.modefilter(self.size)
|
||||
|
||||
|
||||
class GaussianBlur(MultibandFilter):
|
||||
"""Blurs the image with a sequence of extended box filters, which
|
||||
approximates a Gaussian kernel. For details on accuracy see
|
||||
<https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf>
|
||||
|
||||
:param radius: Standard deviation of the Gaussian kernel. Either a sequence of two
|
||||
numbers for x and y, or a single number for both.
|
||||
"""
|
||||
|
||||
name = "GaussianBlur"
|
||||
|
||||
def __init__(self, radius=2):
|
||||
self.radius = radius
|
||||
|
||||
def filter(self, image):
|
||||
xy = self.radius
|
||||
if not isinstance(xy, (tuple, list)):
|
||||
xy = (xy, xy)
|
||||
if xy == (0, 0):
|
||||
return image.copy()
|
||||
return image.gaussian_blur(xy)
|
||||
|
||||
|
||||
class BoxBlur(MultibandFilter):
|
||||
"""Blurs the image by setting each pixel to the average value of the pixels
|
||||
in a square box extending radius pixels in each direction.
|
||||
Supports float radius of arbitrary size. Uses an optimized implementation
|
||||
which runs in linear time relative to the size of the image
|
||||
for any radius value.
|
||||
|
||||
:param radius: Size of the box in a direction. Either a sequence of two numbers for
|
||||
x and y, or a single number for both.
|
||||
|
||||
Radius 0 does not blur, returns an identical image.
|
||||
Radius 1 takes 1 pixel in each direction, i.e. 9 pixels in total.
|
||||
"""
|
||||
|
||||
name = "BoxBlur"
|
||||
|
||||
def __init__(self, radius):
|
||||
xy = radius
|
||||
if not isinstance(xy, (tuple, list)):
|
||||
xy = (xy, xy)
|
||||
if xy[0] < 0 or xy[1] < 0:
|
||||
msg = "radius must be >= 0"
|
||||
raise ValueError(msg)
|
||||
self.radius = radius
|
||||
|
||||
def filter(self, image):
|
||||
xy = self.radius
|
||||
if not isinstance(xy, (tuple, list)):
|
||||
xy = (xy, xy)
|
||||
if xy == (0, 0):
|
||||
return image.copy()
|
||||
return image.box_blur(xy)
|
||||
|
||||
|
||||
class UnsharpMask(MultibandFilter):
|
||||
"""Unsharp mask filter.
|
||||
|
||||
See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
|
||||
the parameters.
|
||||
|
||||
:param radius: Blur Radius
|
||||
:param percent: Unsharp strength, in percent
|
||||
:param threshold: Threshold controls the minimum brightness change that
|
||||
will be sharpened
|
||||
|
||||
.. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
|
||||
|
||||
"""
|
||||
|
||||
name = "UnsharpMask"
|
||||
|
||||
def __init__(self, radius=2, percent=150, threshold=3):
|
||||
self.radius = radius
|
||||
self.percent = percent
|
||||
self.threshold = threshold
|
||||
|
||||
def filter(self, image):
|
||||
return image.unsharp_mask(self.radius, self.percent, self.threshold)
|
||||
|
||||
|
||||
class BLUR(BuiltinFilter):
|
||||
name = "Blur"
|
||||
# fmt: off
|
||||
filterargs = (5, 5), 16, 0, (
|
||||
1, 1, 1, 1, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 1, 1, 1, 1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class CONTOUR(BuiltinFilter):
|
||||
name = "Contour"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 1, 255, (
|
||||
-1, -1, -1,
|
||||
-1, 8, -1,
|
||||
-1, -1, -1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class DETAIL(BuiltinFilter):
|
||||
name = "Detail"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 6, 0, (
|
||||
0, -1, 0,
|
||||
-1, 10, -1,
|
||||
0, -1, 0,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class EDGE_ENHANCE(BuiltinFilter):
|
||||
name = "Edge-enhance"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 2, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 10, -1,
|
||||
-1, -1, -1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class EDGE_ENHANCE_MORE(BuiltinFilter):
|
||||
name = "Edge-enhance More"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 1, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 9, -1,
|
||||
-1, -1, -1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class EMBOSS(BuiltinFilter):
|
||||
name = "Emboss"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 1, 128, (
|
||||
-1, 0, 0,
|
||||
0, 1, 0,
|
||||
0, 0, 0,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class FIND_EDGES(BuiltinFilter):
|
||||
name = "Find Edges"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 1, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 8, -1,
|
||||
-1, -1, -1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class SHARPEN(BuiltinFilter):
|
||||
name = "Sharpen"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 16, 0, (
|
||||
-2, -2, -2,
|
||||
-2, 32, -2,
|
||||
-2, -2, -2,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class SMOOTH(BuiltinFilter):
|
||||
name = "Smooth"
|
||||
# fmt: off
|
||||
filterargs = (3, 3), 13, 0, (
|
||||
1, 1, 1,
|
||||
1, 5, 1,
|
||||
1, 1, 1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class SMOOTH_MORE(BuiltinFilter):
|
||||
name = "Smooth More"
|
||||
# fmt: off
|
||||
filterargs = (5, 5), 100, 0, (
|
||||
1, 1, 1, 1, 1,
|
||||
1, 5, 5, 5, 1,
|
||||
1, 5, 44, 5, 1,
|
||||
1, 5, 5, 5, 1,
|
||||
1, 1, 1, 1, 1,
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
|
||||
class Color3DLUT(MultibandFilter):
|
||||
"""Three-dimensional color lookup table.
|
||||
|
||||
Transforms 3-channel pixels using the values of the channels as coordinates
|
||||
in the 3D lookup table and interpolating the nearest elements.
|
||||
|
||||
This method allows you to apply almost any color transformation
|
||||
in constant time by using pre-calculated decimated tables.
|
||||
|
||||
.. versionadded:: 5.2.0
|
||||
|
||||
:param size: Size of the table. One int or tuple of (int, int, int).
|
||||
Minimal size in any dimension is 2, maximum is 65.
|
||||
:param table: Flat lookup table. A list of ``channels * size**3``
|
||||
float elements or a list of ``size**3`` channels-sized
|
||||
tuples with floats. Channels are changed first,
|
||||
then first dimension, then second, then third.
|
||||
Value 0.0 corresponds to the lowest output value, 1.0 to the highest.
|
||||
:param channels: Number of channels in the table. Could be 3 or 4.
|
||||
Default is 3.
|
||||
:param target_mode: A mode for the result image. Should have at least
``channels`` channels. Default is ``None``, which means the mode
is not changed.
|
||||
"""
|
||||
|
||||
name = "Color 3D LUT"
|
||||
|
||||
def __init__(self, size, table, channels=3, target_mode=None, **kwargs):
|
||||
if channels not in (3, 4):
|
||||
msg = "Only 3 or 4 output channels are supported"
|
||||
raise ValueError(msg)
|
||||
self.size = size = self._check_size(size)
|
||||
self.channels = channels
|
||||
self.mode = target_mode
|
||||
|
||||
# Hidden flag `_copy_table=False` could be used to avoid extra copying
|
||||
# of the table if the table is specially made for the constructor.
|
||||
copy_table = kwargs.get("_copy_table", True)
|
||||
items = size[0] * size[1] * size[2]
|
||||
wrong_size = False
|
||||
|
||||
numpy = None
|
||||
if hasattr(table, "shape"):
|
||||
try:
|
||||
import numpy
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if numpy and isinstance(table, numpy.ndarray):
|
||||
if copy_table:
|
||||
table = table.copy()
|
||||
|
||||
if table.shape in [
|
||||
(items * channels,),
|
||||
(items, channels),
|
||||
(size[2], size[1], size[0], channels),
|
||||
]:
|
||||
table = table.reshape(items * channels)
|
||||
else:
|
||||
wrong_size = True
|
||||
|
||||
else:
|
||||
if copy_table:
|
||||
table = list(table)
|
||||
|
||||
# Convert to a flat list
|
||||
if table and isinstance(table[0], (list, tuple)):
|
||||
table, raw_table = [], table
|
||||
for pixel in raw_table:
|
||||
if len(pixel) != channels:
|
||||
msg = (
|
||||
"The elements of the table should "
|
||||
f"have a length of {channels}."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
table.extend(pixel)
|
||||
|
||||
if wrong_size or len(table) != items * channels:
|
||||
msg = (
|
||||
"The table should have either channels * size**3 float items "
|
||||
"or size**3 items of channels-sized tuples with floats. "
|
||||
f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. "
|
||||
f"Actual length: {len(table)}"
|
||||
)
|
||||
raise ValueError(msg)
|
||||
self.table = table
|
||||
|
||||
@staticmethod
|
||||
def _check_size(size):
|
||||
try:
|
||||
_, _, _ = size
|
||||
except ValueError as e:
|
||||
msg = "Size should be either an integer or a tuple of three integers."
|
||||
raise ValueError(msg) from e
|
||||
except TypeError:
|
||||
size = (size, size, size)
|
||||
size = [int(x) for x in size]
|
||||
for size_1d in size:
|
||||
if not 2 <= size_1d <= 65:
|
||||
msg = "Size should be in [2, 65] range."
|
||||
raise ValueError(msg)
|
||||
return size
|
||||
|
||||
@classmethod
|
||||
def generate(cls, size, callback, channels=3, target_mode=None):
|
||||
"""Generates new LUT using provided callback.
|
||||
|
||||
:param size: Size of the table. Passed to the constructor.
|
||||
:param callback: Function with three parameters which correspond to
the three color channels. Will be called ``size**3``
times with values from 0.0 to 1.0 and should return
a tuple with ``channels`` elements.
:param channels: The number of channels the callback should return.
|
||||
:param target_mode: Passed to the constructor of the resulting
|
||||
lookup table.
|
||||
"""
|
||||
size_1d, size_2d, size_3d = cls._check_size(size)
|
||||
if channels not in (3, 4):
|
||||
msg = "Only 3 or 4 output channels are supported"
|
||||
raise ValueError(msg)
|
||||
|
||||
table = [0] * (size_1d * size_2d * size_3d * channels)
|
||||
idx_out = 0
|
||||
for b in range(size_3d):
|
||||
for g in range(size_2d):
|
||||
for r in range(size_1d):
|
||||
table[idx_out : idx_out + channels] = callback(
|
||||
r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
|
||||
)
|
||||
idx_out += channels
|
||||
|
||||
return cls(
|
||||
(size_1d, size_2d, size_3d),
|
||||
table,
|
||||
channels=channels,
|
||||
target_mode=target_mode,
|
||||
_copy_table=False,
|
||||
)
|
||||
|
||||
def transform(self, callback, with_normals=False, channels=None, target_mode=None):
|
||||
"""Transforms the table values using provided callback and returns
|
||||
a new LUT with altered values.
|
||||
|
||||
:param callback: A function which takes old lookup table values
|
||||
and returns a new set of values. The number
|
||||
of arguments the function should take is
|
||||
``self.channels`` or ``3 + self.channels``
|
||||
if ``with_normals`` flag is set.
|
||||
Should return a tuple of ``self.channels`` or
|
||||
``channels`` elements if it is set.
|
||||
:param with_normals: If true, ``callback`` will be called with
|
||||
coordinates in the color cube as the first
|
||||
three arguments. Otherwise, ``callback``
|
||||
will be called only with actual color values.
|
||||
:param channels: The number of channels in the resulting lookup table.
|
||||
:param target_mode: Passed to the constructor of the resulting
|
||||
lookup table.
|
||||
"""
|
||||
if channels not in (None, 3, 4):
|
||||
msg = "Only 3 or 4 output channels are supported"
|
||||
raise ValueError(msg)
|
||||
ch_in = self.channels
|
||||
ch_out = channels or ch_in
|
||||
size_1d, size_2d, size_3d = self.size
|
||||
|
||||
table = [0] * (size_1d * size_2d * size_3d * ch_out)
|
||||
idx_in = 0
|
||||
idx_out = 0
|
||||
for b in range(size_3d):
|
||||
for g in range(size_2d):
|
||||
for r in range(size_1d):
|
||||
values = self.table[idx_in : idx_in + ch_in]
|
||||
if with_normals:
|
||||
values = callback(
|
||||
r / (size_1d - 1),
|
||||
g / (size_2d - 1),
|
||||
b / (size_3d - 1),
|
||||
*values,
|
||||
)
|
||||
else:
|
||||
values = callback(*values)
|
||||
table[idx_out : idx_out + ch_out] = values
|
||||
idx_in += ch_in
|
||||
idx_out += ch_out
|
||||
|
||||
return type(self)(
|
||||
self.size,
|
||||
table,
|
||||
channels=ch_out,
|
||||
target_mode=target_mode or self.mode,
|
||||
_copy_table=False,
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
r = [
|
||||
f"{self.__class__.__name__} from {self.table.__class__.__name__}",
|
||||
"size={:d}x{:d}x{:d}".format(*self.size),
|
||||
f"channels={self.channels:d}",
|
||||
]
|
||||
if self.mode:
|
||||
r.append(f"target_mode={self.mode}")
|
||||
return "<{}>".format(" ".join(r))
|
||||
|
||||
def filter(self, image):
|
||||
from . import Image
|
||||
|
||||
return image.color_lut_3d(
|
||||
self.mode or image.mode,
|
||||
Image.Resampling.BILINEAR,
|
||||
self.channels,
|
||||
self.size[0],
|
||||
self.size[1],
|
||||
self.size[2],
|
||||
self.table,
|
||||
)
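As a usage sketch for the lookup-table filter above (illustrative; the input filename is hypothetical), the generate() classmethod builds the flat table from a callback:

from PIL import Image, ImageFilter

# A small LUT that swaps the red and blue channels.
swap_rb = ImageFilter.Color3DLUT.generate(
    size=9,
    callback=lambda r, g, b: (b, g, r),
)
out = Image.open("photo.jpg").filter(swap_rb)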
|
File diff suppressed because it is too large
@ -1,178 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# screen grabber
|
||||
#
|
||||
# History:
|
||||
# 2001-04-26 fl created
|
||||
# 2001-09-17 fl use builtin driver, if present
|
||||
# 2002-11-19 fl added grabclipboard support
|
||||
#
|
||||
# Copyright (c) 2001-2002 by Secret Labs AB
|
||||
# Copyright (c) 2001-2002 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from . import Image
|
||||
|
||||
|
||||
def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):
|
||||
if xdisplay is None:
|
||||
if sys.platform == "darwin":
|
||||
fh, filepath = tempfile.mkstemp(".png")
|
||||
os.close(fh)
|
||||
args = ["screencapture"]
|
||||
if bbox:
|
||||
left, top, right, bottom = bbox
|
||||
args += ["-R", f"{left},{top},{right-left},{bottom-top}"]
|
||||
subprocess.call(args + ["-x", filepath])
|
||||
im = Image.open(filepath)
|
||||
im.load()
|
||||
os.unlink(filepath)
|
||||
if bbox:
|
||||
im_resized = im.resize((right - left, bottom - top))
|
||||
im.close()
|
||||
return im_resized
|
||||
return im
|
||||
elif sys.platform == "win32":
|
||||
offset, size, data = Image.core.grabscreen_win32(
|
||||
include_layered_windows, all_screens
|
||||
)
|
||||
im = Image.frombytes(
|
||||
"RGB",
|
||||
size,
|
||||
data,
|
||||
# RGB, 32-bit line padding, origin lower left corner
|
||||
"raw",
|
||||
"BGR",
|
||||
(size[0] * 3 + 3) & -4,
|
||||
-1,
|
||||
)
|
||||
if bbox:
|
||||
x0, y0 = offset
|
||||
left, top, right, bottom = bbox
|
||||
im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
|
||||
return im
|
||||
try:
|
||||
if not Image.core.HAVE_XCB:
|
||||
msg = "Pillow was built without XCB support"
|
||||
raise OSError(msg)
|
||||
size, data = Image.core.grabscreen_x11(xdisplay)
|
||||
except OSError:
|
||||
if (
|
||||
xdisplay is None
|
||||
and sys.platform not in ("darwin", "win32")
|
||||
and shutil.which("gnome-screenshot")
|
||||
):
|
||||
fh, filepath = tempfile.mkstemp(".png")
|
||||
os.close(fh)
|
||||
subprocess.call(["gnome-screenshot", "-f", filepath])
|
||||
im = Image.open(filepath)
|
||||
im.load()
|
||||
os.unlink(filepath)
|
||||
if bbox:
|
||||
im_cropped = im.crop(bbox)
|
||||
im.close()
|
||||
return im_cropped
|
||||
return im
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1)
|
||||
if bbox:
|
||||
im = im.crop(bbox)
|
||||
return im
|
||||
|
||||
|
||||
def grabclipboard():
|
||||
if sys.platform == "darwin":
|
||||
fh, filepath = tempfile.mkstemp(".png")
|
||||
os.close(fh)
|
||||
commands = [
|
||||
'set theFile to (open for access POSIX file "'
|
||||
+ filepath
|
||||
+ '" with write permission)',
|
||||
"try",
|
||||
" write (the clipboard as «class PNGf») to theFile",
|
||||
"end try",
|
||||
"close access theFile",
|
||||
]
|
||||
script = ["osascript"]
|
||||
for command in commands:
|
||||
script += ["-e", command]
|
||||
subprocess.call(script)
|
||||
|
||||
im = None
|
||||
if os.stat(filepath).st_size != 0:
|
||||
im = Image.open(filepath)
|
||||
im.load()
|
||||
os.unlink(filepath)
|
||||
return im
|
||||
elif sys.platform == "win32":
|
||||
fmt, data = Image.core.grabclipboard_win32()
|
||||
if fmt == "file": # CF_HDROP
|
||||
import struct
|
||||
|
||||
o = struct.unpack_from("I", data)[0]
|
||||
if data[16] != 0:
|
||||
files = data[o:].decode("utf-16le").split("\0")
|
||||
else:
|
||||
files = data[o:].decode("mbcs").split("\0")
|
||||
return files[: files.index("")]
|
||||
if isinstance(data, bytes):
|
||||
data = io.BytesIO(data)
|
||||
if fmt == "png":
|
||||
from . import PngImagePlugin
|
||||
|
||||
return PngImagePlugin.PngImageFile(data)
|
||||
elif fmt == "DIB":
|
||||
from . import BmpImagePlugin
|
||||
|
||||
return BmpImagePlugin.DibImageFile(data)
|
||||
return None
|
||||
else:
|
||||
if os.getenv("WAYLAND_DISPLAY"):
|
||||
session_type = "wayland"
|
||||
elif os.getenv("DISPLAY"):
|
||||
session_type = "x11"
|
||||
else: # Session type check failed
|
||||
session_type = None
|
||||
|
||||
if shutil.which("wl-paste") and session_type in ("wayland", None):
|
||||
output = subprocess.check_output(["wl-paste", "-l"]).decode()
|
||||
mimetypes = output.splitlines()
|
||||
if "image/png" in mimetypes:
|
||||
mimetype = "image/png"
|
||||
elif mimetypes:
|
||||
mimetype = mimetypes[0]
|
||||
else:
|
||||
mimetype = None
|
||||
|
||||
args = ["wl-paste"]
|
||||
if mimetype:
|
||||
args.extend(["-t", mimetype])
|
||||
elif shutil.which("xclip") and session_type in ("x11", None):
|
||||
args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"]
|
||||
else:
|
||||
msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
p = subprocess.run(args, capture_output=True)
|
||||
err = p.stderr
|
||||
if err:
|
||||
msg = f"{args[0]} error: {err.strip().decode()}"
|
||||
raise ChildProcessError(msg)
|
||||
data = io.BytesIO(p.stdout)
|
||||
im = Image.open(data)
|
||||
im.load()
|
||||
return im
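A brief usage sketch for the screen and clipboard helpers above (illustrative only; availability depends on the platform as described in the code):

from PIL import ImageGrab

screen = ImageGrab.grab()                          # full-screen capture
region = ImageGrab.grab(bbox=(0, 0, 400, 300))     # left, top, right, bottom
clip = ImageGrab.grabclipboard()                   # Image, list of filenames, or None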
|
@ -1,265 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# a simple math add-on for the Python Imaging Library
|
||||
#
|
||||
# History:
|
||||
# 1999-02-15 fl Original PIL Plus release
|
||||
# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
|
||||
# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
|
||||
#
|
||||
# Copyright (c) 1999-2005 by Secret Labs AB
|
||||
# Copyright (c) 2005 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import builtins
|
||||
|
||||
from . import Image, _imagingmath
|
||||
|
||||
|
||||
class _Operand:
|
||||
"""Wraps an image operand, providing standard operators"""
|
||||
|
||||
def __init__(self, im):
|
||||
self.im = im
|
||||
|
||||
def __fixup(self, im1):
|
||||
# convert image to suitable mode
|
||||
if isinstance(im1, _Operand):
|
||||
# argument was an image.
|
||||
if im1.im.mode in ("1", "L"):
|
||||
return im1.im.convert("I")
|
||||
elif im1.im.mode in ("I", "F"):
|
||||
return im1.im
|
||||
else:
|
||||
msg = f"unsupported mode: {im1.im.mode}"
|
||||
raise ValueError(msg)
|
||||
else:
|
||||
# argument was a constant
|
||||
if isinstance(im1, (int, float)) and self.im.mode in ("1", "L", "I"):
|
||||
return Image.new("I", self.im.size, im1)
|
||||
else:
|
||||
return Image.new("F", self.im.size, im1)
|
||||
|
||||
def apply(self, op, im1, im2=None, mode=None):
|
||||
im1 = self.__fixup(im1)
|
||||
if im2 is None:
|
||||
# unary operation
|
||||
out = Image.new(mode or im1.mode, im1.size, None)
|
||||
im1.load()
|
||||
try:
|
||||
op = getattr(_imagingmath, op + "_" + im1.mode)
|
||||
except AttributeError as e:
|
||||
msg = f"bad operand type for '{op}'"
|
||||
raise TypeError(msg) from e
|
||||
_imagingmath.unop(op, out.im.id, im1.im.id)
|
||||
else:
|
||||
# binary operation
|
||||
im2 = self.__fixup(im2)
|
||||
if im1.mode != im2.mode:
|
||||
# convert both arguments to floating point
|
||||
if im1.mode != "F":
|
||||
im1 = im1.convert("F")
|
||||
if im2.mode != "F":
|
||||
im2 = im2.convert("F")
|
||||
if im1.size != im2.size:
|
||||
# crop both arguments to a common size
|
||||
size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1]))
|
||||
if im1.size != size:
|
||||
im1 = im1.crop((0, 0) + size)
|
||||
if im2.size != size:
|
||||
im2 = im2.crop((0, 0) + size)
|
||||
out = Image.new(mode or im1.mode, im1.size, None)
|
||||
im1.load()
|
||||
im2.load()
|
||||
try:
|
||||
op = getattr(_imagingmath, op + "_" + im1.mode)
|
||||
except AttributeError as e:
|
||||
msg = f"bad operand type for '{op}'"
|
||||
raise TypeError(msg) from e
|
||||
_imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id)
|
||||
return _Operand(out)
|
||||
|
||||
# unary operators
|
||||
def __bool__(self):
|
||||
# an image is "true" if it contains at least one non-zero pixel
|
||||
return self.im.getbbox() is not None
|
||||
|
||||
def __abs__(self):
|
||||
return self.apply("abs", self)
|
||||
|
||||
def __pos__(self):
|
||||
return self
|
||||
|
||||
def __neg__(self):
|
||||
return self.apply("neg", self)
|
||||
|
||||
# binary operators
|
||||
def __add__(self, other):
|
||||
return self.apply("add", self, other)
|
||||
|
||||
def __radd__(self, other):
|
||||
return self.apply("add", other, self)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.apply("sub", self, other)
|
||||
|
||||
def __rsub__(self, other):
|
||||
return self.apply("sub", other, self)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.apply("mul", self, other)
|
||||
|
||||
def __rmul__(self, other):
|
||||
return self.apply("mul", other, self)
|
||||
|
||||
def __truediv__(self, other):
|
||||
return self.apply("div", self, other)
|
||||
|
||||
def __rtruediv__(self, other):
|
||||
return self.apply("div", other, self)
|
||||
|
||||
def __mod__(self, other):
|
||||
return self.apply("mod", self, other)
|
||||
|
||||
def __rmod__(self, other):
|
||||
return self.apply("mod", other, self)
|
||||
|
||||
def __pow__(self, other):
|
||||
return self.apply("pow", self, other)
|
||||
|
||||
def __rpow__(self, other):
|
||||
return self.apply("pow", other, self)
|
||||
|
||||
# bitwise
|
||||
def __invert__(self):
|
||||
return self.apply("invert", self)
|
||||
|
||||
def __and__(self, other):
|
||||
return self.apply("and", self, other)
|
||||
|
||||
def __rand__(self, other):
|
||||
return self.apply("and", other, self)
|
||||
|
||||
def __or__(self, other):
|
||||
return self.apply("or", self, other)
|
||||
|
||||
def __ror__(self, other):
|
||||
return self.apply("or", other, self)
|
||||
|
||||
def __xor__(self, other):
|
||||
return self.apply("xor", self, other)
|
||||
|
||||
def __rxor__(self, other):
|
||||
return self.apply("xor", other, self)
|
||||
|
||||
def __lshift__(self, other):
|
||||
return self.apply("lshift", self, other)
|
||||
|
||||
def __rshift__(self, other):
|
||||
return self.apply("rshift", self, other)
|
||||
|
||||
# logical
|
||||
def __eq__(self, other):
|
||||
return self.apply("eq", self, other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return self.apply("ne", self, other)
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.apply("lt", self, other)
|
||||
|
||||
def __le__(self, other):
|
||||
return self.apply("le", self, other)
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.apply("gt", self, other)
|
||||
|
||||
def __ge__(self, other):
|
||||
return self.apply("ge", self, other)
|
||||
|
||||
|
||||
# conversions
|
||||
def imagemath_int(self):
|
||||
return _Operand(self.im.convert("I"))
|
||||
|
||||
|
||||
def imagemath_float(self):
|
||||
return _Operand(self.im.convert("F"))
|
||||
|
||||
|
||||
# logical
|
||||
def imagemath_equal(self, other):
|
||||
return self.apply("eq", self, other, mode="I")
|
||||
|
||||
|
||||
def imagemath_notequal(self, other):
|
||||
return self.apply("ne", self, other, mode="I")
|
||||
|
||||
|
||||
def imagemath_min(self, other):
|
||||
return self.apply("min", self, other)
|
||||
|
||||
|
||||
def imagemath_max(self, other):
|
||||
return self.apply("max", self, other)
|
||||
|
||||
|
||||
def imagemath_convert(self, mode):
|
||||
return _Operand(self.im.convert(mode))
|
||||
|
||||
|
||||
ops = {}
|
||||
for k, v in list(globals().items()):
|
||||
if k[:10] == "imagemath_":
|
||||
ops[k[10:]] = v
|
||||
|
||||
|
||||
def eval(expression, _dict={}, **kw):
|
||||
"""
|
||||
Evaluates an image expression.
|
||||
|
||||
:param expression: A string containing a Python-style expression.
|
||||
:param _dict: Values to add to the evaluation context. You
can either use a dictionary, or one or more keyword
arguments.
|
||||
:return: The evaluated expression. This is usually an image object, but can
|
||||
also be an integer, a floating point value, or a pixel tuple,
|
||||
depending on the expression.
|
||||
"""
|
||||
|
||||
# build execution namespace
|
||||
args = ops.copy()
|
||||
for k in list(_dict.keys()) + list(kw.keys()):
|
||||
if "__" in k or hasattr(builtins, k):
|
||||
msg = f"'{k}' not allowed"
|
||||
raise ValueError(msg)
|
||||
|
||||
args.update(_dict)
|
||||
args.update(kw)
|
||||
for k, v in args.items():
|
||||
if hasattr(v, "im"):
|
||||
args[k] = _Operand(v)
|
||||
|
||||
compiled_code = compile(expression, "<string>", "eval")
|
||||
|
||||
def scan(code):
|
||||
for const in code.co_consts:
|
||||
if type(const) is type(compiled_code):
|
||||
scan(const)
|
||||
|
||||
for name in code.co_names:
|
||||
if name not in args and name != "abs":
|
||||
msg = f"'{name}' not allowed"
|
||||
raise ValueError(msg)
|
||||
|
||||
scan(compiled_code)
|
||||
out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args)
|
||||
try:
|
||||
return out.im
|
||||
except AttributeError:
|
||||
return out
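
As a quick orientation to the module above, here is a minimal usage sketch; the two input images and their pixel values are made up for the example, and the expression only uses names ("min", "convert") that the ops table above registers.

from PIL import Image, ImageMath

# Two illustrative single-band inputs of the same size.
a = Image.new("L", (64, 64), 40)
b = Image.new("L", (64, 64), 200)

# Keyword arguments become operands; "min" and "convert" come from the
# imagemath_* helpers collected into the ops namespace above.
out = ImageMath.eval("convert(min(a, b), 'L')", a=a, b=b)
print(out.mode, out.getpixel((0, 0)))  # prints: L 40
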
@ -1,96 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard mode descriptors
|
||||
#
|
||||
# History:
|
||||
# 2006-03-20 fl Added
|
||||
#
|
||||
# Copyright (c) 2006 by Secret Labs AB.
|
||||
# Copyright (c) 2006 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
|
||||
|
||||
class ModeDescriptor:
|
||||
"""Wrapper for mode strings."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
mode: str,
|
||||
bands: tuple[str, ...],
|
||||
basemode: str,
|
||||
basetype: str,
|
||||
typestr: str,
|
||||
) -> None:
|
||||
self.mode = mode
|
||||
self.bands = bands
|
||||
self.basemode = basemode
|
||||
self.basetype = basetype
|
||||
self.typestr = typestr
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.mode
|
||||
|
||||
|
||||
@lru_cache
|
||||
def getmode(mode: str) -> ModeDescriptor:
|
||||
"""Gets a mode descriptor for the given mode."""
|
||||
# initialize mode cache
|
||||
endian = "<" if sys.byteorder == "little" else ">"
|
||||
|
||||
modes = {
|
||||
# core modes
|
||||
# Bits need to be extended to bytes
|
||||
"1": ("L", "L", ("1",), "|b1"),
|
||||
"L": ("L", "L", ("L",), "|u1"),
|
||||
"I": ("L", "I", ("I",), endian + "i4"),
|
||||
"F": ("L", "F", ("F",), endian + "f4"),
|
||||
"P": ("P", "L", ("P",), "|u1"),
|
||||
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
|
||||
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
|
||||
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
|
||||
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
|
||||
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
|
||||
# UNDONE - unsigned |u1i1i1
|
||||
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
|
||||
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
|
||||
# extra experimental modes
|
||||
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"),
|
||||
"BGR;15": ("RGB", "L", ("B", "G", "R"), "|u1"),
|
||||
"BGR;16": ("RGB", "L", ("B", "G", "R"), "|u1"),
|
||||
"BGR;24": ("RGB", "L", ("B", "G", "R"), "|u1"),
|
||||
"LA": ("L", "L", ("L", "A"), "|u1"),
|
||||
"La": ("L", "L", ("L", "a"), "|u1"),
|
||||
"PA": ("RGB", "L", ("P", "A"), "|u1"),
|
||||
}
|
||||
if mode in modes:
|
||||
base_mode, base_type, bands, type_str = modes[mode]
|
||||
return ModeDescriptor(mode, bands, base_mode, base_type, type_str)
|
||||
|
||||
mapping_modes = {
|
||||
# I;16 == I;16L, and I;32 == I;32L
|
||||
"I;16": "<u2",
|
||||
"I;16S": "<i2",
|
||||
"I;16L": "<u2",
|
||||
"I;16LS": "<i2",
|
||||
"I;16B": ">u2",
|
||||
"I;16BS": ">i2",
|
||||
"I;16N": endian + "u2",
|
||||
"I;16NS": endian + "i2",
|
||||
"I;32": "<u4",
|
||||
"I;32B": ">u4",
|
||||
"I;32L": "<u4",
|
||||
"I;32S": "<i4",
|
||||
"I;32BS": ">i4",
|
||||
"I;32LS": "<i4",
|
||||
}
|
||||
|
||||
type_str = mapping_modes[mode]
|
||||
return ModeDescriptor(mode, ("I",), "L", "L", type_str)
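
ImageMode is an internal helper, so treat the following as a sketch rather than a stable API: the descriptor exposes the band names, the base mode/type, and a NumPy-style type string.

from PIL import ImageMode

desc = ImageMode.getmode("RGB")
print(str(desc), desc.bands, desc.basemode, desc.typestr)   # RGB ('R', 'G', 'B') RGB |u1
print(ImageMode.getmode("I;16B").typestr)                   # >u2 (big-endian 16-bit)
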
@ -1,255 +0,0 @@
|
||||
# A binary morphology add-on for the Python Imaging Library
|
||||
#
|
||||
# History:
|
||||
# 2014-06-04 Initial version.
|
||||
#
|
||||
# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
from . import Image, _imagingmorph
|
||||
|
||||
LUT_SIZE = 1 << 9
|
||||
|
||||
# fmt: off
|
||||
ROTATION_MATRIX = [
|
||||
6, 3, 0,
|
||||
7, 4, 1,
|
||||
8, 5, 2,
|
||||
]
|
||||
MIRROR_MATRIX = [
|
||||
2, 1, 0,
|
||||
5, 4, 3,
|
||||
8, 7, 6,
|
||||
]
|
||||
# fmt: on
|
||||
|
||||
|
||||
class LutBuilder:
|
||||
"""A class for building a MorphLut from a descriptive language
|
||||
|
||||
The input patterns is a list of a strings sequences like these::
|
||||
|
||||
4:(...
|
||||
.1.
|
||||
111)->1
|
||||
|
||||
(whitespace, including line breaks, is ignored). The option 4
describes a series of symmetry operations (in this case a
4-rotation); the pattern is described by:
|
||||
|
||||
- . or X - Ignore
|
||||
- 1 - Pixel is on
|
||||
- 0 - Pixel is off
|
||||
|
||||
The result of the operation is described after the "->" string.
|
||||
|
||||
The default is to return the current pixel value, which is
|
||||
returned if no other match is found.
|
||||
|
||||
Operations:
|
||||
|
||||
- 4 - 4 way rotation
|
||||
- N - Negate
|
||||
- 1 - Dummy op for no other operation (an op must always be given)
|
||||
- M - Mirroring
|
||||
|
||||
Example::
|
||||
|
||||
lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
|
||||
lut = lb.build_lut()
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, patterns=None, op_name=None):
|
||||
if patterns is not None:
|
||||
self.patterns = patterns
|
||||
else:
|
||||
self.patterns = []
|
||||
self.lut = None
|
||||
if op_name is not None:
|
||||
known_patterns = {
|
||||
"corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"],
|
||||
"dilation4": ["4:(... .0. .1.)->1"],
|
||||
"dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"],
|
||||
"erosion4": ["4:(... .1. .0.)->0"],
|
||||
"erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"],
|
||||
"edge": [
|
||||
"1:(... ... ...)->0",
|
||||
"4:(.0. .1. ...)->1",
|
||||
"4:(01. .1. ...)->1",
|
||||
],
|
||||
}
|
||||
if op_name not in known_patterns:
|
||||
msg = "Unknown pattern " + op_name + "!"
|
||||
raise Exception(msg)
|
||||
|
||||
self.patterns = known_patterns[op_name]
|
||||
|
||||
def add_patterns(self, patterns):
|
||||
self.patterns += patterns
|
||||
|
||||
def build_default_lut(self):
|
||||
symbols = [0, 1]
|
||||
m = 1 << 4 # pos of current pixel
|
||||
self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE))
|
||||
|
||||
def get_lut(self):
|
||||
return self.lut
|
||||
|
||||
def _string_permute(self, pattern, permutation):
|
||||
"""string_permute takes a pattern and a permutation and returns the
|
||||
string permuted according to the permutation list.
|
||||
"""
|
||||
assert len(permutation) == 9
|
||||
return "".join(pattern[p] for p in permutation)
|
||||
|
||||
def _pattern_permute(self, basic_pattern, options, basic_result):
|
||||
"""pattern_permute takes a basic pattern and its result and clones
|
||||
the pattern according to the modifications described in the options
|
||||
parameter. It returns a list of all cloned patterns."""
|
||||
patterns = [(basic_pattern, basic_result)]
|
||||
|
||||
# rotations
|
||||
if "4" in options:
|
||||
res = patterns[-1][1]
|
||||
for i in range(4):
|
||||
patterns.append(
|
||||
(self._string_permute(patterns[-1][0], ROTATION_MATRIX), res)
|
||||
)
|
||||
# mirror
|
||||
if "M" in options:
|
||||
n = len(patterns)
|
||||
for pattern, res in patterns[:n]:
|
||||
patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res))
|
||||
|
||||
# negate
|
||||
if "N" in options:
|
||||
n = len(patterns)
|
||||
for pattern, res in patterns[:n]:
|
||||
# Swap 0 and 1
|
||||
pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1")
|
||||
res = 1 - int(res)
|
||||
patterns.append((pattern, res))
|
||||
|
||||
return patterns
|
||||
|
||||
def build_lut(self):
|
||||
"""Compile all patterns into a morphology lut.
|
||||
|
||||
TBD :Build based on (file) morphlut:modify_lut
|
||||
"""
|
||||
self.build_default_lut()
|
||||
patterns = []
|
||||
|
||||
# Parse and create symmetries of the patterns strings
|
||||
for p in self.patterns:
|
||||
m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", ""))
|
||||
if not m:
|
||||
msg = 'Syntax error in pattern "' + p + '"'
|
||||
raise Exception(msg)
|
||||
options = m.group(1)
|
||||
pattern = m.group(2)
|
||||
result = int(m.group(3))
|
||||
|
||||
# Get rid of spaces
|
||||
pattern = pattern.replace(" ", "").replace("\n", "")
|
||||
|
||||
patterns += self._pattern_permute(pattern, options, result)
|
||||
|
||||
# compile the patterns into regular expressions for speed
|
||||
for i, pattern in enumerate(patterns):
|
||||
p = pattern[0].replace(".", "X").replace("X", "[01]")
|
||||
p = re.compile(p)
|
||||
patterns[i] = (p, pattern[1])
|
||||
|
||||
# Step through table and find patterns that match.
|
||||
# Note that all the patterns are searched. The last one
|
||||
# caught overrides
|
||||
for i in range(LUT_SIZE):
|
||||
# Build the bit pattern
|
||||
bitpattern = bin(i)[2:]
|
||||
bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1]
|
||||
|
||||
for p, r in patterns:
|
||||
if p.match(bitpattern):
|
||||
self.lut[i] = [0, 1][r]
|
||||
|
||||
return self.lut
|
||||
|
||||
|
||||
class MorphOp:
|
||||
"""A class for binary morphological operators"""
|
||||
|
||||
def __init__(self, lut=None, op_name=None, patterns=None):
|
||||
"""Create a binary morphological operator"""
|
||||
self.lut = lut
|
||||
if op_name is not None:
|
||||
self.lut = LutBuilder(op_name=op_name).build_lut()
|
||||
elif patterns is not None:
|
||||
self.lut = LutBuilder(patterns=patterns).build_lut()
|
||||
|
||||
def apply(self, image):
|
||||
"""Run a single morphological operation on an image
|
||||
|
||||
Returns a tuple of the number of changed pixels and the
|
||||
morphed image"""
|
||||
if self.lut is None:
|
||||
msg = "No operator loaded"
|
||||
raise Exception(msg)
|
||||
|
||||
if image.mode != "L":
|
||||
msg = "Image mode must be L"
|
||||
raise ValueError(msg)
|
||||
outimage = Image.new(image.mode, image.size, None)
|
||||
count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id)
|
||||
return count, outimage
|
||||
|
||||
def match(self, image):
|
||||
"""Get a list of coordinates matching the morphological operation on
|
||||
an image.
|
||||
|
||||
Returns a list of tuples of (x,y) coordinates
|
||||
of all matching pixels. See :ref:`coordinate-system`."""
|
||||
if self.lut is None:
|
||||
msg = "No operator loaded"
|
||||
raise Exception(msg)
|
||||
|
||||
if image.mode != "L":
|
||||
msg = "Image mode must be L"
|
||||
raise ValueError(msg)
|
||||
return _imagingmorph.match(bytes(self.lut), image.im.id)
|
||||
|
||||
def get_on_pixels(self, image):
|
||||
"""Get a list of all turned on pixels in a binary image
|
||||
|
||||
Returns a list of tuples of (x,y) coordinates
|
||||
of all matching pixels. See :ref:`coordinate-system`."""
|
||||
|
||||
if image.mode != "L":
|
||||
msg = "Image mode must be L"
|
||||
raise ValueError(msg)
|
||||
return _imagingmorph.get_on_pixels(image.im.id)
|
||||
|
||||
def load_lut(self, filename):
|
||||
"""Load an operator from an mrl file"""
|
||||
with open(filename, "rb") as f:
|
||||
self.lut = bytearray(f.read())
|
||||
|
||||
if len(self.lut) != LUT_SIZE:
|
||||
self.lut = None
|
||||
msg = "Wrong size operator file!"
|
||||
raise Exception(msg)
|
||||
|
||||
def save_lut(self, filename):
|
||||
"""Save an operator to an mrl file"""
|
||||
if self.lut is None:
|
||||
msg = "No operator loaded"
|
||||
raise Exception(msg)
|
||||
with open(filename, "wb") as f:
|
||||
f.write(self.lut)
|
||||
|
||||
def set_lut(self, lut):
|
||||
"""Set the lut from an external source"""
|
||||
self.lut = lut
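
A rough usage sketch of the classes above; MorphOp only accepts mode "L" images, which it treats as binary (0 = off, 255 = on), and the square drawn here is purely illustrative.

from PIL import Image, ImageMorph

im = Image.new("L", (32, 32), 0)        # black background
im.paste(255, (8, 8, 24, 24))           # a filled white square

op = ImageMorph.MorphOp(op_name="erosion8")
changed, eroded = op.apply(im)          # pixels flipped, morphed image
print(changed, len(op.get_on_pixels(eroded)))
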
@ -1,655 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard image operations
|
||||
#
|
||||
# History:
|
||||
# 2001-10-20 fl Created
|
||||
# 2001-10-23 fl Added autocontrast operator
|
||||
# 2001-12-18 fl Added Kevin's fit operator
|
||||
# 2004-03-14 fl Fixed potential division by zero in equalize
|
||||
# 2005-05-05 fl Fixed equalize for low number of values
|
||||
#
|
||||
# Copyright (c) 2001-2004 by Secret Labs AB
|
||||
# Copyright (c) 2001-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import operator
|
||||
import re
|
||||
|
||||
from . import ExifTags, Image, ImagePalette
|
||||
|
||||
#
|
||||
# helpers
|
||||
|
||||
|
||||
def _border(border):
|
||||
if isinstance(border, tuple):
|
||||
if len(border) == 2:
|
||||
left, top = right, bottom = border
|
||||
elif len(border) == 4:
|
||||
left, top, right, bottom = border
|
||||
else:
|
||||
left = top = right = bottom = border
|
||||
return left, top, right, bottom
|
||||
|
||||
|
||||
def _color(color, mode):
|
||||
if isinstance(color, str):
|
||||
from . import ImageColor
|
||||
|
||||
color = ImageColor.getcolor(color, mode)
|
||||
return color
|
||||
|
||||
|
||||
def _lut(image, lut):
|
||||
if image.mode == "P":
|
||||
# FIXME: apply to lookup table, not image data
|
||||
msg = "mode P support coming soon"
|
||||
raise NotImplementedError(msg)
|
||||
elif image.mode in ("L", "RGB"):
|
||||
if image.mode == "RGB" and len(lut) == 256:
|
||||
lut = lut + lut + lut
|
||||
return image.point(lut)
|
||||
else:
|
||||
msg = f"not supported for mode {image.mode}"
|
||||
raise OSError(msg)
|
||||
|
||||
|
||||
#
|
||||
# actions
|
||||
|
||||
|
||||
def autocontrast(image, cutoff=0, ignore=None, mask=None, preserve_tone=False):
|
||||
"""
|
||||
Maximize (normalize) image contrast. This function calculates a
|
||||
histogram of the input image (or mask region), removes ``cutoff`` percent of the
|
||||
lightest and darkest pixels from the histogram, and remaps the image
|
||||
so that the darkest pixel becomes black (0), and the lightest
|
||||
becomes white (255).
|
||||
|
||||
:param image: The image to process.
|
||||
:param cutoff: The percent to cut off from the histogram on the low and
|
||||
high ends. Either a tuple of (low, high), or a single
|
||||
number for both.
|
||||
:param ignore: The background pixel value (use None for no background).
|
||||
:param mask: Histogram used in contrast operation is computed using pixels
|
||||
within the mask. If no mask is given the entire image is used
|
||||
for histogram computation.
|
||||
:param preserve_tone: Preserve image tone in Photoshop-like style autocontrast.
|
||||
|
||||
.. versionadded:: 8.2.0
|
||||
|
||||
:return: An image.
|
||||
"""
|
||||
if preserve_tone:
|
||||
histogram = image.convert("L").histogram(mask)
|
||||
else:
|
||||
histogram = image.histogram(mask)
|
||||
|
||||
lut = []
|
||||
for layer in range(0, len(histogram), 256):
|
||||
h = histogram[layer : layer + 256]
|
||||
if ignore is not None:
|
||||
# get rid of outliers
|
||||
try:
|
||||
h[ignore] = 0
|
||||
except TypeError:
|
||||
# assume sequence
|
||||
for ix in ignore:
|
||||
h[ix] = 0
|
||||
if cutoff:
|
||||
# cut off pixels from both ends of the histogram
|
||||
if not isinstance(cutoff, tuple):
|
||||
cutoff = (cutoff, cutoff)
|
||||
# get number of pixels
|
||||
n = 0
|
||||
for ix in range(256):
|
||||
n = n + h[ix]
|
||||
# remove cutoff% pixels from the low end
|
||||
cut = n * cutoff[0] // 100
|
||||
for lo in range(256):
|
||||
if cut > h[lo]:
|
||||
cut = cut - h[lo]
|
||||
h[lo] = 0
|
||||
else:
|
||||
h[lo] -= cut
|
||||
cut = 0
|
||||
if cut <= 0:
|
||||
break
|
||||
# remove cutoff% samples from the high end
|
||||
cut = n * cutoff[1] // 100
|
||||
for hi in range(255, -1, -1):
|
||||
if cut > h[hi]:
|
||||
cut = cut - h[hi]
|
||||
h[hi] = 0
|
||||
else:
|
||||
h[hi] -= cut
|
||||
cut = 0
|
||||
if cut <= 0:
|
||||
break
|
||||
# find lowest/highest samples after preprocessing
|
||||
for lo in range(256):
|
||||
if h[lo]:
|
||||
break
|
||||
for hi in range(255, -1, -1):
|
||||
if h[hi]:
|
||||
break
|
||||
if hi <= lo:
|
||||
# don't bother
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
scale = 255.0 / (hi - lo)
|
||||
offset = -lo * scale
|
||||
for ix in range(256):
|
||||
ix = int(ix * scale + offset)
|
||||
if ix < 0:
|
||||
ix = 0
|
||||
elif ix > 255:
|
||||
ix = 255
|
||||
lut.append(ix)
|
||||
return _lut(image, lut)
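
A hedged sketch of the cutoff parameter described above; the input file name is hypothetical.

from PIL import Image, ImageOps

im = Image.open("photo.jpg")                            # hypothetical input
auto = ImageOps.autocontrast(im, cutoff=2)              # clip 2% at both ends
toned = ImageOps.autocontrast(im, cutoff=(1, 5), preserve_tone=True)
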
def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127):
|
||||
"""
|
||||
Colorize grayscale image.
|
||||
This function calculates a color wedge which maps all black pixels in
|
||||
the source image to the first color and all white pixels to the
|
||||
second color. If ``mid`` is specified, it uses three-color mapping.
The ``black``, ``white`` and ``mid`` arguments should be RGB tuples
or color names. Mapping positions for any of the colors can be
specified (e.g. ``blackpoint``); each of these parameters is the
integer grayscale level to which the corresponding color is mapped.
These parameters must be in logical order, such that
``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified).
|
||||
|
||||
:param image: The image to colorize.
|
||||
:param black: The color to use for black input pixels.
|
||||
:param white: The color to use for white input pixels.
|
||||
:param mid: The color to use for midtone input pixels.
|
||||
:param blackpoint: an int value [0, 255] for the black mapping.
|
||||
:param whitepoint: an int value [0, 255] for the white mapping.
|
||||
:param midpoint: an int value [0, 255] for the midtone mapping.
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
# Initial asserts
|
||||
assert image.mode == "L"
|
||||
if mid is None:
|
||||
assert 0 <= blackpoint <= whitepoint <= 255
|
||||
else:
|
||||
assert 0 <= blackpoint <= midpoint <= whitepoint <= 255
|
||||
|
||||
# Define colors from arguments
|
||||
black = _color(black, "RGB")
|
||||
white = _color(white, "RGB")
|
||||
if mid is not None:
|
||||
mid = _color(mid, "RGB")
|
||||
|
||||
# Empty lists for the mapping
|
||||
red = []
|
||||
green = []
|
||||
blue = []
|
||||
|
||||
# Create the low-end values
|
||||
for i in range(0, blackpoint):
|
||||
red.append(black[0])
|
||||
green.append(black[1])
|
||||
blue.append(black[2])
|
||||
|
||||
# Create the mapping (2-color)
|
||||
if mid is None:
|
||||
range_map = range(0, whitepoint - blackpoint)
|
||||
|
||||
for i in range_map:
|
||||
red.append(black[0] + i * (white[0] - black[0]) // len(range_map))
|
||||
green.append(black[1] + i * (white[1] - black[1]) // len(range_map))
|
||||
blue.append(black[2] + i * (white[2] - black[2]) // len(range_map))
|
||||
|
||||
# Create the mapping (3-color)
|
||||
else:
|
||||
range_map1 = range(0, midpoint - blackpoint)
|
||||
range_map2 = range(0, whitepoint - midpoint)
|
||||
|
||||
for i in range_map1:
|
||||
red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1))
|
||||
green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1))
|
||||
blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1))
|
||||
for i in range_map2:
|
||||
red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2))
|
||||
green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2))
|
||||
blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2))
|
||||
|
||||
# Create the high-end values
|
||||
for i in range(0, 256 - whitepoint):
|
||||
red.append(white[0])
|
||||
green.append(white[1])
|
||||
blue.append(white[2])
|
||||
|
||||
# Return converted image
|
||||
image = image.convert("RGB")
|
||||
return _lut(image, red + green + blue)
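
For example (a sketch; the source file is hypothetical, and colorize requires a mode "L" input):

from PIL import Image, ImageOps

gray = Image.open("photo.jpg").convert("L")             # hypothetical input
duotone = ImageOps.colorize(gray, black="navy", white="#fff0c0")
tritone = ImageOps.colorize(gray, "black", "white", mid="red",
                            blackpoint=20, midpoint=128, whitepoint=235)
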
def contain(image, size, method=Image.Resampling.BICUBIC):
|
||||
"""
|
||||
Returns a resized version of the image, set to the maximum width and height
|
||||
within the requested size, while maintaining the original aspect ratio.
|
||||
|
||||
:param image: The image to resize.
|
||||
:param size: The requested output size in pixels, given as a
|
||||
(width, height) tuple.
|
||||
:param method: Resampling method to use. Default is
|
||||
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
|
||||
See :ref:`concept-filters`.
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
im_ratio = image.width / image.height
|
||||
dest_ratio = size[0] / size[1]
|
||||
|
||||
if im_ratio != dest_ratio:
|
||||
if im_ratio > dest_ratio:
|
||||
new_height = round(image.height / image.width * size[0])
|
||||
if new_height != size[1]:
|
||||
size = (size[0], new_height)
|
||||
else:
|
||||
new_width = round(image.width / image.height * size[1])
|
||||
if new_width != size[0]:
|
||||
size = (new_width, size[1])
|
||||
return image.resize(size, resample=method)
|
||||
|
||||
|
||||
def cover(image, size, method=Image.Resampling.BICUBIC):
|
||||
"""
|
||||
Returns a resized version of the image, so that the requested size is
|
||||
covered, while maintaining the original aspect ratio.
|
||||
|
||||
:param image: The image to resize.
|
||||
:param size: The requested output size in pixels, given as a
|
||||
(width, height) tuple.
|
||||
:param method: Resampling method to use. Default is
|
||||
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
|
||||
See :ref:`concept-filters`.
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
im_ratio = image.width / image.height
|
||||
dest_ratio = size[0] / size[1]
|
||||
|
||||
if im_ratio != dest_ratio:
|
||||
if im_ratio < dest_ratio:
|
||||
new_height = round(image.height / image.width * size[0])
|
||||
if new_height != size[1]:
|
||||
size = (size[0], new_height)
|
||||
else:
|
||||
new_width = round(image.width / image.height * size[1])
|
||||
if new_width != size[0]:
|
||||
size = (new_width, size[1])
|
||||
return image.resize(size, resample=method)
|
||||
|
||||
|
||||
def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)):
|
||||
"""
|
||||
Returns a resized and padded version of the image, expanded to fill the
|
||||
requested aspect ratio and size.
|
||||
|
||||
:param image: The image to resize and crop.
|
||||
:param size: The requested output size in pixels, given as a
|
||||
(width, height) tuple.
|
||||
:param method: Resampling method to use. Default is
|
||||
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
|
||||
See :ref:`concept-filters`.
|
||||
:param color: The background color of the padded image.
|
||||
:param centering: Control the position of the original image within the
|
||||
padded version.
|
||||
|
||||
(0.5, 0.5) will keep the image centered
|
||||
(0, 0) will keep the image aligned to the top left
|
||||
(1, 1) will keep the image aligned to the bottom
|
||||
right
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
resized = contain(image, size, method)
|
||||
if resized.size == size:
|
||||
out = resized
|
||||
else:
|
||||
out = Image.new(image.mode, size, color)
|
||||
if resized.palette:
|
||||
out.putpalette(resized.getpalette())
|
||||
if resized.width != size[0]:
|
||||
x = round((size[0] - resized.width) * max(0, min(centering[0], 1)))
|
||||
out.paste(resized, (x, 0))
|
||||
else:
|
||||
y = round((size[1] - resized.height) * max(0, min(centering[1], 1)))
|
||||
out.paste(resized, (0, y))
|
||||
return out
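
contain, cover and pad differ only in whether the result fits inside the requested box, overflows it, or letterboxes it; a small comparison sketch with made-up sizes:

from PIL import Image, ImageOps

im = Image.new("RGB", (400, 200), "gray")                  # 2:1 source

print(ImageOps.contain(im, (100, 100)).size)               # (100, 50)  fits inside the box
print(ImageOps.cover(im, (100, 100)).size)                 # (200, 100) covers the box
print(ImageOps.pad(im, (100, 100), color="black").size)    # (100, 100) letterboxed
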
def crop(image, border=0):
|
||||
"""
|
||||
Remove border from image. The same number of pixels is removed
from all four sides. This function works on all image modes.
|
||||
|
||||
.. seealso:: :py:meth:`~PIL.Image.Image.crop`
|
||||
|
||||
:param image: The image to crop.
|
||||
:param border: The number of pixels to remove.
|
||||
:return: An image.
|
||||
"""
|
||||
left, top, right, bottom = _border(border)
|
||||
return image.crop((left, top, image.size[0] - right, image.size[1] - bottom))
|
||||
|
||||
|
||||
def scale(image, factor, resample=Image.Resampling.BICUBIC):
|
||||
"""
|
||||
Returns a copy of the image rescaled by the given factor.
A factor greater than 1 expands the image; a factor between 0 and 1
contracts it.
|
||||
|
||||
:param image: The image to rescale.
|
||||
:param factor: The expansion factor, as a float.
|
||||
:param resample: Resampling method to use. Default is
|
||||
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
|
||||
See :ref:`concept-filters`.
|
||||
:returns: An :py:class:`~PIL.Image.Image` object.
|
||||
"""
|
||||
if factor == 1:
|
||||
return image.copy()
|
||||
elif factor <= 0:
|
||||
msg = "the factor must be greater than 0"
|
||||
raise ValueError(msg)
|
||||
else:
|
||||
size = (round(factor * image.width), round(factor * image.height))
|
||||
return image.resize(size, resample)
|
||||
|
||||
|
||||
def deform(image, deformer, resample=Image.Resampling.BILINEAR):
|
||||
"""
|
||||
Deform the image.
|
||||
|
||||
:param image: The image to deform.
|
||||
:param deformer: A deformer object. Any object that implements a
|
||||
``getmesh`` method can be used.
|
||||
:param resample: An optional resampling filter. Same values possible as
|
||||
in the PIL.Image.transform function.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transform(
|
||||
image.size, Image.Transform.MESH, deformer.getmesh(image), resample
|
||||
)
|
||||
|
||||
|
||||
def equalize(image, mask=None):
|
||||
"""
|
||||
Equalize the image histogram. This function applies a non-linear
|
||||
mapping to the input image, in order to create a uniform
|
||||
distribution of grayscale values in the output image.
|
||||
|
||||
:param image: The image to equalize.
|
||||
:param mask: An optional mask. If given, only the pixels selected by
|
||||
the mask are included in the analysis.
|
||||
:return: An image.
|
||||
"""
|
||||
if image.mode == "P":
|
||||
image = image.convert("RGB")
|
||||
h = image.histogram(mask)
|
||||
lut = []
|
||||
for b in range(0, len(h), 256):
|
||||
histo = [_f for _f in h[b : b + 256] if _f]
|
||||
if len(histo) <= 1:
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
|
||||
if not step:
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
n = step // 2
|
||||
for i in range(256):
|
||||
lut.append(n // step)
|
||||
n = n + h[i + b]
|
||||
return _lut(image, lut)
|
||||
|
||||
|
||||
def expand(image, border=0, fill=0):
|
||||
"""
|
||||
Add border to the image
|
||||
|
||||
:param image: The image to expand.
|
||||
:param border: Border width, in pixels.
|
||||
:param fill: Pixel fill value (a color value). Default is 0 (black).
|
||||
:return: An image.
|
||||
"""
|
||||
left, top, right, bottom = _border(border)
|
||||
width = left + image.size[0] + right
|
||||
height = top + image.size[1] + bottom
|
||||
color = _color(fill, image.mode)
|
||||
if image.palette:
|
||||
palette = ImagePalette.ImagePalette(palette=image.getpalette())
|
||||
if isinstance(color, tuple):
|
||||
color = palette.getcolor(color)
|
||||
else:
|
||||
palette = None
|
||||
out = Image.new(image.mode, (width, height), color)
|
||||
if palette:
|
||||
out.putpalette(palette.palette)
|
||||
out.paste(image, (left, top))
|
||||
return out
|
||||
|
||||
|
||||
def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
|
||||
"""
|
||||
Returns a resized and cropped version of the image, cropped to the
|
||||
requested aspect ratio and size.
|
||||
|
||||
This function was contributed by Kevin Cazabon.
|
||||
|
||||
:param image: The image to resize and crop.
|
||||
:param size: The requested output size in pixels, given as a
|
||||
(width, height) tuple.
|
||||
:param method: Resampling method to use. Default is
|
||||
:py:attr:`~PIL.Image.Resampling.BICUBIC`.
|
||||
See :ref:`concept-filters`.
|
||||
:param bleed: Remove a border around the outside of the image from all
|
||||
four edges. The value is a decimal percentage (use 0.01 for
|
||||
one percent). The default value is 0 (no border).
|
||||
Cannot be greater than or equal to 0.5.
|
||||
:param centering: Control the cropping position. Use (0.5, 0.5) for
|
||||
center cropping (e.g. if cropping the width, take 50% off
|
||||
of the left side, and therefore 50% off the right side).
|
||||
(0.0, 0.0) will crop from the top left corner (i.e. if
|
||||
cropping the width, take all of the crop off of the right
|
||||
side, and if cropping the height, take all of it off the
|
||||
bottom). (1.0, 0.0) will crop from the bottom left
|
||||
corner, etc. (i.e. if cropping the width, take all of the
|
||||
crop off the left side, and if cropping the height take
|
||||
none from the top, and therefore all off the bottom).
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
# by Kevin Cazabon, Feb 17/2000
|
||||
# kevin@cazabon.com
|
||||
# https://www.cazabon.com
|
||||
|
||||
# ensure centering is mutable
|
||||
centering = list(centering)
|
||||
|
||||
if not 0.0 <= centering[0] <= 1.0:
|
||||
centering[0] = 0.5
|
||||
if not 0.0 <= centering[1] <= 1.0:
|
||||
centering[1] = 0.5
|
||||
|
||||
if not 0.0 <= bleed < 0.5:
|
||||
bleed = 0.0
|
||||
|
||||
# calculate the area to use for resizing and cropping, subtracting
|
||||
# the 'bleed' around the edges
|
||||
|
||||
# number of pixels to trim off on Top and Bottom, Left and Right
|
||||
bleed_pixels = (bleed * image.size[0], bleed * image.size[1])
|
||||
|
||||
live_size = (
|
||||
image.size[0] - bleed_pixels[0] * 2,
|
||||
image.size[1] - bleed_pixels[1] * 2,
|
||||
)
|
||||
|
||||
# calculate the aspect ratio of the live_size
|
||||
live_size_ratio = live_size[0] / live_size[1]
|
||||
|
||||
# calculate the aspect ratio of the output image
|
||||
output_ratio = size[0] / size[1]
|
||||
|
||||
# figure out if the sides or top/bottom will be cropped off
|
||||
if live_size_ratio == output_ratio:
|
||||
# live_size is already the needed ratio
|
||||
crop_width = live_size[0]
|
||||
crop_height = live_size[1]
|
||||
elif live_size_ratio >= output_ratio:
|
||||
# live_size is wider than what's needed, crop the sides
|
||||
crop_width = output_ratio * live_size[1]
|
||||
crop_height = live_size[1]
|
||||
else:
|
||||
# live_size is taller than what's needed, crop the top and bottom
|
||||
crop_width = live_size[0]
|
||||
crop_height = live_size[0] / output_ratio
|
||||
|
||||
# make the crop
|
||||
crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0]
|
||||
crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1]
|
||||
|
||||
crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)
|
||||
|
||||
# resize the image and return it
|
||||
return image.resize(size, method, box=crop)
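
The interaction of bleed and centering above is easiest to see with concrete numbers; a sketch:

from PIL import Image, ImageOps

im = Image.new("RGB", (400, 200), "white")

# Center crop to a square: trims 100 px from each side, then resizes.
thumb = ImageOps.fit(im, (128, 128))

# Keep the left edge instead, after trimming 5% bleed from every side.
left = ImageOps.fit(im, (128, 128), bleed=0.05, centering=(0.0, 0.5))
print(thumb.size, left.size)            # (128, 128) (128, 128)
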
def flip(image):
|
||||
"""
|
||||
Flip the image vertically (top to bottom).
|
||||
|
||||
:param image: The image to flip.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
|
||||
|
||||
|
||||
def grayscale(image):
|
||||
"""
|
||||
Convert the image to grayscale.
|
||||
|
||||
:param image: The image to convert.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.convert("L")
|
||||
|
||||
|
||||
def invert(image):
|
||||
"""
|
||||
Invert (negate) the image.
|
||||
|
||||
:param image: The image to invert.
|
||||
:return: An image.
|
||||
"""
|
||||
lut = list(range(255, -1, -1))
|
||||
return image.point(lut) if image.mode == "1" else _lut(image, lut)
|
||||
|
||||
|
||||
def mirror(image):
|
||||
"""
|
||||
Flip image horizontally (left to right).
|
||||
|
||||
:param image: The image to mirror.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
|
||||
|
||||
|
||||
def posterize(image, bits):
|
||||
"""
|
||||
Reduce the number of bits for each color channel.
|
||||
|
||||
:param image: The image to posterize.
|
||||
:param bits: The number of bits to keep for each channel (1-8).
|
||||
:return: An image.
|
||||
"""
|
||||
mask = ~(2 ** (8 - bits) - 1)
|
||||
lut = [i & mask for i in range(256)]
|
||||
return _lut(image, lut)
|
||||
|
||||
|
||||
def solarize(image, threshold=128):
|
||||
"""
|
||||
Invert all pixel values above a threshold.
|
||||
|
||||
:param image: The image to solarize.
|
||||
:param threshold: All pixels above this grayscale level are inverted.
|
||||
:return: An image.
|
||||
"""
|
||||
lut = []
|
||||
for i in range(256):
|
||||
if i < threshold:
|
||||
lut.append(i)
|
||||
else:
|
||||
lut.append(255 - i)
|
||||
return _lut(image, lut)
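
Both functions above are plain 256-entry lookup tables; a sketch of their effect (the input file is hypothetical):

from PIL import Image, ImageOps

im = Image.open("photo.jpg").convert("L")     # hypothetical input

poster = ImageOps.posterize(im, 3)            # i & ~(2**5 - 1): keep the top 3 bits
solar = ImageOps.solarize(im, threshold=192)  # invert only pixels >= 192
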
def exif_transpose(image, *, in_place=False):
|
||||
"""
|
||||
If an image has an EXIF Orientation tag, other than 1, transpose the image
|
||||
accordingly, and remove the orientation data.
|
||||
|
||||
:param image: The image to transpose.
|
||||
:param in_place: Boolean. Keyword-only argument.
|
||||
If ``True``, the original image is modified in-place, and ``None`` is returned.
|
||||
If ``False`` (default), a new :py:class:`~PIL.Image.Image` object is returned
|
||||
with the transposition applied. If there is no transposition, a copy of the
|
||||
image will be returned.
|
||||
"""
|
||||
image.load()
|
||||
image_exif = image.getexif()
|
||||
orientation = image_exif.get(ExifTags.Base.Orientation)
|
||||
method = {
|
||||
2: Image.Transpose.FLIP_LEFT_RIGHT,
|
||||
3: Image.Transpose.ROTATE_180,
|
||||
4: Image.Transpose.FLIP_TOP_BOTTOM,
|
||||
5: Image.Transpose.TRANSPOSE,
|
||||
6: Image.Transpose.ROTATE_270,
|
||||
7: Image.Transpose.TRANSVERSE,
|
||||
8: Image.Transpose.ROTATE_90,
|
||||
}.get(orientation)
|
||||
if method is not None:
|
||||
transposed_image = image.transpose(method)
|
||||
if in_place:
|
||||
image.im = transposed_image.im
|
||||
image.pyaccess = None
|
||||
image._size = transposed_image._size
|
||||
exif_image = image if in_place else transposed_image
|
||||
|
||||
exif = exif_image.getexif()
|
||||
if ExifTags.Base.Orientation in exif:
|
||||
del exif[ExifTags.Base.Orientation]
|
||||
if "exif" in exif_image.info:
|
||||
exif_image.info["exif"] = exif.tobytes()
|
||||
elif "Raw profile type exif" in exif_image.info:
|
||||
exif_image.info["Raw profile type exif"] = exif.tobytes().hex()
|
||||
elif "XML:com.adobe.xmp" in exif_image.info:
|
||||
for pattern in (
|
||||
r'tiff:Orientation="([0-9])"',
|
||||
r"<tiff:Orientation>([0-9])</tiff:Orientation>",
|
||||
):
|
||||
exif_image.info["XML:com.adobe.xmp"] = re.sub(
|
||||
pattern, "", exif_image.info["XML:com.adobe.xmp"]
|
||||
)
|
||||
if not in_place:
|
||||
return transposed_image
|
||||
elif not in_place:
|
||||
return image.copy()
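
Because many camera files store rotation only in the Orientation tag, exif_transpose is typically applied right after opening; a minimal sketch with a hypothetical file name:

from PIL import Image, ImageOps

with Image.open("camera_shot.jpg") as im:       # hypothetical input
    upright = ImageOps.exif_transpose(im)       # new image, orientation tag removed
    # or rotate the pixels of `im` itself without returning a copy:
    # ImageOps.exif_transpose(im, in_place=True)
    upright.save("upright.jpg")
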
@ -1,262 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# image palette object
|
||||
#
|
||||
# History:
|
||||
# 1996-03-11 fl Rewritten.
|
||||
# 1997-01-03 fl Up and running.
|
||||
# 1997-08-23 fl Added load hack
|
||||
# 2001-04-16 fl Fixed randint shadow bug in random()
|
||||
#
|
||||
# Copyright (c) 1997-2001 by Secret Labs AB
|
||||
# Copyright (c) 1996-1997 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import array
|
||||
|
||||
from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile
|
||||
|
||||
|
||||
class ImagePalette:
|
||||
"""
|
||||
Color palette for palette mapped images
|
||||
|
||||
:param mode: The mode to use for the palette. See:
|
||||
:ref:`concept-modes`. Defaults to "RGB"
|
||||
:param palette: An optional palette. If given, it must be a bytearray,
|
||||
an array or a list of ints between 0-255. The list must consist of
|
||||
all channels for one color followed by the next color (e.g. RGBRGBRGB).
|
||||
Defaults to an empty palette.
|
||||
"""
|
||||
|
||||
def __init__(self, mode="RGB", palette=None):
|
||||
self.mode = mode
|
||||
self.rawmode = None # if set, palette contains raw data
|
||||
self.palette = palette or bytearray()
|
||||
self.dirty = None
|
||||
|
||||
@property
|
||||
def palette(self):
|
||||
return self._palette
|
||||
|
||||
@palette.setter
|
||||
def palette(self, palette):
|
||||
self._colors = None
|
||||
self._palette = palette
|
||||
|
||||
@property
|
||||
def colors(self):
|
||||
if self._colors is None:
|
||||
mode_len = len(self.mode)
|
||||
self._colors = {}
|
||||
for i in range(0, len(self.palette), mode_len):
|
||||
color = tuple(self.palette[i : i + mode_len])
|
||||
if color in self._colors:
|
||||
continue
|
||||
self._colors[color] = i // mode_len
|
||||
return self._colors
|
||||
|
||||
@colors.setter
|
||||
def colors(self, colors):
|
||||
self._colors = colors
|
||||
|
||||
def copy(self):
|
||||
new = ImagePalette()
|
||||
|
||||
new.mode = self.mode
|
||||
new.rawmode = self.rawmode
|
||||
if self.palette is not None:
|
||||
new.palette = self.palette[:]
|
||||
new.dirty = self.dirty
|
||||
|
||||
return new
|
||||
|
||||
def getdata(self):
|
||||
"""
|
||||
Get palette contents in format suitable for the low-level
|
||||
``im.putpalette`` primitive.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
return self.rawmode, self.palette
|
||||
return self.mode, self.tobytes()
|
||||
|
||||
def tobytes(self):
|
||||
"""Convert palette to bytes.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
msg = "palette contains raw palette data"
|
||||
raise ValueError(msg)
|
||||
if isinstance(self.palette, bytes):
|
||||
return self.palette
|
||||
arr = array.array("B", self.palette)
|
||||
return arr.tobytes()
|
||||
|
||||
# Declare tostring as an alias for tobytes
|
||||
tostring = tobytes
|
||||
|
||||
def _new_color_index(self, image=None, e=None):
|
||||
if not isinstance(self.palette, bytearray):
|
||||
self._palette = bytearray(self.palette)
|
||||
index = len(self.palette) // 3
|
||||
special_colors = ()
|
||||
if image:
|
||||
special_colors = (
|
||||
image.info.get("background"),
|
||||
image.info.get("transparency"),
|
||||
)
|
||||
while index in special_colors:
|
||||
index += 1
|
||||
if index >= 256:
|
||||
if image:
|
||||
# Search for an unused index
|
||||
for i, count in reversed(list(enumerate(image.histogram()))):
|
||||
if count == 0 and i not in special_colors:
|
||||
index = i
|
||||
break
|
||||
if index >= 256:
|
||||
msg = "cannot allocate more than 256 colors"
|
||||
raise ValueError(msg) from e
|
||||
return index
|
||||
|
||||
def getcolor(self, color, image=None):
|
||||
"""Given an rgb tuple, allocate palette entry.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
msg = "palette contains raw palette data"
|
||||
raise ValueError(msg)
|
||||
if isinstance(color, tuple):
|
||||
if self.mode == "RGB":
|
||||
if len(color) == 4:
|
||||
if color[3] != 255:
|
||||
msg = "cannot add non-opaque RGBA color to RGB palette"
|
||||
raise ValueError(msg)
|
||||
color = color[:3]
|
||||
elif self.mode == "RGBA":
|
||||
if len(color) == 3:
|
||||
color += (255,)
|
||||
try:
|
||||
return self.colors[color]
|
||||
except KeyError as e:
|
||||
# allocate new color slot
|
||||
index = self._new_color_index(image, e)
|
||||
self.colors[color] = index
|
||||
if index * 3 < len(self.palette):
|
||||
self._palette = (
|
||||
self.palette[: index * 3]
|
||||
+ bytes(color)
|
||||
+ self.palette[index * 3 + 3 :]
|
||||
)
|
||||
else:
|
||||
self._palette += bytes(color)
|
||||
self.dirty = 1
|
||||
return index
|
||||
else:
|
||||
msg = f"unknown color specifier: {repr(color)}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def save(self, fp):
|
||||
"""Save palette to text file.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
msg = "palette contains raw palette data"
|
||||
raise ValueError(msg)
|
||||
if isinstance(fp, str):
|
||||
fp = open(fp, "w")
|
||||
fp.write("# Palette\n")
|
||||
fp.write(f"# Mode: {self.mode}\n")
|
||||
for i in range(256):
|
||||
fp.write(f"{i}")
|
||||
for j in range(i * len(self.mode), (i + 1) * len(self.mode)):
|
||||
try:
|
||||
fp.write(f" {self.palette[j]}")
|
||||
except IndexError:
|
||||
fp.write(" 0")
|
||||
fp.write("\n")
|
||||
fp.close()
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Internal
|
||||
|
||||
|
||||
def raw(rawmode, data):
|
||||
palette = ImagePalette()
|
||||
palette.rawmode = rawmode
|
||||
palette.palette = data
|
||||
palette.dirty = 1
|
||||
return palette
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Factories
|
||||
|
||||
|
||||
def make_linear_lut(black, white):
|
||||
if black == 0:
|
||||
return [white * i // 255 for i in range(256)]
|
||||
|
||||
msg = "unavailable when black is non-zero"
|
||||
raise NotImplementedError(msg) # FIXME
|
||||
|
||||
|
||||
def make_gamma_lut(exp):
|
||||
return [int(((i / 255.0) ** exp) * 255.0 + 0.5) for i in range(256)]
|
||||
|
||||
|
||||
def negative(mode="RGB"):
|
||||
palette = list(range(256 * len(mode)))
|
||||
palette.reverse()
|
||||
return ImagePalette(mode, [i // len(mode) for i in palette])
|
||||
|
||||
|
||||
def random(mode="RGB"):
|
||||
from random import randint
|
||||
|
||||
palette = [randint(0, 255) for _ in range(256 * len(mode))]
|
||||
return ImagePalette(mode, palette)
|
||||
|
||||
|
||||
def sepia(white="#fff0c0"):
|
||||
bands = [make_linear_lut(0, band) for band in ImageColor.getrgb(white)]
|
||||
return ImagePalette("RGB", [bands[i % 3][i // 3] for i in range(256 * 3)])
|
||||
|
||||
|
||||
def wedge(mode="RGB"):
|
||||
palette = list(range(256 * len(mode)))
|
||||
return ImagePalette(mode, [i // len(mode) for i in palette])
|
||||
|
||||
|
||||
def load(filename):
|
||||
# FIXME: supports GIMP gradients only
|
||||
|
||||
with open(filename, "rb") as fp:
|
||||
for paletteHandler in [
|
||||
GimpPaletteFile.GimpPaletteFile,
|
||||
GimpGradientFile.GimpGradientFile,
|
||||
PaletteFile.PaletteFile,
|
||||
]:
|
||||
try:
|
||||
fp.seek(0)
|
||||
lut = paletteHandler(fp).getpalette()
|
||||
if lut:
|
||||
break
|
||||
except (SyntaxError, ValueError):
|
||||
pass
|
||||
else:
|
||||
msg = "cannot load palette"
|
||||
raise OSError(msg)
|
||||
|
||||
return lut # data, rawmode
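
A short sketch of the allocation behaviour of getcolor above (the method is marked experimental in its own docstring):

from PIL import ImagePalette

pal = ImagePalette.ImagePalette("RGB")       # starts empty
red = pal.getcolor((255, 0, 0))              # -> 0, first allocated slot
blue = pal.getcolor((0, 0, 255))             # -> 1
print(red, blue, pal.colors[(255, 0, 0)])    # 0 1 0
print(pal.tobytes())                         # b'\xff\x00\x00\x00\x00\xff'
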
@ -1,20 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# path interface
|
||||
#
|
||||
# History:
|
||||
# 1996-11-04 fl Created
|
||||
# 2002-04-14 fl Added documentation stub class
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image
|
||||
|
||||
Path = Image.core.path
|
@ -1,197 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a simple Qt image interface.
|
||||
#
|
||||
# history:
|
||||
# 2006-06-03 fl: created
|
||||
# 2006-06-04 fl: inherit from QImage instead of wrapping it
|
||||
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
|
||||
# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com)
|
||||
#
|
||||
# Copyright (c) 2006 by Secret Labs AB
|
||||
# Copyright (c) 2006 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image
|
||||
from ._util import is_path
|
||||
|
||||
qt_versions = [
|
||||
["6", "PyQt6"],
|
||||
["side6", "PySide6"],
|
||||
]
|
||||
|
||||
# If a version has already been imported, attempt it first
|
||||
qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)
|
||||
for qt_version, qt_module in qt_versions:
|
||||
try:
|
||||
if qt_module == "PyQt6":
|
||||
from PyQt6.QtCore import QBuffer, QIODevice
|
||||
from PyQt6.QtGui import QImage, QPixmap, qRgba
|
||||
elif qt_module == "PySide6":
|
||||
from PySide6.QtCore import QBuffer, QIODevice
|
||||
from PySide6.QtGui import QImage, QPixmap, qRgba
|
||||
except (ImportError, RuntimeError):
|
||||
continue
|
||||
qt_is_installed = True
|
||||
break
|
||||
else:
|
||||
qt_is_installed = False
|
||||
qt_version = None
|
||||
|
||||
|
||||
def rgb(r, g, b, a=255):
|
||||
"""(Internal) Turns an RGB color into a Qt compatible color integer."""
|
||||
# use qRgb to pack the colors, and then turn the resulting long
|
||||
# into a negative integer with the same bitpattern.
|
||||
return qRgba(r, g, b, a) & 0xFFFFFFFF
|
||||
|
||||
|
||||
def fromqimage(im):
|
||||
"""
|
||||
:param im: QImage or PIL ImageQt object
|
||||
"""
|
||||
buffer = QBuffer()
|
||||
if qt_version == "6":
|
||||
try:
|
||||
qt_openmode = QIODevice.OpenModeFlag
|
||||
except AttributeError:
|
||||
qt_openmode = QIODevice.OpenMode
|
||||
else:
|
||||
qt_openmode = QIODevice
|
||||
buffer.open(qt_openmode.ReadWrite)
|
||||
# preserve alpha channel with png
|
||||
# otherwise ppm is more friendly with Image.open
|
||||
if im.hasAlphaChannel():
|
||||
im.save(buffer, "png")
|
||||
else:
|
||||
im.save(buffer, "ppm")
|
||||
|
||||
b = BytesIO()
|
||||
b.write(buffer.data())
|
||||
buffer.close()
|
||||
b.seek(0)
|
||||
|
||||
return Image.open(b)
|
||||
|
||||
|
||||
def fromqpixmap(im):
|
||||
return fromqimage(im)
|
||||
|
||||
|
||||
def align8to32(bytes, width, mode):
|
||||
"""
|
||||
Converts each scanline of data from 8-bit alignment to 32-bit alignment.
|
||||
"""
|
||||
|
||||
bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode]
|
||||
|
||||
# calculate bytes per line and the extra padding if needed
|
||||
bits_per_line = bits_per_pixel * width
|
||||
full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
|
||||
bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
|
||||
|
||||
extra_padding = -bytes_per_line % 4
|
||||
|
||||
# already 32 bit aligned by luck
|
||||
if not extra_padding:
|
||||
return bytes
|
||||
|
||||
new_data = [
|
||||
bytes[i * bytes_per_line : (i + 1) * bytes_per_line] + b"\x00" * extra_padding
|
||||
for i in range(len(bytes) // bytes_per_line)
|
||||
]
|
||||
|
||||
return b"".join(new_data)
|
||||
|
||||
|
||||
def _toqclass_helper(im):
|
||||
data = None
|
||||
colortable = None
|
||||
exclusive_fp = False
|
||||
|
||||
# handle filename, if given instead of image name
|
||||
if hasattr(im, "toUtf8"):
|
||||
# FIXME - is this really the best way to do this?
|
||||
im = str(im.toUtf8(), "utf-8")
|
||||
if is_path(im):
|
||||
im = Image.open(im)
|
||||
exclusive_fp = True
|
||||
|
||||
qt_format = QImage.Format if qt_version == "6" else QImage
|
||||
if im.mode == "1":
|
||||
format = qt_format.Format_Mono
|
||||
elif im.mode == "L":
|
||||
format = qt_format.Format_Indexed8
|
||||
colortable = [rgb(i, i, i) for i in range(256)]
|
||||
elif im.mode == "P":
|
||||
format = qt_format.Format_Indexed8
|
||||
palette = im.getpalette()
|
||||
colortable = [rgb(*palette[i : i + 3]) for i in range(0, len(palette), 3)]
|
||||
elif im.mode == "RGB":
|
||||
# Populate the 4th channel with 255
|
||||
im = im.convert("RGBA")
|
||||
|
||||
data = im.tobytes("raw", "BGRA")
|
||||
format = qt_format.Format_RGB32
|
||||
elif im.mode == "RGBA":
|
||||
data = im.tobytes("raw", "BGRA")
|
||||
format = qt_format.Format_ARGB32
|
||||
elif im.mode == "I;16" and hasattr(qt_format, "Format_Grayscale16"): # Qt 5.13+
|
||||
im = im.point(lambda i: i * 256)
|
||||
|
||||
format = qt_format.Format_Grayscale16
|
||||
else:
|
||||
if exclusive_fp:
|
||||
im.close()
|
||||
msg = f"unsupported image mode {repr(im.mode)}"
|
||||
raise ValueError(msg)
|
||||
|
||||
size = im.size
|
||||
__data = data or align8to32(im.tobytes(), size[0], im.mode)
|
||||
if exclusive_fp:
|
||||
im.close()
|
||||
return {"data": __data, "size": size, "format": format, "colortable": colortable}
|
||||
|
||||
|
||||
if qt_is_installed:
|
||||
|
||||
class ImageQt(QImage):
|
||||
def __init__(self, im):
|
||||
"""
|
||||
A PIL image wrapper for Qt. This is a subclass of Qt's QImage
class.
|
||||
|
||||
:param im: A PIL Image object, or a file name (given either as
|
||||
Python string or a PyQt string object).
|
||||
"""
|
||||
im_data = _toqclass_helper(im)
|
||||
# must keep a reference, or Qt will crash!
|
||||
# All QImage constructors that take data operate on an existing
|
||||
# buffer, so this buffer has to hang on for the life of the image.
|
||||
# Fixes https://github.com/python-pillow/Pillow/issues/1370
|
||||
self.__data = im_data["data"]
|
||||
super().__init__(
|
||||
self.__data,
|
||||
im_data["size"][0],
|
||||
im_data["size"][1],
|
||||
im_data["format"],
|
||||
)
|
||||
if im_data["colortable"]:
|
||||
self.setColorTable(im_data["colortable"])
|
||||
|
||||
|
||||
def toqimage(im):
|
||||
return ImageQt(im)
|
||||
|
||||
|
||||
def toqpixmap(im):
|
||||
qimage = toqimage(im)
|
||||
return QPixmap.fromImage(qimage)
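
A guarded sketch of the round trip above; it only runs when PyQt6 or PySide6 is actually importable, which this example does not assume.

from PIL import Image, ImageQt

im = Image.new("RGBA", (64, 64), (255, 0, 0, 128))

if ImageQt.qt_is_installed:                 # set by the import loop above
    qim = ImageQt.ImageQt(im)               # a QImage subclass, usable in Qt widgets
    back = ImageQt.fromqimage(qim)          # round-trip back to a PIL image
    print(back.mode, back.size)             # RGBA (64, 64)
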
@ -1,86 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# sequence support classes
|
||||
#
|
||||
# history:
|
||||
# 1997-02-20 fl Created
|
||||
#
|
||||
# Copyright (c) 1997 by Secret Labs AB.
|
||||
# Copyright (c) 1997 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
##
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Callable
|
||||
|
||||
from . import Image
|
||||
|
||||
|
||||
class Iterator:
|
||||
"""
|
||||
This class implements an iterator object that can be used to loop
|
||||
over an image sequence.
|
||||
|
||||
You can use the ``[]`` operator to access elements by index. This operator
|
||||
will raise an :py:exc:`IndexError` if you try to access a nonexistent
|
||||
frame.
|
||||
|
||||
:param im: An image object.
|
||||
"""
|
||||
|
||||
def __init__(self, im: Image.Image):
|
||||
if not hasattr(im, "seek"):
|
||||
msg = "im must have seek method"
|
||||
raise AttributeError(msg)
|
||||
self.im = im
|
||||
self.position = getattr(self.im, "_min_frame", 0)
|
||||
|
||||
def __getitem__(self, ix: int) -> Image.Image:
|
||||
try:
|
||||
self.im.seek(ix)
|
||||
return self.im
|
||||
except EOFError as e:
|
||||
msg = "end of sequence"
|
||||
raise IndexError(msg) from e
|
||||
|
||||
def __iter__(self) -> Iterator:
|
||||
return self
|
||||
|
||||
def __next__(self) -> Image.Image:
|
||||
try:
|
||||
self.im.seek(self.position)
|
||||
self.position += 1
|
||||
return self.im
|
||||
except EOFError as e:
|
||||
msg = "end of sequence"
|
||||
raise StopIteration(msg) from e
|
||||
|
||||
|
||||
def all_frames(
|
||||
im: Image.Image | list[Image.Image],
|
||||
func: Callable[[Image.Image], Image.Image] | None = None,
|
||||
) -> list[Image.Image]:
|
||||
"""
|
||||
Applies a given function to all frames in an image or a list of images.
|
||||
The frames are returned as a list of separate images.
|
||||
|
||||
:param im: An image, or a list of images.
|
||||
:param func: The function to apply to all of the image frames.
|
||||
:returns: A list of images.
|
||||
"""
|
||||
if not isinstance(im, list):
|
||||
im = [im]
|
||||
|
||||
ims = []
|
||||
for imSequence in im:
|
||||
current = imSequence.tell()
|
||||
|
||||
ims += [im_frame.copy() for im_frame in Iterator(imSequence)]
|
||||
|
||||
imSequence.seek(current)
|
||||
return [func(im) for im in ims] if func else ims
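
A usage sketch for the iterator above; the GIF path is hypothetical, and copies are taken because seeking reuses the same Image object.

from PIL import Image, ImageSequence

with Image.open("animation.gif") as im:                 # hypothetical multi-frame file
    frames = [frame.copy() for frame in ImageSequence.Iterator(im)]
    print(len(frames))

    # Or transform every frame in one call:
    thumbs = ImageSequence.all_frames(im, lambda f: f.resize((32, 32)))
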
@ -1,326 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# im.show() drivers
|
||||
#
|
||||
# History:
|
||||
# 2008-04-06 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 2008.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from shlex import quote
|
||||
|
||||
from . import Image
|
||||
|
||||
_viewers = []
|
||||
|
||||
|
||||
def register(viewer, order=1):
|
||||
"""
|
||||
The :py:func:`register` function is used to register additional viewers::
|
||||
|
||||
from PIL import ImageShow
|
||||
ImageShow.register(MyViewer()) # MyViewer will be used as a last resort
|
||||
ImageShow.register(MySecondViewer(), 0) # MySecondViewer will be prioritised
|
||||
ImageShow.register(ImageShow.XVViewer(), 0) # XVViewer will be prioritised
|
||||
|
||||
:param viewer: The viewer to be registered.
|
||||
:param order:
|
||||
Zero or a negative integer to prepend this viewer to the list,
|
||||
a positive integer to append it.
|
||||
"""
|
||||
try:
|
||||
if issubclass(viewer, Viewer):
|
||||
viewer = viewer()
|
||||
except TypeError:
|
||||
pass # raised if viewer wasn't a class
|
||||
if order > 0:
|
||||
_viewers.append(viewer)
|
||||
else:
|
||||
_viewers.insert(0, viewer)
|
||||
|
||||
|
||||
def show(image, title=None, **options):
|
||||
r"""
|
||||
Display a given image.
|
||||
|
||||
:param image: An image object.
|
||||
:param title: Optional title. Not all viewers can display the title.
|
||||
:param \**options: Additional viewer options.
|
||||
:returns: ``True`` if a suitable viewer was found, ``False`` otherwise.
|
||||
"""
|
||||
for viewer in _viewers:
|
||||
if viewer.show(image, title=title, **options):
|
||||
return True
|
||||
return False


class Viewer:
    """Base class for viewers."""

    # main api

    def show(self, image, **options):
        """
        The main function for displaying an image.
        Converts the given image to the target format and displays it.
        """

        if not (
            image.mode in ("1", "RGBA")
            or (self.format == "PNG" and image.mode in ("I;16", "LA"))
        ):
            base = Image.getmodebase(image.mode)
            if image.mode != base:
                image = image.convert(base)

        return self.show_image(image, **options)

    # hook methods

    format = None
    """The format to convert the image into."""
    options = {}
    """Additional options used to convert the image."""

    def get_format(self, image):
        """Return format name, or ``None`` to save as PGM/PPM."""
        return self.format

    def get_command(self, file, **options):
        """
        Returns the command used to display the file.
        Not implemented in the base class.
        """
        msg = "unavailable in base viewer"
        raise NotImplementedError(msg)

    def save_image(self, image):
        """Save to temporary file and return filename."""
        return image._dump(format=self.get_format(image), **self.options)

    def show_image(self, image, **options):
        """Display the given image."""
        return self.show_file(self.save_image(image), **options)

    def show_file(self, path, **options):
        """
        Display given file.
        """
        os.system(self.get_command(path, **options))  # nosec
        return 1
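

# Illustrative sketch, not part of the original module: a minimal custom viewer.
# A subclass usually only needs get_command(); format conversion and the
# temporary-file handling come from Viewer above. The "my-image-viewer"
# executable is hypothetical.
class _ExampleViewer(Viewer):
    format = "PNG"

    def get_command(self, file, **options):
        return f"my-image-viewer {quote(file)}"


# register(_ExampleViewer(), 0) would make this the preferred viewer.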
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
class WindowsViewer(Viewer):
|
||||
"""The default viewer on Windows is the default system application for PNG files."""
|
||||
|
||||
format = "PNG"
|
||||
options = {"compress_level": 1, "save_all": True}
|
||||
|
||||
def get_command(self, file, **options):
|
||||
return (
|
||||
f'start "Pillow" /WAIT "{file}" '
|
||||
"&& ping -n 4 127.0.0.1 >NUL "
|
||||
f'&& del /f "{file}"'
|
||||
)
|
||||
|
||||
|
||||
if sys.platform == "win32":
|
||||
register(WindowsViewer)
|
||||
|
||||
|
||||
class MacViewer(Viewer):
|
||||
"""The default viewer on macOS using ``Preview.app``."""
|
||||
|
||||
format = "PNG"
|
||||
options = {"compress_level": 1, "save_all": True}
|
||||
|
||||
def get_command(self, file, **options):
|
||||
# on darwin open returns immediately resulting in the temp
|
||||
# file removal while app is opening
|
||||
command = "open -a Preview.app"
|
||||
command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&"
|
||||
return command
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
subprocess.call(["open", "-a", "Preview.app", path])
|
||||
executable = sys.executable or shutil.which("python3")
|
||||
if executable:
|
||||
subprocess.Popen(
|
||||
[
|
||||
executable,
|
||||
"-c",
|
||||
"import os, sys, time; time.sleep(20); os.remove(sys.argv[1])",
|
||||
path,
|
||||
]
|
||||
)
|
||||
return 1
|
||||
|
||||
|
||||
if sys.platform == "darwin":
|
||||
register(MacViewer)
|
||||
|
||||
|
||||
class UnixViewer(Viewer):
|
||||
format = "PNG"
|
||||
options = {"compress_level": 1, "save_all": True}
|
||||
|
||||
def get_command(self, file, **options):
|
||||
command = self.get_command_ex(file, **options)[0]
|
||||
return f"({command} {quote(file)}"
|
||||
|
||||
|
||||
class XDGViewer(UnixViewer):
|
||||
"""
|
||||
The freedesktop.org ``xdg-open`` command.
|
||||
"""
|
||||
|
||||
def get_command_ex(self, file, **options):
|
||||
command = executable = "xdg-open"
|
||||
return command, executable
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
subprocess.Popen(["xdg-open", path])
|
||||
return 1
|
||||
|
||||
|
||||
class DisplayViewer(UnixViewer):
|
||||
"""
|
||||
The ImageMagick ``display`` command.
|
||||
This viewer supports the ``title`` parameter.
|
||||
"""
|
||||
|
||||
def get_command_ex(self, file, title=None, **options):
|
||||
command = executable = "display"
|
||||
if title:
|
||||
command += f" -title {quote(title)}"
|
||||
return command, executable
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
args = ["display"]
|
||||
title = options.get("title")
|
||||
if title:
|
||||
args += ["-title", title]
|
||||
args.append(path)
|
||||
|
||||
subprocess.Popen(args)
|
||||
return 1
|
||||
|
||||
|
||||
class GmDisplayViewer(UnixViewer):
|
||||
"""The GraphicsMagick ``gm display`` command."""
|
||||
|
||||
def get_command_ex(self, file, **options):
|
||||
executable = "gm"
|
||||
command = "gm display"
|
||||
return command, executable
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
subprocess.Popen(["gm", "display", path])
|
||||
return 1
|
||||
|
||||
|
||||
class EogViewer(UnixViewer):
|
||||
"""The GNOME Image Viewer ``eog`` command."""
|
||||
|
||||
def get_command_ex(self, file, **options):
|
||||
executable = "eog"
|
||||
command = "eog -n"
|
||||
return command, executable
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
subprocess.Popen(["eog", "-n", path])
|
||||
return 1
|
||||
|
||||
|
||||
class XVViewer(UnixViewer):
|
||||
"""
|
||||
The X Viewer ``xv`` command.
|
||||
This viewer supports the ``title`` parameter.
|
||||
"""
|
||||
|
||||
def get_command_ex(self, file, title=None, **options):
|
||||
# note: xv is pretty outdated. most modern systems have
|
||||
# imagemagick's display command instead.
|
||||
command = executable = "xv"
|
||||
if title:
|
||||
command += f" -name {quote(title)}"
|
||||
return command, executable
|
||||
|
||||
def show_file(self, path, **options):
|
||||
"""
|
||||
Display given file.
|
||||
"""
|
||||
args = ["xv"]
|
||||
title = options.get("title")
|
||||
if title:
|
||||
args += ["-name", title]
|
||||
args.append(path)
|
||||
|
||||
subprocess.Popen(args)
|
||||
return 1
|
||||
|
||||
|
||||
if sys.platform not in ("win32", "darwin"): # unixoids
|
||||
if shutil.which("xdg-open"):
|
||||
register(XDGViewer)
|
||||
if shutil.which("display"):
|
||||
register(DisplayViewer)
|
||||
if shutil.which("gm"):
|
||||
register(GmDisplayViewer)
|
||||
if shutil.which("eog"):
|
||||
register(EogViewer)
|
||||
if shutil.which("xv"):
|
||||
register(XVViewer)
|
||||
|
||||
|
||||
class IPythonViewer(Viewer):
|
||||
"""The viewer for IPython frontends."""
|
||||
|
||||
def show_image(self, image, **options):
|
||||
ipython_display(image)
|
||||
return 1
|
||||
|
||||
|
||||
try:
|
||||
from IPython.display import display as ipython_display
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
register(IPythonViewer)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print("Syntax: python3 ImageShow.py imagefile [title]")
|
||||
sys.exit()
|
||||
|
||||
with Image.open(sys.argv[1]) as im:
|
||||
print(show(im, *sys.argv[2:]))
|
@ -1,129 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# global image statistics
#
# History:
# 1996-04-05 fl   Created
# 1997-05-21 fl   Added mask; added rms, var, stddev attributes
# 1997-08-05 fl   Added median
# 1998-07-05 hk   Fixed integer overflow error
#
# Notes:
# This class shows how to implement delayed evaluation of attributes.
# To get a certain value, simply access the corresponding attribute.
# The __getattr__ dispatcher takes care of the rest.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import math
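

# Illustrative usage sketch, not part of the original module: statistics such as
# ``mean``, ``median``, ``rms`` and ``stddev`` are computed lazily on first
# attribute access (see __getattr__ below) and then cached on the instance.
def _example_stat_usage(im):
    stat = Stat(im)  # Stat is defined further down in this module
    # The first access runs _getmean()/_getstddev(); later accesses reuse the cache.
    return stat.mean, stat.stddev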
|
||||
|
||||
|
||||
class Stat:
|
||||
def __init__(self, image_or_list, mask=None):
|
||||
try:
|
||||
if mask:
|
||||
self.h = image_or_list.histogram(mask)
|
||||
else:
|
||||
self.h = image_or_list.histogram()
|
||||
except AttributeError:
|
||||
self.h = image_or_list # assume it to be a histogram list
|
||||
if not isinstance(self.h, list):
|
||||
msg = "first argument must be image or list"
|
||||
raise TypeError(msg)
|
||||
self.bands = list(range(len(self.h) // 256))
|
||||
|
||||
def __getattr__(self, id):
|
||||
"""Calculate missing attribute"""
|
||||
if id[:4] == "_get":
|
||||
raise AttributeError(id)
|
||||
# calculate missing attribute
|
||||
v = getattr(self, "_get" + id)()
|
||||
setattr(self, id, v)
|
||||
return v
|
||||
|
||||
def _getextrema(self):
|
||||
"""Get min/max values for each band in the image"""
|
||||
|
||||
def minmax(histogram):
|
||||
res_min, res_max = 255, 0
|
||||
for i in range(256):
|
||||
if histogram[i]:
|
||||
res_min = i
|
||||
break
|
||||
for i in range(255, -1, -1):
|
||||
if histogram[i]:
|
||||
res_max = i
|
||||
break
|
||||
return res_min, res_max
|
||||
|
||||
return [minmax(self.h[i:]) for i in range(0, len(self.h), 256)]
|
||||
|
||||
def _getcount(self):
|
||||
"""Get total number of pixels in each layer"""
|
||||
return [sum(self.h[i : i + 256]) for i in range(0, len(self.h), 256)]
|
||||
|
||||
def _getsum(self):
|
||||
"""Get sum of all pixels in each layer"""
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
layer_sum = 0.0
|
||||
for j in range(256):
|
||||
layer_sum += j * self.h[i + j]
|
||||
v.append(layer_sum)
|
||||
return v
|
||||
|
||||
def _getsum2(self):
|
||||
"""Get squared sum of all pixels in each layer"""
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
sum2 = 0.0
|
||||
for j in range(256):
|
||||
sum2 += (j**2) * float(self.h[i + j])
|
||||
v.append(sum2)
|
||||
return v
|
||||
|
||||
def _getmean(self):
|
||||
"""Get average pixel level for each layer"""
|
||||
return [self.sum[i] / self.count[i] for i in self.bands]
|
||||
|
||||
def _getmedian(self):
|
||||
"""Get median pixel level for each layer"""
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
s = 0
|
||||
half = self.count[i] // 2
|
||||
b = i * 256
|
||||
for j in range(256):
|
||||
s = s + self.h[b + j]
|
||||
if s > half:
|
||||
break
|
||||
v.append(j)
|
||||
return v
|
||||
|
||||
def _getrms(self):
|
||||
"""Get RMS for each layer"""
|
||||
return [math.sqrt(self.sum2[i] / self.count[i]) for i in self.bands]
|
||||
|
||||
def _getvar(self):
|
||||
"""Get variance for each layer"""
|
||||
return [
|
||||
(self.sum2[i] - (self.sum[i] ** 2.0) / self.count[i]) / self.count[i]
|
||||
for i in self.bands
|
||||
]
|
||||
|
||||
def _getstddev(self):
|
||||
"""Get standard deviation for each layer"""
|
||||
return [math.sqrt(self.var[i]) for i in self.bands]
|
||||
|
||||
|
||||
Global = Stat # compatibility
|
@ -1,284 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a Tk display interface
|
||||
#
|
||||
# History:
|
||||
# 96-04-08 fl Created
|
||||
# 96-09-06 fl Added getimage method
|
||||
# 96-11-01 fl Rewritten, removed image attribute and crop method
|
||||
# 97-05-09 fl Use PyImagingPaste method instead of image type
|
||||
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
|
||||
# 97-05-17 fl Support the "pilbitmap" booster patch
|
||||
# 97-06-05 fl Added file= and data= argument to image constructors
|
||||
# 98-03-09 fl Added width and height methods to Image classes
|
||||
# 98-07-02 fl Use default mode for "P" images without palette attribute
|
||||
# 98-07-02 fl Explicitly destroy Tkinter image objects
|
||||
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
|
||||
# 99-07-26 fl Automatically hook into Tkinter (if possible)
|
||||
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
|
||||
#
|
||||
# Copyright (c) 1997-1999 by Secret Labs AB
|
||||
# Copyright (c) 1996-1997 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import tkinter
|
||||
from io import BytesIO
|
||||
|
||||
from . import Image
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Check for Tkinter interface hooks
|
||||
|
||||
_pilbitmap_ok = None
|
||||
|
||||
|
||||
def _pilbitmap_check():
|
||||
global _pilbitmap_ok
|
||||
if _pilbitmap_ok is None:
|
||||
try:
|
||||
im = Image.new("1", (1, 1))
|
||||
tkinter.BitmapImage(data=f"PIL:{im.im.id}")
|
||||
_pilbitmap_ok = 1
|
||||
except tkinter.TclError:
|
||||
_pilbitmap_ok = 0
|
||||
return _pilbitmap_ok
|
||||
|
||||
|
||||
def _get_image_from_kw(kw):
|
||||
source = None
|
||||
if "file" in kw:
|
||||
source = kw.pop("file")
|
||||
elif "data" in kw:
|
||||
source = BytesIO(kw.pop("data"))
|
||||
if source:
|
||||
return Image.open(source)
|
||||
|
||||
|
||||
def _pyimagingtkcall(command, photo, id):
|
||||
tk = photo.tk
|
||||
try:
|
||||
tk.call(command, photo, id)
|
||||
except tkinter.TclError:
|
||||
# activate Tkinter hook
|
||||
# may raise an error if it cannot attach to Tkinter
|
||||
from . import _imagingtk
|
||||
|
||||
_imagingtk.tkinit(tk.interpaddr())
|
||||
tk.call(command, photo, id)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# PhotoImage
|
||||
|
||||
|
||||
class PhotoImage:
|
||||
"""
|
||||
A Tkinter-compatible photo image. This can be used
|
||||
everywhere Tkinter expects an image object. If the image is an RGBA
|
||||
image, pixels having alpha 0 are treated as transparent.
|
||||
|
||||
The constructor takes either a PIL image, or a mode and a size.
|
||||
Alternatively, you can use the ``file`` or ``data`` options to initialize
|
||||
the photo image object.
|
||||
|
||||
:param image: Either a PIL image, or a mode string. If a mode string is
|
||||
used, a size must also be given.
|
||||
:param size: If the first argument is a mode string, this defines the size
|
||||
of the image.
|
||||
:keyword file: A filename to load the image from (using
|
||||
``Image.open(file)``).
|
||||
:keyword data: An 8-bit string containing image data (as loaded from an
|
||||
image file).
|
||||
"""
|
||||
|
||||
def __init__(self, image=None, size=None, **kw):
|
||||
# Tk compatibility: file or data
|
||||
if image is None:
|
||||
image = _get_image_from_kw(kw)
|
||||
|
||||
if hasattr(image, "mode") and hasattr(image, "size"):
|
||||
# got an image instead of a mode
|
||||
mode = image.mode
|
||||
if mode == "P":
|
||||
# palette mapped data
|
||||
image.apply_transparency()
|
||||
image.load()
|
||||
try:
|
||||
mode = image.palette.mode
|
||||
except AttributeError:
|
||||
mode = "RGB" # default
|
||||
size = image.size
|
||||
kw["width"], kw["height"] = size
|
||||
else:
|
||||
mode = image
|
||||
image = None
|
||||
|
||||
if mode not in ["1", "L", "RGB", "RGBA"]:
|
||||
mode = Image.getmodebase(mode)
|
||||
|
||||
self.__mode = mode
|
||||
self.__size = size
|
||||
self.__photo = tkinter.PhotoImage(**kw)
|
||||
self.tk = self.__photo.tk
|
||||
if image:
|
||||
self.paste(image)
|
||||
|
||||
def __del__(self):
|
||||
name = self.__photo.name
|
||||
self.__photo.name = None
|
||||
try:
|
||||
self.__photo.tk.call("image", "delete", name)
|
||||
except Exception:
|
||||
pass # ignore internal errors
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
Get the Tkinter photo image identifier. This method is automatically
|
||||
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
|
||||
method.
|
||||
|
||||
:return: A Tkinter photo image identifier (a string).
|
||||
"""
|
||||
return str(self.__photo)
|
||||
|
||||
def width(self):
|
||||
"""
|
||||
Get the width of the image.
|
||||
|
||||
:return: The width, in pixels.
|
||||
"""
|
||||
return self.__size[0]
|
||||
|
||||
def height(self):
|
||||
"""
|
||||
Get the height of the image.
|
||||
|
||||
:return: The height, in pixels.
|
||||
"""
|
||||
return self.__size[1]
|
||||
|
||||
def paste(self, im):
|
||||
"""
|
||||
Paste a PIL image into the photo image. Note that this can
|
||||
be very slow if the photo image is displayed.
|
||||
|
||||
:param im: A PIL image. The size must match the target region. If the
|
||||
mode does not match, the image is converted to the mode of
|
||||
the bitmap image.
|
||||
"""
|
||||
# convert to blittable
|
||||
im.load()
|
||||
image = im.im
|
||||
if image.isblock() and im.mode == self.__mode:
|
||||
block = image
|
||||
else:
|
||||
block = image.new_block(self.__mode, im.size)
|
||||
image.convert2(block, image) # convert directly between buffers
|
||||
|
||||
_pyimagingtkcall("PyImagingPhoto", self.__photo, block.id)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# BitmapImage
|
||||
|
||||
|
||||
class BitmapImage:
|
||||
"""
|
||||
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
|
||||
expects an image object.
|
||||
|
||||
The given image must have mode "1". Pixels having value 0 are treated as
|
||||
transparent. Options, if any, are passed on to Tkinter. The most commonly
|
||||
used option is ``foreground``, which is used to specify the color for the
|
||||
non-transparent parts. See the Tkinter documentation for information on
|
||||
how to specify colours.
|
||||
|
||||
:param image: A PIL image.
|
||||
"""
|
||||
|
||||
def __init__(self, image=None, **kw):
|
||||
# Tk compatibility: file or data
|
||||
if image is None:
|
||||
image = _get_image_from_kw(kw)
|
||||
|
||||
self.__mode = image.mode
|
||||
self.__size = image.size
|
||||
|
||||
if _pilbitmap_check():
|
||||
# fast way (requires the pilbitmap booster patch)
|
||||
image.load()
|
||||
kw["data"] = f"PIL:{image.im.id}"
|
||||
self.__im = image # must keep a reference
|
||||
else:
|
||||
# slow but safe way
|
||||
kw["data"] = image.tobitmap()
|
||||
self.__photo = tkinter.BitmapImage(**kw)
|
||||
|
||||
def __del__(self):
|
||||
name = self.__photo.name
|
||||
self.__photo.name = None
|
||||
try:
|
||||
self.__photo.tk.call("image", "delete", name)
|
||||
except Exception:
|
||||
pass # ignore internal errors
|
||||
|
||||
def width(self):
|
||||
"""
|
||||
Get the width of the image.
|
||||
|
||||
:return: The width, in pixels.
|
||||
"""
|
||||
return self.__size[0]
|
||||
|
||||
def height(self):
|
||||
"""
|
||||
Get the height of the image.
|
||||
|
||||
:return: The height, in pixels.
|
||||
"""
|
||||
return self.__size[1]
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
Get the Tkinter bitmap image identifier. This method is automatically
|
||||
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
|
||||
method.
|
||||
|
||||
:return: A Tkinter bitmap image identifier (a string).
|
||||
"""
|
||||
return str(self.__photo)
|
||||
|
||||
|
||||
def getimage(photo):
|
||||
"""Copies the contents of a PhotoImage to a PIL image memory."""
|
||||
im = Image.new("RGBA", (photo.width(), photo.height()))
|
||||
block = im.im
|
||||
|
||||
_pyimagingtkcall("PyImagingPhotoGet", photo, block.id)
|
||||
|
||||
return im
|
||||
|
||||
|
||||
def _show(image, title):
|
||||
"""Helper for the Image.show method."""
|
||||
|
||||
class UI(tkinter.Label):
|
||||
def __init__(self, master, im):
|
||||
if im.mode == "1":
|
||||
self.image = BitmapImage(im, foreground="white", master=master)
|
||||
else:
|
||||
self.image = PhotoImage(im, master=master)
|
||||
super().__init__(master, image=self.image, bg="black", bd=0)
|
||||
|
||||
if not tkinter._default_root:
|
||||
msg = "tkinter not initialized"
|
||||
raise OSError(msg)
|
||||
top = tkinter.Toplevel()
|
||||
if title:
|
||||
top.title(title)
|
||||
UI(top, image).pack()
|
@ -1,112 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# transform wrappers
|
||||
#
|
||||
# History:
|
||||
# 2002-04-08 fl Created
|
||||
#
|
||||
# Copyright (c) 2002 by Secret Labs AB
|
||||
# Copyright (c) 2002 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Sequence
|
||||
|
||||
from . import Image
|
||||
|
||||
|
||||
class Transform(Image.ImageTransformHandler):
|
||||
method: Image.Transform
|
||||
|
||||
def __init__(self, data: Sequence[int]) -> None:
|
||||
self.data = data
|
||||
|
||||
def getdata(self) -> tuple[int, Sequence[int]]:
|
||||
return self.method, self.data
|
||||
|
||||
def transform(
|
||||
self,
|
||||
size: tuple[int, int],
|
||||
image: Image.Image,
|
||||
**options: dict[str, str | int | tuple[int, ...] | list[int]],
|
||||
) -> Image.Image:
|
||||
# can be overridden
|
||||
method, data = self.getdata()
|
||||
return image.transform(size, method, data, **options)
|
||||
|
||||
|
||||
class AffineTransform(Transform):
|
||||
"""
|
||||
Define an affine image transform.
|
||||
|
||||
This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
|
||||
two rows from an affine transform matrix. For each pixel (x, y) in the
|
||||
output image, the new value is taken from a position (a x + b y + c,
|
||||
d x + e y + f) in the input image, rounded to nearest pixel.
|
||||
|
||||
This function can be used to scale, translate, rotate, and shear the
|
||||
original image.
|
||||
|
||||
See :py:meth:`~PIL.Image.Image.transform`
|
||||
|
||||
:param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
|
||||
from an affine transform matrix.
|
||||
"""
|
||||
|
||||
method = Image.Transform.AFFINE
|
||||
|
||||
|
||||
class ExtentTransform(Transform):
|
||||
"""
|
||||
Define a transform to extract a subregion from an image.
|
||||
|
||||
Maps a rectangle (defined by two corners) from the image to a rectangle of
|
||||
the given size. The resulting image will contain data sampled from between
|
||||
the corners, such that (x0, y0) in the input image will end up at (0,0) in
|
||||
the output image, and (x1, y1) at size.
|
||||
|
||||
This method can be used to crop, stretch, shrink, or mirror an arbitrary
|
||||
rectangle in the current image. It is slightly slower than crop, but about
|
||||
as fast as a corresponding resize operation.
|
||||
|
||||
See :py:meth:`~PIL.Image.Image.transform`
|
||||
|
||||
:param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the
|
||||
input image's coordinate system. See :ref:`coordinate-system`.
|
||||
"""
|
||||
|
||||
method = Image.Transform.EXTENT
|
||||
|
||||
|
||||
class QuadTransform(Transform):
|
||||
"""
|
||||
Define a quad image transform.
|
||||
|
||||
Maps a quadrilateral (a region defined by four corners) from the image to a
|
||||
rectangle of the given size.
|
||||
|
||||
See :py:meth:`~PIL.Image.Image.transform`
|
||||
|
||||
:param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the
|
||||
upper left, lower left, lower right, and upper right corner of the
|
||||
source quadrilateral.
|
||||
"""
|
||||
|
||||
method = Image.Transform.QUAD
|
||||
|
||||
|
||||
class MeshTransform(Transform):
|
||||
"""
|
||||
Define a mesh image transform. A mesh transform consists of one or more
|
||||
individual quad transforms.
|
||||
|
||||
See :py:meth:`~PIL.Image.Image.transform`
|
||||
|
||||
:param data: A list of (bbox, quad) tuples.
|
||||
"""
|
||||
|
||||
method = Image.Transform.MESH
|
@ -1,231 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a Windows DIB display interface
|
||||
#
|
||||
# History:
|
||||
# 1996-05-20 fl Created
|
||||
# 1996-09-20 fl Fixed subregion exposure
|
||||
# 1997-09-21 fl Added draw primitive (for tzPrint)
|
||||
# 2003-05-21 fl Added experimental Window/ImageWindow classes
|
||||
# 2003-09-05 fl Added fromstring/tostring methods
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2003.
|
||||
# Copyright (c) Fredrik Lundh 1996-2003.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image
|
||||
|
||||
|
||||
class HDC:
|
||||
"""
|
||||
Wraps an HDC integer. The resulting object can be passed to the
|
||||
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
|
||||
methods.
|
||||
"""
|
||||
|
||||
def __init__(self, dc):
|
||||
self.dc = dc
|
||||
|
||||
def __int__(self):
|
||||
return self.dc
|
||||
|
||||
|
||||
class HWND:
|
||||
"""
|
||||
Wraps an HWND integer. The resulting object can be passed to the
|
||||
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
|
||||
methods, instead of a DC.
|
||||
"""
|
||||
|
||||
def __init__(self, wnd):
|
||||
self.wnd = wnd
|
||||
|
||||
def __int__(self):
|
||||
return self.wnd
|
||||
|
||||
|
||||
class Dib:
|
||||
"""
|
||||
A Windows bitmap with the given mode and size. The mode can be one of "1",
|
||||
"L", "P", or "RGB".
|
||||
|
||||
If the display requires a palette, this constructor creates a suitable
|
||||
palette and associates it with the image. For an "L" image, 128 graylevels
|
||||
are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
|
||||
with 20 graylevels.
|
||||
|
||||
To make sure that palettes work properly under Windows, you must call the
|
||||
``palette`` method upon certain events from Windows.
|
||||
|
||||
:param image: Either a PIL image, or a mode string. If a mode string is
|
||||
used, a size must also be given. The mode can be one of "1",
|
||||
"L", "P", or "RGB".
|
||||
:param size: If the first argument is a mode string, this
|
||||
defines the size of the image.
|
||||
"""
|
||||
|
||||
def __init__(self, image, size=None):
|
||||
if hasattr(image, "mode") and hasattr(image, "size"):
|
||||
mode = image.mode
|
||||
size = image.size
|
||||
else:
|
||||
mode = image
|
||||
image = None
|
||||
if mode not in ["1", "L", "P", "RGB"]:
|
||||
mode = Image.getmodebase(mode)
|
||||
self.image = Image.core.display(mode, size)
|
||||
self.mode = mode
|
||||
self.size = size
|
||||
if image:
|
||||
self.paste(image)
|
||||
|
||||
def expose(self, handle):
|
||||
"""
|
||||
Copy the bitmap contents to a device context.
|
||||
|
||||
:param handle: Device context (HDC), cast to a Python integer, or an
|
||||
HDC or HWND instance. In PythonWin, you can use
|
||||
``CDC.GetHandleAttrib()`` to get a suitable handle.
|
||||
"""
|
||||
if isinstance(handle, HWND):
|
||||
dc = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.expose(dc)
|
||||
finally:
|
||||
self.image.releasedc(handle, dc)
|
||||
else:
|
||||
result = self.image.expose(handle)
|
||||
return result
|
||||
|
||||
def draw(self, handle, dst, src=None):
|
||||
"""
|
||||
Same as expose, but allows you to specify where to draw the image, and
|
||||
what part of it to draw.
|
||||
|
||||
The destination and source areas are given as 4-tuple rectangles. If
|
||||
the source is omitted, the entire image is copied. If the source and
|
||||
the destination have different sizes, the image is resized as
|
||||
necessary.
|
||||
"""
|
||||
if not src:
|
||||
src = (0, 0) + self.size
|
||||
if isinstance(handle, HWND):
|
||||
dc = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.draw(dc, dst, src)
|
||||
finally:
|
||||
self.image.releasedc(handle, dc)
|
||||
else:
|
||||
result = self.image.draw(handle, dst, src)
|
||||
return result
|
||||
|
||||
def query_palette(self, handle):
|
||||
"""
|
||||
Installs the palette associated with the image in the given device
|
||||
context.
|
||||
|
||||
This method should be called upon **QUERYNEWPALETTE** and
|
||||
**PALETTECHANGED** events from Windows. If this method returns a
|
||||
non-zero value, one or more display palette entries were changed, and
|
||||
the image should be redrawn.
|
||||
|
||||
:param handle: Device context (HDC), cast to a Python integer, or an
|
||||
HDC or HWND instance.
|
||||
:return: A true value if one or more entries were changed (this
|
||||
indicates that the image should be redrawn).
|
||||
"""
|
||||
if isinstance(handle, HWND):
|
||||
handle = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.query_palette(handle)
|
||||
finally:
|
||||
self.image.releasedc(handle, handle)
|
||||
else:
|
||||
result = self.image.query_palette(handle)
|
||||
return result
|
||||
|
||||
def paste(self, im, box=None):
|
||||
"""
|
||||
Paste a PIL image into the bitmap image.
|
||||
|
||||
:param im: A PIL image. The size must match the target region.
|
||||
If the mode does not match, the image is converted to the
|
||||
mode of the bitmap image.
|
||||
:param box: A 4-tuple defining the left, upper, right, and
|
||||
lower pixel coordinate. See :ref:`coordinate-system`. If
|
||||
None is given instead of a tuple, all of the image is
|
||||
assumed.
|
||||
"""
|
||||
im.load()
|
||||
if self.mode != im.mode:
|
||||
im = im.convert(self.mode)
|
||||
if box:
|
||||
self.image.paste(im.im, box)
|
||||
else:
|
||||
self.image.paste(im.im)
|
||||
|
||||
def frombytes(self, buffer):
|
||||
"""
|
||||
Load display memory contents from byte data.
|
||||
|
||||
:param buffer: A buffer containing display data (usually
|
||||
data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`)
|
||||
"""
|
||||
return self.image.frombytes(buffer)
|
||||
|
||||
def tobytes(self):
|
||||
"""
|
||||
Copy display memory contents to bytes object.
|
||||
|
||||
:return: A bytes object containing display data.
|
||||
"""
|
||||
return self.image.tobytes()
|
||||
|
||||
|
||||
class Window:
|
||||
"""Create a Window with the given title size."""
|
||||
|
||||
def __init__(self, title="PIL", width=None, height=None):
|
||||
self.hwnd = Image.core.createwindow(
|
||||
title, self.__dispatcher, width or 0, height or 0
|
||||
)
|
||||
|
||||
def __dispatcher(self, action, *args):
|
||||
return getattr(self, "ui_handle_" + action)(*args)
|
||||
|
||||
def ui_handle_clear(self, dc, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_damage(self, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_destroy(self):
|
||||
pass
|
||||
|
||||
def ui_handle_repair(self, dc, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_resize(self, width, height):
|
||||
pass
|
||||
|
||||
def mainloop(self):
|
||||
Image.core.eventloop()
|
||||
|
||||
|
||||
class ImageWindow(Window):
|
||||
"""Create an image window which displays the given image."""
|
||||
|
||||
def __init__(self, image, title="PIL"):
|
||||
if not isinstance(image, Dib):
|
||||
image = Dib(image)
|
||||
self.image = image
|
||||
width, height = image.size
|
||||
super().__init__(title, width=width, height=height)
|
||||
|
||||
def ui_handle_repair(self, dc, x0, y0, x1, y1):
|
||||
self.image.draw(dc, (x0, y0, x1, y1))
|
@ -1,101 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# IM Tools support for PIL
|
||||
#
|
||||
# history:
|
||||
# 1996-05-27 fl Created (read 8-bit images only)
|
||||
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2001.
|
||||
# Copyright (c) Fredrik Lundh 1996-2001.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
field = re.compile(rb"([a-z]*) ([^ \r\n]*)")
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for IM Tools images.
|
||||
|
||||
|
||||
class ImtImageFile(ImageFile.ImageFile):
|
||||
format = "IMT"
|
||||
format_description = "IM Tools"
|
||||
|
||||
def _open(self):
|
||||
# Quick rejection: if there's not a LF among the first
|
||||
# 100 bytes, this is (probably) not a text header.
|
||||
|
||||
buffer = self.fp.read(100)
|
||||
if b"\n" not in buffer:
|
||||
msg = "not an IM file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
xsize = ysize = 0
|
||||
|
||||
while True:
|
||||
if buffer:
|
||||
s = buffer[:1]
|
||||
buffer = buffer[1:]
|
||||
else:
|
||||
s = self.fp.read(1)
|
||||
if not s:
|
||||
break
|
||||
|
||||
if s == b"\x0C":
|
||||
# image data begins
|
||||
self.tile = [
|
||||
(
|
||||
"raw",
|
||||
(0, 0) + self.size,
|
||||
self.fp.tell() - len(buffer),
|
||||
(self.mode, 0, 1),
|
||||
)
|
||||
]
|
||||
|
||||
break
|
||||
|
||||
else:
|
||||
# read key/value pair
|
||||
if b"\n" not in buffer:
|
||||
buffer += self.fp.read(100)
|
||||
lines = buffer.split(b"\n")
|
||||
s += lines.pop(0)
|
||||
buffer = b"\n".join(lines)
|
||||
if len(s) == 1 or len(s) > 100:
|
||||
break
|
||||
if s[0] == ord(b"*"):
|
||||
continue # comment
|
||||
|
||||
m = field.match(s)
|
||||
if not m:
|
||||
break
|
||||
k, v = m.group(1, 2)
|
||||
if k == b"width":
|
||||
xsize = int(v)
|
||||
self._size = xsize, ysize
|
||||
elif k == b"height":
|
||||
ysize = int(v)
|
||||
self._size = xsize, ysize
|
||||
elif k == b"pixel" and v == b"n8":
|
||||
self._mode = "L"
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open(ImtImageFile.format, ImtImageFile)
|
||||
|
||||
#
|
||||
# no extension registered (".im" is simply too common)
|
@ -1,235 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# IPTC/NAA file handling
|
||||
#
|
||||
# history:
|
||||
# 1995-10-01 fl Created
|
||||
# 1998-03-09 fl Cleaned up and added to PIL
|
||||
# 2002-06-18 fl Added getiptcinfo helper
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2002.
|
||||
# Copyright (c) Fredrik Lundh 1995.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from io import BytesIO
|
||||
from typing import Sequence
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import i16be as i16
|
||||
from ._binary import i32be as i32
|
||||
from ._deprecate import deprecate
|
||||
|
||||
COMPRESSION = {1: "raw", 5: "jpeg"}
|
||||
|
||||
|
||||
def __getattr__(name: str) -> bytes:
|
||||
if name == "PAD":
|
||||
deprecate("IptcImagePlugin.PAD", 12)
|
||||
return b"\0\0\0\0"
|
||||
msg = f"module '{__name__}' has no attribute '{name}'"
|
||||
raise AttributeError(msg)
|
||||
|
||||
|
||||
#
|
||||
# Helpers
|
||||
|
||||
|
||||
def _i(c: bytes) -> int:
|
||||
return i32((b"\0\0\0\0" + c)[-4:])
|
||||
|
||||
|
||||
def _i8(c: int | bytes) -> int:
|
||||
return c if isinstance(c, int) else c[0]
|
||||
|
||||
|
||||
def i(c: bytes) -> int:
|
||||
""".. deprecated:: 10.2.0"""
|
||||
deprecate("IptcImagePlugin.i", 12)
|
||||
return _i(c)
|
||||
|
||||
|
||||
def dump(c: Sequence[int | bytes]) -> None:
|
||||
""".. deprecated:: 10.2.0"""
|
||||
deprecate("IptcImagePlugin.dump", 12)
|
||||
for i in c:
|
||||
print("%02x" % _i8(i), end=" ")
|
||||
print()
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
|
||||
# from TIFF and JPEG files, use the <b>getiptcinfo</b> function.
|
||||
|
||||
|
||||
class IptcImageFile(ImageFile.ImageFile):
|
||||
format = "IPTC"
|
||||
format_description = "IPTC/NAA"
|
||||
|
||||
def getint(self, key: tuple[int, int]) -> int:
|
||||
return _i(self.info[key])
|
||||
|
||||
def field(self) -> tuple[tuple[int, int] | None, int]:
|
||||
#
|
||||
# get a IPTC field header
|
||||
s = self.fp.read(5)
|
||||
if not s.strip(b"\x00"):
|
||||
return None, 0
|
||||
|
||||
tag = s[1], s[2]
|
||||
|
||||
# syntax
|
||||
if s[0] != 0x1C or tag[0] not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 240]:
|
||||
msg = "invalid IPTC/NAA file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
# field size
|
||||
size = s[3]
|
||||
if size > 132:
|
||||
msg = "illegal field length in IPTC/NAA file"
|
||||
raise OSError(msg)
|
||||
elif size == 128:
|
||||
size = 0
|
||||
elif size > 128:
|
||||
size = _i(self.fp.read(size - 128))
|
||||
else:
|
||||
size = i16(s, 3)
|
||||
|
||||
return tag, size
|
||||
|
||||
def _open(self) -> None:
|
||||
# load descriptive fields
|
||||
while True:
|
||||
offset = self.fp.tell()
|
||||
tag, size = self.field()
|
||||
if not tag or tag == (8, 10):
|
||||
break
|
||||
if size:
|
||||
tagdata = self.fp.read(size)
|
||||
else:
|
||||
tagdata = None
|
||||
if tag in self.info:
|
||||
if isinstance(self.info[tag], list):
|
||||
self.info[tag].append(tagdata)
|
||||
else:
|
||||
self.info[tag] = [self.info[tag], tagdata]
|
||||
else:
|
||||
self.info[tag] = tagdata
|
||||
|
||||
# mode
|
||||
layers = self.info[(3, 60)][0]
|
||||
component = self.info[(3, 60)][1]
|
||||
if (3, 65) in self.info:
|
||||
id = self.info[(3, 65)][0] - 1
|
||||
else:
|
||||
id = 0
|
||||
if layers == 1 and not component:
|
||||
self._mode = "L"
|
||||
elif layers == 3 and component:
|
||||
self._mode = "RGB"[id]
|
||||
elif layers == 4 and component:
|
||||
self._mode = "CMYK"[id]
|
||||
|
||||
# size
|
||||
self._size = self.getint((3, 20)), self.getint((3, 30))
|
||||
|
||||
# compression
|
||||
try:
|
||||
compression = COMPRESSION[self.getint((3, 120))]
|
||||
except KeyError as e:
|
||||
msg = "Unknown IPTC image compression"
|
||||
raise OSError(msg) from e
|
||||
|
||||
# tile
|
||||
if tag == (8, 10):
|
||||
self.tile = [("iptc", (0, 0) + self.size, offset, compression)]
|
||||
|
||||
def load(self):
|
||||
if len(self.tile) != 1 or self.tile[0][0] != "iptc":
|
||||
return ImageFile.ImageFile.load(self)
|
||||
|
||||
offset, compression = self.tile[0][2:]
|
||||
|
||||
self.fp.seek(offset)
|
||||
|
||||
# Copy image data to temporary file
|
||||
o = BytesIO()
|
||||
if compression == "raw":
|
||||
# To simplify access to the extracted file,
|
||||
# prepend a PPM header
|
||||
o.write(b"P5\n%d %d\n255\n" % self.size)
|
||||
while True:
|
||||
type, size = self.field()
|
||||
if type != (8, 10):
|
||||
break
|
||||
while size > 0:
|
||||
s = self.fp.read(min(size, 8192))
|
||||
if not s:
|
||||
break
|
||||
o.write(s)
|
||||
size -= len(s)
|
||||
|
||||
with Image.open(o) as _im:
|
||||
_im.load()
|
||||
self.im = _im.im
|
||||
|
||||
|
||||
Image.register_open(IptcImageFile.format, IptcImageFile)
|
||||
|
||||
Image.register_extension(IptcImageFile.format, ".iim")
|
||||
|
||||
|
||||
def getiptcinfo(im):
|
||||
"""
|
||||
Get IPTC information from TIFF, JPEG, or IPTC file.
|
||||
|
||||
:param im: An image containing IPTC data.
|
||||
:returns: A dictionary containing IPTC information, or None if
|
||||
no IPTC information block was found.
|
||||
"""
|
||||
from . import JpegImagePlugin, TiffImagePlugin
|
||||
|
||||
data = None
|
||||
|
||||
if isinstance(im, IptcImageFile):
|
||||
# return info dictionary right away
|
||||
return im.info
|
||||
|
||||
elif isinstance(im, JpegImagePlugin.JpegImageFile):
|
||||
# extract the IPTC/NAA resource
|
||||
photoshop = im.info.get("photoshop")
|
||||
if photoshop:
|
||||
data = photoshop.get(0x0404)
|
||||
|
||||
elif isinstance(im, TiffImagePlugin.TiffImageFile):
|
||||
# get raw data from the IPTC/NAA tag (PhotoShop tags the data
|
||||
# as 4-byte integers, so we cannot use the get method...)
|
||||
try:
|
||||
data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
if data is None:
|
||||
return None # no properties
|
||||
|
||||
# create an IptcImagePlugin object without initializing it
|
||||
class FakeImage:
|
||||
pass
|
||||
|
||||
im = FakeImage()
|
||||
im.__class__ = IptcImageFile
|
||||
|
||||
# parse the IPTC information chunk
|
||||
im.info = {}
|
||||
im.fp = BytesIO(data)
|
||||
|
||||
try:
|
||||
im._open()
|
||||
except (IndexError, KeyError):
|
||||
pass # expected failure
|
||||
|
||||
    return im.info
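

# Illustrative usage sketch, not part of the original module: getiptcinfo() above
# also accepts JPEG and TIFF files that embed an IPTC block. The filename below
# is hypothetical.
def _example_getiptcinfo(path="photo.jpg"):
    with Image.open(path) as im:
        # A dict keyed by (record, dataset) tuples, or None if no IPTC block exists.
        return getiptcinfo(im)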
@ -1,398 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# JPEG2000 file handling
|
||||
#
|
||||
# History:
|
||||
# 2014-03-12 ajh Created
|
||||
# 2021-06-30 rogermb Extract dpi information from the 'resc' header box
|
||||
#
|
||||
# Copyright (c) 2014 Coriolis Systems Limited
|
||||
# Copyright (c) 2014 Alastair Houghton
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import os
|
||||
import struct
|
||||
|
||||
from . import Image, ImageFile, _binary
|
||||
|
||||
|
||||
class BoxReader:
|
||||
"""
|
||||
A small helper class to read fields stored in JPEG2000 header boxes
|
||||
and to easily step into and read sub-boxes.
|
||||
"""
|
||||
|
||||
def __init__(self, fp, length=-1):
|
||||
self.fp = fp
|
||||
self.has_length = length >= 0
|
||||
self.length = length
|
||||
self.remaining_in_box = -1
|
||||
|
||||
def _can_read(self, num_bytes):
|
||||
if self.has_length and self.fp.tell() + num_bytes > self.length:
|
||||
# Outside box: ensure we don't read past the known file length
|
||||
return False
|
||||
if self.remaining_in_box >= 0:
|
||||
# Inside box contents: ensure read does not go past box boundaries
|
||||
return num_bytes <= self.remaining_in_box
|
||||
else:
|
||||
return True # No length known, just read
|
||||
|
||||
def _read_bytes(self, num_bytes):
|
||||
if not self._can_read(num_bytes):
|
||||
msg = "Not enough data in header"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
data = self.fp.read(num_bytes)
|
||||
if len(data) < num_bytes:
|
||||
msg = f"Expected to read {num_bytes} bytes but only got {len(data)}."
|
||||
raise OSError(msg)
|
||||
|
||||
if self.remaining_in_box > 0:
|
||||
self.remaining_in_box -= num_bytes
|
||||
return data
|
||||
|
||||
def read_fields(self, field_format):
|
||||
size = struct.calcsize(field_format)
|
||||
data = self._read_bytes(size)
|
||||
return struct.unpack(field_format, data)
|
||||
|
||||
def read_boxes(self):
|
||||
size = self.remaining_in_box
|
||||
data = self._read_bytes(size)
|
||||
return BoxReader(io.BytesIO(data), size)
|
||||
|
||||
def has_next_box(self):
|
||||
if self.has_length:
|
||||
return self.fp.tell() + self.remaining_in_box < self.length
|
||||
else:
|
||||
return True
|
||||
|
||||
def next_box_type(self):
|
||||
# Skip the rest of the box if it has not been read
|
||||
if self.remaining_in_box > 0:
|
||||
self.fp.seek(self.remaining_in_box, os.SEEK_CUR)
|
||||
self.remaining_in_box = -1
|
||||
|
||||
# Read the length and type of the next box
|
||||
lbox, tbox = self.read_fields(">I4s")
|
||||
if lbox == 1:
|
||||
lbox = self.read_fields(">Q")[0]
|
||||
hlen = 16
|
||||
else:
|
||||
hlen = 8
|
||||
|
||||
if lbox < hlen or not self._can_read(lbox - hlen):
|
||||
msg = "Invalid header length"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.remaining_in_box = lbox - hlen
|
||||
return tbox
|
||||
|
||||
|
||||
def _parse_codestream(fp):
|
||||
"""Parse the JPEG 2000 codestream to extract the size and component
|
||||
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
|
||||
|
||||
hdr = fp.read(2)
|
||||
lsiz = _binary.i16be(hdr)
|
||||
siz = hdr + fp.read(lsiz - 2)
|
||||
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
|
||||
">HHIIIIIIIIH", siz
|
||||
)
|
||||
ssiz = [None] * csiz
|
||||
xrsiz = [None] * csiz
|
||||
yrsiz = [None] * csiz
|
||||
for i in range(csiz):
|
||||
ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i)
|
||||
|
||||
size = (xsiz - xosiz, ysiz - yosiz)
|
||||
if csiz == 1:
|
||||
if (yrsiz[0] & 0x7F) > 8:
|
||||
mode = "I;16"
|
||||
else:
|
||||
mode = "L"
|
||||
elif csiz == 2:
|
||||
mode = "LA"
|
||||
elif csiz == 3:
|
||||
mode = "RGB"
|
||||
elif csiz == 4:
|
||||
mode = "RGBA"
|
||||
else:
|
||||
mode = None
|
||||
|
||||
return size, mode
|
||||
|
||||
|
||||
def _res_to_dpi(num, denom, exp):
    """Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution,
    calculated as (num / denom) * 10^exp and stored in dots per meter,
    to floating-point dots per inch."""
    if denom != 0:
        return (254 * num * (10**exp)) / (10000 * denom)
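

# Worked example (illustrative, not part of the original module): a 'resc' box
# storing num=7200, denom=2, exp=0 encodes 3600 dots per metre, and
# 254 * 7200 * (10 ** 0) / (10000 * 2) == 91.44 dots per inch.
assert _res_to_dpi(7200, 2, 0) == 91.44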
|
||||
|
||||
|
||||
def _parse_jp2_header(fp):
|
||||
"""Parse the JP2 header box to extract size, component count,
|
||||
color space information, and optionally DPI information,
|
||||
returning a (size, mode, mimetype, dpi) tuple."""
|
||||
|
||||
# Find the JP2 header box
|
||||
reader = BoxReader(fp)
|
||||
header = None
|
||||
mimetype = None
|
||||
while reader.has_next_box():
|
||||
tbox = reader.next_box_type()
|
||||
|
||||
if tbox == b"jp2h":
|
||||
header = reader.read_boxes()
|
||||
break
|
||||
elif tbox == b"ftyp":
|
||||
if reader.read_fields(">4s")[0] == b"jpx ":
|
||||
mimetype = "image/jpx"
|
||||
|
||||
size = None
|
||||
mode = None
|
||||
bpc = None
|
||||
nc = None
|
||||
dpi = None # 2-tuple of DPI info, or None
|
||||
|
||||
while header.has_next_box():
|
||||
tbox = header.next_box_type()
|
||||
|
||||
if tbox == b"ihdr":
|
||||
height, width, nc, bpc = header.read_fields(">IIHB")
|
||||
size = (width, height)
|
||||
if nc == 1 and (bpc & 0x7F) > 8:
|
||||
mode = "I;16"
|
||||
elif nc == 1:
|
||||
mode = "L"
|
||||
elif nc == 2:
|
||||
mode = "LA"
|
||||
elif nc == 3:
|
||||
mode = "RGB"
|
||||
elif nc == 4:
|
||||
mode = "RGBA"
|
||||
elif tbox == b"res ":
|
||||
res = header.read_boxes()
|
||||
while res.has_next_box():
|
||||
tres = res.next_box_type()
|
||||
if tres == b"resc":
|
||||
vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB")
|
||||
hres = _res_to_dpi(hrcn, hrcd, hrce)
|
||||
vres = _res_to_dpi(vrcn, vrcd, vrce)
|
||||
if hres is not None and vres is not None:
|
||||
dpi = (hres, vres)
|
||||
break
|
||||
|
||||
if size is None or mode is None:
|
||||
msg = "Malformed JP2 header"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
return size, mode, mimetype, dpi
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for JPEG2000 images.
|
||||
|
||||
|
||||
class Jpeg2KImageFile(ImageFile.ImageFile):
|
||||
format = "JPEG2000"
|
||||
format_description = "JPEG 2000 (ISO 15444)"
|
||||
|
||||
def _open(self):
|
||||
sig = self.fp.read(4)
|
||||
if sig == b"\xff\x4f\xff\x51":
|
||||
self.codec = "j2k"
|
||||
self._size, self._mode = _parse_codestream(self.fp)
|
||||
else:
|
||||
sig = sig + self.fp.read(8)
|
||||
|
||||
if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a":
|
||||
self.codec = "jp2"
|
||||
header = _parse_jp2_header(self.fp)
|
||||
self._size, self._mode, self.custom_mimetype, dpi = header
|
||||
if dpi is not None:
|
||||
self.info["dpi"] = dpi
|
||||
if self.fp.read(12).endswith(b"jp2c\xff\x4f\xff\x51"):
|
||||
self._parse_comment()
|
||||
else:
|
||||
msg = "not a JPEG 2000 file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
if self.size is None or self.mode is None:
|
||||
msg = "unable to determine size/mode"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self._reduce = 0
|
||||
self.layers = 0
|
||||
|
||||
fd = -1
|
||||
length = -1
|
||||
|
||||
try:
|
||||
fd = self.fp.fileno()
|
||||
length = os.fstat(fd).st_size
|
||||
except Exception:
|
||||
fd = -1
|
||||
try:
|
||||
pos = self.fp.tell()
|
||||
self.fp.seek(0, io.SEEK_END)
|
||||
length = self.fp.tell()
|
||||
self.fp.seek(pos)
|
||||
except Exception:
|
||||
length = -1
|
||||
|
||||
self.tile = [
|
||||
(
|
||||
"jpeg2k",
|
||||
(0, 0) + self.size,
|
||||
0,
|
||||
(self.codec, self._reduce, self.layers, fd, length),
|
||||
)
|
||||
]
|
||||
|
||||
def _parse_comment(self):
|
||||
hdr = self.fp.read(2)
|
||||
length = _binary.i16be(hdr)
|
||||
self.fp.seek(length - 2, os.SEEK_CUR)
|
||||
|
||||
while True:
|
||||
marker = self.fp.read(2)
|
||||
if not marker:
|
||||
break
|
||||
typ = marker[1]
|
||||
if typ in (0x90, 0xD9):
|
||||
# Start of tile or end of codestream
|
||||
break
|
||||
hdr = self.fp.read(2)
|
||||
length = _binary.i16be(hdr)
|
||||
if typ == 0x64:
|
||||
# Comment
|
||||
self.info["comment"] = self.fp.read(length - 2)[2:]
|
||||
break
|
||||
else:
|
||||
self.fp.seek(length - 2, os.SEEK_CUR)
|
||||
|
||||
@property
|
||||
def reduce(self):
|
||||
# https://github.com/python-pillow/Pillow/issues/4343 found that the
|
||||
# new Image 'reduce' method was shadowed by this plugin's 'reduce'
|
||||
# property. This attempts to allow for both scenarios
|
||||
return self._reduce or super().reduce
|
||||
|
||||
@reduce.setter
|
||||
def reduce(self, value):
|
||||
self._reduce = value
|
||||
|
||||
def load(self):
|
||||
if self.tile and self._reduce:
|
||||
power = 1 << self._reduce
|
||||
adjust = power >> 1
|
||||
self._size = (
|
||||
int((self.size[0] + adjust) / power),
|
||||
int((self.size[1] + adjust) / power),
|
||||
)
|
||||
|
||||
# Update the reduce and layers settings
|
||||
t = self.tile[0]
|
||||
t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4])
|
||||
self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]
|
||||
|
||||
return ImageFile.ImageFile.load(self)
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return (
|
||||
prefix[:4] == b"\xff\x4f\xff\x51"
|
||||
or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a"
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Save support
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
# Get the keyword arguments
|
||||
info = im.encoderinfo
|
||||
|
||||
if filename.endswith(".j2k") or info.get("no_jp2", False):
|
||||
kind = "j2k"
|
||||
else:
|
||||
kind = "jp2"
|
||||
|
||||
offset = info.get("offset", None)
|
||||
tile_offset = info.get("tile_offset", None)
|
||||
tile_size = info.get("tile_size", None)
|
||||
quality_mode = info.get("quality_mode", "rates")
|
||||
quality_layers = info.get("quality_layers", None)
|
||||
if quality_layers is not None and not (
|
||||
isinstance(quality_layers, (list, tuple))
|
||||
and all(
|
||||
isinstance(quality_layer, (int, float)) for quality_layer in quality_layers
|
||||
)
|
||||
):
|
||||
msg = "quality_layers must be a sequence of numbers"
|
||||
raise ValueError(msg)
|
||||
|
||||
num_resolutions = info.get("num_resolutions", 0)
|
||||
cblk_size = info.get("codeblock_size", None)
|
||||
precinct_size = info.get("precinct_size", None)
|
||||
irreversible = info.get("irreversible", False)
|
||||
progression = info.get("progression", "LRCP")
|
||||
cinema_mode = info.get("cinema_mode", "no")
|
||||
mct = info.get("mct", 0)
|
||||
signed = info.get("signed", False)
|
||||
comment = info.get("comment")
|
||||
if isinstance(comment, str):
|
||||
comment = comment.encode()
|
||||
plt = info.get("plt", False)
|
||||
|
||||
fd = -1
|
||||
if hasattr(fp, "fileno"):
|
||||
try:
|
||||
fd = fp.fileno()
|
||||
except Exception:
|
||||
fd = -1
|
||||
|
||||
im.encoderconfig = (
|
||||
offset,
|
||||
tile_offset,
|
||||
tile_size,
|
||||
quality_mode,
|
||||
quality_layers,
|
||||
num_resolutions,
|
||||
cblk_size,
|
||||
precinct_size,
|
||||
irreversible,
|
||||
progression,
|
||||
cinema_mode,
|
||||
mct,
|
||||
signed,
|
||||
fd,
|
||||
comment,
|
||||
plt,
|
||||
)
|
||||
|
||||
ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)])
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
|
||||
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
|
||||
Image.register_save(Jpeg2KImageFile.format, _save)
|
||||
|
||||
Image.register_extensions(
|
||||
Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]
|
||||
)
|
||||
|
||||
Image.register_mime(Jpeg2KImageFile.format, "image/jp2")
|
@ -1,868 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# JPEG (JFIF) file handling
|
||||
#
|
||||
# See "Digital Compression and Coding of Continuous-Tone Still Images,
|
||||
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
|
||||
#
|
||||
# History:
|
||||
# 1995-09-09 fl Created
|
||||
# 1995-09-13 fl Added full parser
|
||||
# 1996-03-25 fl Added hack to use the IJG command line utilities
|
||||
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
|
||||
# 1996-05-28 fl Added draft support, JFIF version (0.1)
|
||||
# 1996-12-30 fl Added encoder options, added progression property (0.2)
|
||||
# 1997-08-27 fl Save mode 1 images as BW (0.3)
|
||||
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
|
||||
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
|
||||
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
|
||||
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
|
||||
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
|
||||
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
|
||||
# 2003-09-13 fl Extract COM markers
|
||||
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
|
||||
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
|
||||
# 2009-03-08 fl Added subsampling support (from Justin Huff).
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-1996 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import array
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
import struct
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import warnings
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import i16be as i16
|
||||
from ._binary import i32be as i32
|
||||
from ._binary import o8
|
||||
from ._binary import o16be as o16
|
||||
from .JpegPresets import presets
|
||||
|
||||
#
|
||||
# Parser
|
||||
|
||||
|
||||
def Skip(self, marker):
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
ImageFile._safe_read(self.fp, n)
|
||||
|
||||
|
||||
def APP(self, marker):
|
||||
#
|
||||
# Application marker. Store these in the APP dictionary.
|
||||
# Also look for well-known application markers.
|
||||
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
|
||||
app = "APP%d" % (marker & 15)
|
||||
|
||||
self.app[app] = s # compatibility
|
||||
self.applist.append((app, s))
|
||||
|
||||
if marker == 0xFFE0 and s[:4] == b"JFIF":
|
||||
# extract JFIF information
|
||||
self.info["jfif"] = version = i16(s, 5) # version
|
||||
self.info["jfif_version"] = divmod(version, 256)
|
||||
# extract JFIF properties
|
||||
try:
|
||||
jfif_unit = s[7]
|
||||
jfif_density = i16(s, 8), i16(s, 10)
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
if jfif_unit == 1:
|
||||
self.info["dpi"] = jfif_density
|
||||
self.info["jfif_unit"] = jfif_unit
|
||||
self.info["jfif_density"] = jfif_density
|
||||
elif marker == 0xFFE1 and s[:6] == b"Exif\0\0":
|
||||
# extract EXIF information
|
||||
if "exif" in self.info:
|
||||
self.info["exif"] += s[6:]
|
||||
else:
|
||||
self.info["exif"] = s
|
||||
self._exif_offset = self.fp.tell() - n + 6
|
||||
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
|
||||
# extract FlashPix information (incomplete)
|
||||
self.info["flashpix"] = s # FIXME: value will change
|
||||
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
|
||||
# Since an ICC profile can be larger than the maximum size of
|
||||
# a JPEG marker (64K), we need provisions to split it into
|
||||
# multiple markers. The format defined by the ICC specifies
|
||||
# one or more APP2 markers containing the following data:
|
||||
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
|
||||
# Marker sequence number 1, 2, etc (1 byte)
|
||||
# Number of markers Total of APP2's used (1 byte)
|
||||
# Profile data (remainder of APP2 data)
|
||||
# Decoders should use the marker sequence numbers to
|
||||
# reassemble the profile, rather than assuming that the APP2
|
||||
# markers appear in the correct sequence.
|
||||
self.icclist.append(s)
|
||||
elif marker == 0xFFED and s[:14] == b"Photoshop 3.0\x00":
|
||||
# parse the image resource block
|
||||
offset = 14
|
||||
photoshop = self.info.setdefault("photoshop", {})
|
||||
while s[offset : offset + 4] == b"8BIM":
|
||||
try:
|
||||
offset += 4
|
||||
# resource code
|
||||
code = i16(s, offset)
|
||||
offset += 2
|
||||
# resource name (usually empty)
|
||||
name_len = s[offset]
|
||||
# name = s[offset+1:offset+1+name_len]
|
||||
offset += 1 + name_len
|
||||
offset += offset & 1 # align
|
||||
# resource data block
|
||||
size = i32(s, offset)
|
||||
offset += 4
|
||||
data = s[offset : offset + size]
|
||||
if code == 0x03ED: # ResolutionInfo
|
||||
data = {
|
||||
"XResolution": i32(data, 0) / 65536,
|
||||
"DisplayedUnitsX": i16(data, 4),
|
||||
"YResolution": i32(data, 8) / 65536,
|
||||
"DisplayedUnitsY": i16(data, 12),
|
||||
}
|
||||
photoshop[code] = data
|
||||
offset += size
|
||||
offset += offset & 1 # align
|
||||
except struct.error:
|
||||
break # insufficient data
|
||||
|
||||
elif marker == 0xFFEE and s[:5] == b"Adobe":
|
||||
self.info["adobe"] = i16(s, 5)
|
||||
# extract Adobe custom properties
|
||||
try:
|
||||
adobe_transform = s[11]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
self.info["adobe_transform"] = adobe_transform
|
||||
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
|
||||
# extract MPO information
|
||||
self.info["mp"] = s[4:]
|
||||
# offset is current location minus buffer size
|
||||
# plus constant header size
|
||||
self.info["mpoffset"] = self.fp.tell() - n + 4
|
||||
|
||||
# If DPI isn't in JPEG header, fetch from EXIF
|
||||
if "dpi" not in self.info and "exif" in self.info:
|
||||
try:
|
||||
exif = self.getexif()
|
||||
resolution_unit = exif[0x0128]
|
||||
x_resolution = exif[0x011A]
|
||||
try:
|
||||
dpi = float(x_resolution[0]) / x_resolution[1]
|
||||
except TypeError:
|
||||
dpi = x_resolution
|
||||
if math.isnan(dpi):
|
||||
msg = "DPI is not a number"
|
||||
raise ValueError(msg)
|
||||
if resolution_unit == 3: # cm
|
||||
# 1 dpcm = 2.54 dpi
|
||||
dpi *= 2.54
|
||||
self.info["dpi"] = dpi, dpi
|
||||
except (
|
||||
struct.error,
|
||||
KeyError,
|
||||
SyntaxError,
|
||||
TypeError,
|
||||
ValueError,
|
||||
ZeroDivisionError,
|
||||
):
|
||||
# struct.error for truncated EXIF
|
||||
# KeyError for dpi not included
|
||||
# SyntaxError for invalid/unreadable EXIF
|
||||
# ValueError or TypeError for dpi being an invalid float
|
||||
# ZeroDivisionError for invalid dpi rational value
|
||||
self.info["dpi"] = 72, 72
|
||||
|
||||
|
||||
def COM(self, marker):
|
||||
#
|
||||
# Comment marker. Store these in the APP dictionary.
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
|
||||
self.info["comment"] = s
|
||||
self.app["COM"] = s # compatibility
|
||||
self.applist.append(("COM", s))
|
||||
|
||||
|
||||
def SOF(self, marker):
|
||||
#
|
||||
# Start of frame marker. Defines the size and mode of the
|
||||
# image. JPEG is colour blind, so we use some simple
|
||||
# heuristics to map the number of layers to an appropriate
|
||||
# mode. Note that this could be made a bit brighter, by
|
||||
# looking for JFIF and Adobe APP markers.
|
||||
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
self._size = i16(s, 3), i16(s, 1)
|
||||
|
||||
self.bits = s[0]
|
||||
if self.bits != 8:
|
||||
msg = f"cannot handle {self.bits}-bit layers"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.layers = s[5]
|
||||
if self.layers == 1:
|
||||
self._mode = "L"
|
||||
elif self.layers == 3:
|
||||
self._mode = "RGB"
|
||||
elif self.layers == 4:
|
||||
self._mode = "CMYK"
|
||||
else:
|
||||
msg = f"cannot handle {self.layers}-layer images"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
|
||||
self.info["progressive"] = self.info["progression"] = 1
|
||||
|
||||
if self.icclist:
|
||||
# fixup icc profile
|
||||
self.icclist.sort() # sort by sequence number
|
||||
if self.icclist[0][13] == len(self.icclist):
|
||||
profile = [p[14:] for p in self.icclist]
|
||||
icc_profile = b"".join(profile)
|
||||
else:
|
||||
icc_profile = None # wrong number of fragments
|
||||
self.info["icc_profile"] = icc_profile
|
||||
self.icclist = []
|
||||
|
||||
for i in range(6, len(s), 3):
|
||||
t = s[i : i + 3]
|
||||
# 4-tuples: id, vsamp, hsamp, qtable
|
||||
self.layer.append((t[0], t[1] // 16, t[1] & 15, t[2]))
|
||||
|
||||
|
||||
def DQT(self, marker):
|
||||
#
|
||||
# Define quantization table. Note that there might be more
|
||||
# than one table in each marker.
|
||||
|
||||
# FIXME: The quantization tables can be used to estimate the
|
||||
# compression quality.
|
||||
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
while len(s):
|
||||
v = s[0]
|
||||
precision = 1 if (v // 16 == 0) else 2 # in bytes
|
||||
qt_length = 1 + precision * 64
|
||||
if len(s) < qt_length:
|
||||
msg = "bad quantization table marker"
|
||||
raise SyntaxError(msg)
|
||||
data = array.array("B" if precision == 1 else "H", s[1:qt_length])
|
||||
if sys.byteorder == "little" and precision > 1:
|
||||
data.byteswap() # the values are always big-endian
|
||||
self.quantization[v & 15] = [data[i] for i in zigzag_index]
|
||||
s = s[qt_length:]
|
||||
|
||||
|
||||
#
|
||||
# JPEG marker table
|
||||
|
||||
MARKER = {
|
||||
0xFFC0: ("SOF0", "Baseline DCT", SOF),
|
||||
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
|
||||
0xFFC2: ("SOF2", "Progressive DCT", SOF),
|
||||
0xFFC3: ("SOF3", "Spatial lossless", SOF),
|
||||
0xFFC4: ("DHT", "Define Huffman table", Skip),
|
||||
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
|
||||
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
|
||||
0xFFC7: ("SOF7", "Differential spatial", SOF),
|
||||
0xFFC8: ("JPG", "Extension", None),
|
||||
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
|
||||
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
|
||||
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
|
||||
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
|
||||
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
|
||||
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
|
||||
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
|
||||
0xFFD0: ("RST0", "Restart 0", None),
|
||||
0xFFD1: ("RST1", "Restart 1", None),
|
||||
0xFFD2: ("RST2", "Restart 2", None),
|
||||
0xFFD3: ("RST3", "Restart 3", None),
|
||||
0xFFD4: ("RST4", "Restart 4", None),
|
||||
0xFFD5: ("RST5", "Restart 5", None),
|
||||
0xFFD6: ("RST6", "Restart 6", None),
|
||||
0xFFD7: ("RST7", "Restart 7", None),
|
||||
0xFFD8: ("SOI", "Start of image", None),
|
||||
0xFFD9: ("EOI", "End of image", None),
|
||||
0xFFDA: ("SOS", "Start of scan", Skip),
|
||||
0xFFDB: ("DQT", "Define quantization table", DQT),
|
||||
0xFFDC: ("DNL", "Define number of lines", Skip),
|
||||
0xFFDD: ("DRI", "Define restart interval", Skip),
|
||||
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
|
||||
0xFFDF: ("EXP", "Expand reference component", Skip),
|
||||
0xFFE0: ("APP0", "Application segment 0", APP),
|
||||
0xFFE1: ("APP1", "Application segment 1", APP),
|
||||
0xFFE2: ("APP2", "Application segment 2", APP),
|
||||
0xFFE3: ("APP3", "Application segment 3", APP),
|
||||
0xFFE4: ("APP4", "Application segment 4", APP),
|
||||
0xFFE5: ("APP5", "Application segment 5", APP),
|
||||
0xFFE6: ("APP6", "Application segment 6", APP),
|
||||
0xFFE7: ("APP7", "Application segment 7", APP),
|
||||
0xFFE8: ("APP8", "Application segment 8", APP),
|
||||
0xFFE9: ("APP9", "Application segment 9", APP),
|
||||
0xFFEA: ("APP10", "Application segment 10", APP),
|
||||
0xFFEB: ("APP11", "Application segment 11", APP),
|
||||
0xFFEC: ("APP12", "Application segment 12", APP),
|
||||
0xFFED: ("APP13", "Application segment 13", APP),
|
||||
0xFFEE: ("APP14", "Application segment 14", APP),
|
||||
0xFFEF: ("APP15", "Application segment 15", APP),
|
||||
0xFFF0: ("JPG0", "Extension 0", None),
|
||||
0xFFF1: ("JPG1", "Extension 1", None),
|
||||
0xFFF2: ("JPG2", "Extension 2", None),
|
||||
0xFFF3: ("JPG3", "Extension 3", None),
|
||||
0xFFF4: ("JPG4", "Extension 4", None),
|
||||
0xFFF5: ("JPG5", "Extension 5", None),
|
||||
0xFFF6: ("JPG6", "Extension 6", None),
|
||||
0xFFF7: ("JPG7", "Extension 7", None),
|
||||
0xFFF8: ("JPG8", "Extension 8", None),
|
||||
0xFFF9: ("JPG9", "Extension 9", None),
|
||||
0xFFFA: ("JPG10", "Extension 10", None),
|
||||
0xFFFB: ("JPG11", "Extension 11", None),
|
||||
0xFFFC: ("JPG12", "Extension 12", None),
|
||||
0xFFFD: ("JPG13", "Extension 13", None),
|
||||
0xFFFE: ("COM", "Comment", COM),
|
||||
}
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
# Magic number was taken from https://en.wikipedia.org/wiki/JPEG
|
||||
return prefix[:3] == b"\xFF\xD8\xFF"
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for JPEG and JFIF images.
|
||||
|
||||
|
||||
class JpegImageFile(ImageFile.ImageFile):
|
||||
format = "JPEG"
|
||||
format_description = "JPEG (ISO 10918)"
|
||||
|
||||
def _open(self):
|
||||
s = self.fp.read(3)
|
||||
|
||||
if not _accept(s):
|
||||
msg = "not a JPEG file"
|
||||
raise SyntaxError(msg)
|
||||
s = b"\xFF"
|
||||
|
||||
# Create attributes
|
||||
self.bits = self.layers = 0
|
||||
|
||||
# JPEG specifics (internal)
|
||||
self.layer = []
|
||||
self.huffman_dc = {}
|
||||
self.huffman_ac = {}
|
||||
self.quantization = {}
|
||||
self.app = {} # compatibility
|
||||
self.applist = []
|
||||
self.icclist = []
|
||||
|
||||
while True:
|
||||
i = s[0]
|
||||
if i == 0xFF:
|
||||
s = s + self.fp.read(1)
|
||||
i = i16(s)
|
||||
else:
|
||||
# Skip non-0xFF junk
|
||||
s = self.fp.read(1)
|
||||
continue
|
||||
|
||||
if i in MARKER:
|
||||
name, description, handler = MARKER[i]
|
||||
if handler is not None:
|
||||
handler(self, i)
|
||||
if i == 0xFFDA: # start of scan
|
||||
rawmode = self.mode
|
||||
if self.mode == "CMYK":
|
||||
rawmode = "CMYK;I" # assume adobe conventions
|
||||
self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))]
|
||||
# self.__offset = self.fp.tell()
|
||||
break
|
||||
s = self.fp.read(1)
|
||||
elif i in {0, 0xFFFF}:
|
||||
# padded marker or junk; move on
|
||||
s = b"\xff"
|
||||
elif i == 0xFF00: # Skip extraneous data (escaped 0xFF)
|
||||
s = self.fp.read(1)
|
||||
else:
|
||||
msg = "no marker found"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
def load_read(self, read_bytes):
|
||||
"""
|
||||
internal: read more image data
|
||||
For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker
|
||||
so libjpeg can finish decoding
|
||||
"""
|
||||
s = self.fp.read(read_bytes)
|
||||
|
||||
if not s and ImageFile.LOAD_TRUNCATED_IMAGES and not hasattr(self, "_ended"):
|
||||
# Premature EOF.
|
||||
# Pretend file is finished adding EOI marker
|
||||
self._ended = True
|
||||
return b"\xFF\xD9"
|
||||
|
||||
return s
|
||||
|
||||
def draft(self, mode, size):
|
||||
if len(self.tile) != 1:
|
||||
return
|
||||
|
||||
# Protect from second call
|
||||
if self.decoderconfig:
|
||||
return
|
||||
|
||||
d, e, o, a = self.tile[0]
|
||||
scale = 1
|
||||
original_size = self.size
|
||||
|
||||
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
|
||||
self._mode = mode
|
||||
a = mode, ""
|
||||
|
||||
if size:
|
||||
scale = min(self.size[0] // size[0], self.size[1] // size[1])
|
||||
for s in [8, 4, 2, 1]:
|
||||
if scale >= s:
|
||||
break
|
||||
e = (
|
||||
e[0],
|
||||
e[1],
|
||||
(e[2] - e[0] + s - 1) // s + e[0],
|
||||
(e[3] - e[1] + s - 1) // s + e[1],
|
||||
)
|
||||
self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s)
|
||||
scale = s
|
||||
|
||||
self.tile = [(d, e, o, a)]
|
||||
self.decoderconfig = (scale, 0)
|
||||
|
||||
box = (0, 0, original_size[0] / scale, original_size[1] / scale)
|
||||
return self.mode, box
|
||||
|
||||
def load_djpeg(self):
|
||||
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
|
||||
|
||||
f, path = tempfile.mkstemp()
|
||||
os.close(f)
|
||||
if os.path.exists(self.filename):
|
||||
subprocess.check_call(["djpeg", "-outfile", path, self.filename])
|
||||
else:
|
||||
try:
|
||||
os.unlink(path)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
msg = "Invalid Filename"
|
||||
raise ValueError(msg)
|
||||
|
||||
try:
|
||||
with Image.open(path) as _im:
|
||||
_im.load()
|
||||
self.im = _im.im
|
||||
finally:
|
||||
try:
|
||||
os.unlink(path)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
self._mode = self.im.mode
|
||||
self._size = self.im.size
|
||||
|
||||
self.tile = []
|
||||
|
||||
def _getexif(self):
|
||||
return _getexif(self)
|
||||
|
||||
def _getmp(self):
|
||||
return _getmp(self)
|
||||
|
||||
def getxmp(self):
|
||||
"""
|
||||
Returns a dictionary containing the XMP tags.
|
||||
Requires defusedxml to be installed.
|
||||
|
||||
:returns: XMP tags in a dictionary.
|
||||
"""
|
||||
|
||||
for segment, content in self.applist:
|
||||
if segment == "APP1":
|
||||
marker, xmp_tags = content.split(b"\x00")[:2]
|
||||
if marker == b"http://ns.adobe.com/xap/1.0/":
|
||||
return self._getxmp(xmp_tags)
|
||||
return {}
|
||||
|
||||
|
||||
def _getexif(self):
|
||||
if "exif" not in self.info:
|
||||
return None
|
||||
return self.getexif()._get_merged_dict()
|
||||
|
||||
|
||||
def _getmp(self):
|
||||
# Extract MP information. This method was inspired by the "highly
|
||||
# experimental" _getexif version that's been in use for years now,
|
||||
# itself based on the ImageFileDirectory class in the TIFF plugin.
|
||||
|
||||
# The MP record essentially consists of a TIFF file embedded in a JPEG
|
||||
# application marker.
|
||||
try:
|
||||
data = self.info["mp"]
|
||||
except KeyError:
|
||||
return None
|
||||
file_contents = io.BytesIO(data)
|
||||
head = file_contents.read(8)
|
||||
endianness = ">" if head[:4] == b"\x4d\x4d\x00\x2a" else "<"
|
||||
# process dictionary
|
||||
from . import TiffImagePlugin
|
||||
|
||||
try:
|
||||
info = TiffImagePlugin.ImageFileDirectory_v2(head)
|
||||
file_contents.seek(info.next)
|
||||
info.load(file_contents)
|
||||
mp = dict(info)
|
||||
except Exception as e:
|
||||
msg = "malformed MP Index (unreadable directory)"
|
||||
raise SyntaxError(msg) from e
|
||||
# it's an error not to have a number of images
|
||||
try:
|
||||
quant = mp[0xB001]
|
||||
except KeyError as e:
|
||||
msg = "malformed MP Index (no number of images)"
|
||||
raise SyntaxError(msg) from e
|
||||
# get MP entries
|
||||
mpentries = []
|
||||
try:
|
||||
rawmpentries = mp[0xB002]
|
||||
for entrynum in range(0, quant):
|
||||
unpackedentry = struct.unpack_from(
|
||||
f"{endianness}LLLHH", rawmpentries, entrynum * 16
|
||||
)
|
||||
labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2")
|
||||
mpentry = dict(zip(labels, unpackedentry))
|
||||
mpentryattr = {
|
||||
"DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)),
|
||||
"DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)),
|
||||
"RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)),
|
||||
"Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27,
|
||||
"ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24,
|
||||
"MPType": mpentry["Attribute"] & 0x00FFFFFF,
|
||||
}
|
||||
if mpentryattr["ImageDataFormat"] == 0:
|
||||
mpentryattr["ImageDataFormat"] = "JPEG"
|
||||
else:
|
||||
msg = "unsupported picture format in MPO"
|
||||
raise SyntaxError(msg)
|
||||
mptypemap = {
|
||||
0x000000: "Undefined",
|
||||
0x010001: "Large Thumbnail (VGA Equivalent)",
|
||||
0x010002: "Large Thumbnail (Full HD Equivalent)",
|
||||
0x020001: "Multi-Frame Image (Panorama)",
|
||||
0x020002: "Multi-Frame Image: (Disparity)",
|
||||
0x020003: "Multi-Frame Image: (Multi-Angle)",
|
||||
0x030000: "Baseline MP Primary Image",
|
||||
}
|
||||
mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown")
|
||||
mpentry["Attribute"] = mpentryattr
|
||||
mpentries.append(mpentry)
|
||||
mp[0xB002] = mpentries
|
||||
except KeyError as e:
|
||||
msg = "malformed MP Index (bad MP Entry)"
|
||||
raise SyntaxError(msg) from e
|
||||
# Next we should try and parse the individual image unique ID list;
|
||||
# we don't because I've never seen this actually used in a real MPO
|
||||
# file and so can't test it.
|
||||
return mp
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# stuff to save JPEG files
|
||||
|
||||
RAWMODE = {
|
||||
"1": "L",
|
||||
"L": "L",
|
||||
"RGB": "RGB",
|
||||
"RGBX": "RGB",
|
||||
"CMYK": "CMYK;I", # assume adobe conventions
|
||||
"YCbCr": "YCbCr",
|
||||
}
|
||||
|
||||
# fmt: off
|
||||
zigzag_index = (
|
||||
0, 1, 5, 6, 14, 15, 27, 28,
|
||||
2, 4, 7, 13, 16, 26, 29, 42,
|
||||
3, 8, 12, 17, 25, 30, 41, 43,
|
||||
9, 11, 18, 24, 31, 40, 44, 53,
|
||||
10, 19, 23, 32, 39, 45, 52, 54,
|
||||
20, 22, 33, 38, 46, 51, 55, 60,
|
||||
21, 34, 37, 47, 50, 56, 59, 61,
|
||||
35, 36, 48, 49, 57, 58, 62, 63,
|
||||
)
|
||||
|
||||
samplings = {
|
||||
(1, 1, 1, 1, 1, 1): 0,
|
||||
(2, 1, 1, 1, 1, 1): 1,
|
||||
(2, 2, 1, 1, 1, 1): 2,
|
||||
}
|
||||
# fmt: on
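# Illustrative sketch (not part of the original module): the DQT handler above
# stores each quantization table as [data[i] for i in zigzag_index], i.e. it
# turns the zigzag scan order used in the JPEG stream into row-major order.
# The helper name below is hypothetical and only spells that step out.
def _dezigzag_sketch(zigzag_data):
    # zigzag_data: the 64 coefficients exactly as they appear in a DQT segment
    return [zigzag_data[i] for i in zigzag_index]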
|
||||
|
||||
|
||||
def get_sampling(im):
|
||||
# There's no subsampling when images have only 1 layer
|
||||
# (grayscale images) or when they are CMYK (4 layers),
|
||||
# so set subsampling to the default value.
|
||||
#
|
||||
# NOTE: currently Pillow can't encode JPEG to YCCK format.
|
||||
# If YCCK support is added in the future, subsampling code will have
|
||||
# to be updated (here and in JpegEncode.c) to deal with 4 layers.
|
||||
if not hasattr(im, "layers") or im.layers in (1, 4):
|
||||
return -1
|
||||
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
|
||||
return samplings.get(sampling, -1)
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if im.width == 0 or im.height == 0:
|
||||
msg = "cannot write empty image as JPEG"
|
||||
raise ValueError(msg)
|
||||
|
||||
try:
|
||||
rawmode = RAWMODE[im.mode]
|
||||
except KeyError as e:
|
||||
msg = f"cannot write mode {im.mode} as JPEG"
|
||||
raise OSError(msg) from e
|
||||
|
||||
info = im.encoderinfo
|
||||
|
||||
dpi = [round(x) for x in info.get("dpi", (0, 0))]
|
||||
|
||||
quality = info.get("quality", -1)
|
||||
subsampling = info.get("subsampling", -1)
|
||||
qtables = info.get("qtables")
|
||||
|
||||
if quality == "keep":
|
||||
quality = -1
|
||||
subsampling = "keep"
|
||||
qtables = "keep"
|
||||
elif quality in presets:
|
||||
preset = presets[quality]
|
||||
quality = -1
|
||||
subsampling = preset.get("subsampling", -1)
|
||||
qtables = preset.get("quantization")
|
||||
elif not isinstance(quality, int):
|
||||
msg = "Invalid quality setting"
|
||||
raise ValueError(msg)
|
||||
else:
|
||||
if subsampling in presets:
|
||||
subsampling = presets[subsampling].get("subsampling", -1)
|
||||
if isinstance(qtables, str) and qtables in presets:
|
||||
qtables = presets[qtables].get("quantization")
|
||||
|
||||
if subsampling == "4:4:4":
|
||||
subsampling = 0
|
||||
elif subsampling == "4:2:2":
|
||||
subsampling = 1
|
||||
elif subsampling == "4:2:0":
|
||||
subsampling = 2
|
||||
elif subsampling == "4:1:1":
|
||||
# For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0.
|
||||
# Set 4:2:0 if someone is still using that value.
|
||||
subsampling = 2
|
||||
elif subsampling == "keep":
|
||||
if im.format != "JPEG":
|
||||
msg = "Cannot use 'keep' when original image is not a JPEG"
|
||||
raise ValueError(msg)
|
||||
subsampling = get_sampling(im)
|
||||
|
||||
def validate_qtables(qtables):
|
||||
if qtables is None:
|
||||
return qtables
|
||||
if isinstance(qtables, str):
|
||||
try:
|
||||
lines = [
|
||||
int(num)
|
||||
for line in qtables.splitlines()
|
||||
for num in line.split("#", 1)[0].split()
|
||||
]
|
||||
except ValueError as e:
|
||||
msg = "Invalid quantization table"
|
||||
raise ValueError(msg) from e
|
||||
else:
|
||||
qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)]
|
||||
if isinstance(qtables, (tuple, list, dict)):
|
||||
if isinstance(qtables, dict):
|
||||
qtables = [
|
||||
qtables[key] for key in range(len(qtables)) if key in qtables
|
||||
]
|
||||
elif isinstance(qtables, tuple):
|
||||
qtables = list(qtables)
|
||||
if not (0 < len(qtables) < 5):
|
||||
msg = "None or too many quantization tables"
|
||||
raise ValueError(msg)
|
||||
for idx, table in enumerate(qtables):
|
||||
try:
|
||||
if len(table) != 64:
|
||||
msg = "Invalid quantization table"
|
||||
raise TypeError(msg)
|
||||
table = array.array("H", table)
|
||||
except TypeError as e:
|
||||
msg = "Invalid quantization table"
|
||||
raise ValueError(msg) from e
|
||||
else:
|
||||
qtables[idx] = list(table)
|
||||
return qtables
|
||||
|
||||
if qtables == "keep":
|
||||
if im.format != "JPEG":
|
||||
msg = "Cannot use 'keep' when original image is not a JPEG"
|
||||
raise ValueError(msg)
|
||||
qtables = getattr(im, "quantization", None)
|
||||
qtables = validate_qtables(qtables)
|
||||
|
||||
extra = info.get("extra", b"")
|
||||
|
||||
MAX_BYTES_IN_MARKER = 65533
|
||||
icc_profile = info.get("icc_profile")
|
||||
if icc_profile:
|
||||
ICC_OVERHEAD_LEN = 14
|
||||
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
|
||||
markers = []
|
||||
while icc_profile:
|
||||
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
|
||||
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
|
||||
i = 1
|
||||
for marker in markers:
|
||||
size = o16(2 + ICC_OVERHEAD_LEN + len(marker))
|
||||
extra += (
|
||||
b"\xFF\xE2"
|
||||
+ size
|
||||
+ b"ICC_PROFILE\0"
|
||||
+ o8(i)
|
||||
+ o8(len(markers))
|
||||
+ marker
|
||||
)
|
||||
i += 1
|
||||
|
||||
comment = info.get("comment", im.info.get("comment"))
|
||||
|
||||
# "progressive" is the official name, but older documentation
|
||||
# says "progression"
|
||||
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
|
||||
progressive = info.get("progressive", False) or info.get("progression", False)
|
||||
|
||||
optimize = info.get("optimize", False)
|
||||
|
||||
exif = info.get("exif", b"")
|
||||
if isinstance(exif, Image.Exif):
|
||||
exif = exif.tobytes()
|
||||
if len(exif) > MAX_BYTES_IN_MARKER:
|
||||
msg = "EXIF data is too long"
|
||||
raise ValueError(msg)
|
||||
|
||||
# get keyword arguments
|
||||
im.encoderconfig = (
|
||||
quality,
|
||||
progressive,
|
||||
info.get("smooth", 0),
|
||||
optimize,
|
||||
info.get("keep_rgb", False),
|
||||
info.get("streamtype", 0),
|
||||
dpi[0],
|
||||
dpi[1],
|
||||
subsampling,
|
||||
info.get("restart_marker_blocks", 0),
|
||||
info.get("restart_marker_rows", 0),
|
||||
qtables,
|
||||
comment,
|
||||
extra,
|
||||
exif,
|
||||
)
|
||||
|
||||
# if we optimize, libjpeg needs a buffer big enough to hold the whole image
|
||||
# in one shot. Guessing on the size, at im.size bytes (raw pixel size is
|
||||
# channels*size); this is a value that's been used in a django patch:
|
||||
# https://github.com/matthewwithanm/django-imagekit/issues/50
|
||||
bufsize = 0
|
||||
if optimize or progressive:
|
||||
# CMYK can be bigger
|
||||
if im.mode == "CMYK":
|
||||
bufsize = 4 * im.size[0] * im.size[1]
|
||||
# keep sets quality to -1, but the actual value may be high.
|
||||
elif quality >= 95 or quality == -1:
|
||||
bufsize = 2 * im.size[0] * im.size[1]
|
||||
else:
|
||||
bufsize = im.size[0] * im.size[1]
|
||||
if exif:
|
||||
bufsize += len(exif) + 5
|
||||
if extra:
|
||||
bufsize += len(extra) + 1
|
||||
else:
|
||||
# The EXIF info needs to be written as one block, + APP1, + one spare byte.
|
||||
# Ensure that our buffer is big enough. Same with the icc_profile block.
|
||||
bufsize = max(bufsize, len(exif) + 5, len(extra) + 1)
|
||||
|
||||
ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize)
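# Illustrative sketch (not part of the original module): _save above is reached
# through Image.save(), which forwards keyword arguments via im.encoderinfo.
# The path below is hypothetical; the options shown are ones handled above
# (integer quality, a preset name, or "keep"; subsampling; progressive; optimize).
def _example_jpeg_save(im):
    im.save(
        "example.jpg",
        quality=85,
        subsampling="4:2:0",  # mapped to 2 by the code above
        progressive=True,     # also accepted under the older name "progression"
        optimize=True,
    )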
|
||||
|
||||
|
||||
def _save_cjpeg(im, fp, filename):
|
||||
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
|
||||
tempfile = im._dump()
|
||||
subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
|
||||
try:
|
||||
os.unlink(tempfile)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
##
|
||||
# Factory for making JPEG and MPO instances
|
||||
def jpeg_factory(fp=None, filename=None):
|
||||
im = JpegImageFile(fp, filename)
|
||||
try:
|
||||
mpheader = im._getmp()
|
||||
if mpheader[45057] > 1:
|
||||
# It's actually an MPO
|
||||
from .MpoImagePlugin import MpoImageFile
|
||||
|
||||
# Don't reload everything, just convert it.
|
||||
im = MpoImageFile.adopt(im, mpheader)
|
||||
except (TypeError, IndexError):
|
||||
# It is really a JPEG
|
||||
pass
|
||||
except SyntaxError:
|
||||
warnings.warn(
|
||||
"Image appears to be a malformed MPO file, it will be "
|
||||
"interpreted as a base JPEG file"
|
||||
)
|
||||
return im
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
Image.register_open(JpegImageFile.format, jpeg_factory, _accept)
|
||||
Image.register_save(JpegImageFile.format, _save)
|
||||
|
||||
Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"])
|
||||
|
||||
Image.register_mime(JpegImageFile.format, "image/jpeg")
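# Illustrative sketch (not part of the original module): once the plugin is
# registered, the metadata gathered by the APP/SOF handlers above is available
# through the normal Image API. The path below is hypothetical.
def _example_jpeg_inspect():
    with Image.open("example.jpg") as im:
        dpi = im.info.get("dpi")          # from JFIF, or from EXIF as a fallback
        icc = im.info.get("icc_profile")  # reassembled from the APP2 chunks
        exif = im.getexif()               # parsed EXIF block, if any
        return im.size, im.mode, dpi, icc, exif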
|
@ -1,76 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Basic McIdas support for PIL
|
||||
#
|
||||
# History:
|
||||
# 1997-05-05 fl Created (8-bit images only)
|
||||
# 2009-03-08 fl Added 16/32-bit support.
|
||||
#
|
||||
# Thanks to Richard Jones and Craig Swank for specs and samples.
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import struct
|
||||
|
||||
from . import Image, ImageFile
|
||||
|
||||
|
||||
def _accept(s):
|
||||
return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for McIdas area images.
|
||||
|
||||
|
||||
class McIdasImageFile(ImageFile.ImageFile):
|
||||
format = "MCIDAS"
|
||||
format_description = "McIdas area file"
|
||||
|
||||
def _open(self):
|
||||
# parse area file directory
|
||||
s = self.fp.read(256)
|
||||
if not _accept(s) or len(s) != 256:
|
||||
msg = "not an McIdas area file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.area_descriptor_raw = s
|
||||
self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
|
||||
|
||||
# get mode
|
||||
if w[11] == 1:
|
||||
mode = rawmode = "L"
|
||||
elif w[11] == 2:
|
||||
# FIXME: add memory map support
|
||||
mode = "I"
|
||||
rawmode = "I;16B"
|
||||
elif w[11] == 4:
|
||||
# FIXME: add memory map support
|
||||
mode = "I"
|
||||
rawmode = "I;32B"
|
||||
else:
|
||||
msg = "unsupported McIdas format"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self._mode = mode
|
||||
self._size = w[10], w[9]
|
||||
|
||||
offset = w[34] + w[15]
|
||||
stride = w[15] + w[10] * w[11] * w[14]
|
||||
|
||||
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# registry
|
||||
|
||||
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
|
||||
|
||||
# no default extension
|
@ -1,107 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Microsoft Image Composer support for PIL
|
||||
#
|
||||
# Notes:
|
||||
# uses TiffImagePlugin.py to read the actual image streams
|
||||
#
|
||||
# History:
|
||||
# 97-01-20 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import olefile
|
||||
|
||||
from . import Image, TiffImagePlugin
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:8] == olefile.MAGIC
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for Microsoft's Image Composer file format.
|
||||
|
||||
|
||||
class MicImageFile(TiffImagePlugin.TiffImageFile):
|
||||
format = "MIC"
|
||||
format_description = "Microsoft Image Composer"
|
||||
_close_exclusive_fp_after_loading = False
|
||||
|
||||
def _open(self):
|
||||
# read the OLE directory and see if this is a likely
|
||||
# to be a Microsoft Image Composer file
|
||||
|
||||
try:
|
||||
self.ole = olefile.OleFileIO(self.fp)
|
||||
except OSError as e:
|
||||
msg = "not an MIC file; invalid OLE file"
|
||||
raise SyntaxError(msg) from e
|
||||
|
||||
# find ACI subfiles with Image members (maybe not the
|
||||
# best way to identify MIC files, but what the... ;-)
|
||||
|
||||
self.images = [
|
||||
path
|
||||
for path in self.ole.listdir()
|
||||
if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image"
|
||||
]
|
||||
|
||||
# if we didn't find any images, this is probably not
|
||||
# an MIC file.
|
||||
if not self.images:
|
||||
msg = "not an MIC file; no image entries"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self.frame = None
|
||||
self._n_frames = len(self.images)
|
||||
self.is_animated = self._n_frames > 1
|
||||
|
||||
self.__fp = self.fp
|
||||
self.seek(0)
|
||||
|
||||
def seek(self, frame):
|
||||
if not self._seek_check(frame):
|
||||
return
|
||||
try:
|
||||
filename = self.images[frame]
|
||||
except IndexError as e:
|
||||
msg = "no such frame"
|
||||
raise EOFError(msg) from e
|
||||
|
||||
self.fp = self.ole.openstream(filename)
|
||||
|
||||
TiffImagePlugin.TiffImageFile._open(self)
|
||||
|
||||
self.frame = frame
|
||||
|
||||
def tell(self):
|
||||
return self.frame
|
||||
|
||||
def close(self):
|
||||
self.__fp.close()
|
||||
self.ole.close()
|
||||
super().close()
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.__fp.close()
|
||||
self.ole.close()
|
||||
super().__exit__()
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open(MicImageFile.format, MicImageFile, _accept)
|
||||
|
||||
Image.register_extension(MicImageFile.format, ".mic")
|
@ -1,82 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# MPEG file handling
|
||||
#
|
||||
# History:
|
||||
# 95-09-09 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1995.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import i8
|
||||
|
||||
#
|
||||
# Bitstream parser
|
||||
|
||||
|
||||
class BitStream:
|
||||
def __init__(self, fp):
|
||||
self.fp = fp
|
||||
self.bits = 0
|
||||
self.bitbuffer = 0
|
||||
|
||||
def next(self):
|
||||
return i8(self.fp.read(1))
|
||||
|
||||
def peek(self, bits):
|
||||
while self.bits < bits:
|
||||
c = self.next()
|
||||
if c < 0:
|
||||
self.bits = 0
|
||||
continue
|
||||
self.bitbuffer = (self.bitbuffer << 8) + c
|
||||
self.bits += 8
|
||||
return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1
|
||||
|
||||
def skip(self, bits):
|
||||
while self.bits < bits:
|
||||
self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1))
|
||||
self.bits += 8
|
||||
self.bits = self.bits - bits
|
||||
|
||||
def read(self, bits):
|
||||
v = self.peek(bits)
|
||||
self.bits = self.bits - bits
|
||||
return v
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for MPEG streams. This plugin can identify a stream,
|
||||
# but it cannot read it.
|
||||
|
||||
|
||||
class MpegImageFile(ImageFile.ImageFile):
|
||||
format = "MPEG"
|
||||
format_description = "MPEG"
|
||||
|
||||
def _open(self):
|
||||
s = BitStream(self.fp)
|
||||
|
||||
if s.read(32) != 0x1B3:
|
||||
msg = "not an MPEG file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self._mode = "RGB"
|
||||
self._size = s.read(12), s.read(12)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
Image.register_open(MpegImageFile.format, MpegImageFile)
|
||||
|
||||
Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"])
|
||||
|
||||
Image.register_mime(MpegImageFile.format, "video/mpeg")
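# Illustrative sketch (not part of the original module): the BitStream reader
# above is what _open uses to pull the 32-bit sequence-header start code and
# the two 12-bit size fields. The byte string below is a hand-built example
# encoding a 320x240 sequence header, not data taken from a real file.
def _example_bitstream():
    import io

    s = BitStream(io.BytesIO(b"\x00\x00\x01\xb3\x14\x00\xf0"))
    assert s.read(32) == 0x1B3  # sequence header start code
    assert s.read(12) == 320    # horizontal size
    assert s.read(12) == 240    # vertical size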
|
@ -1,195 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# MPO file handling
|
||||
#
|
||||
# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
|
||||
# Camera & Imaging Products Association)
|
||||
#
|
||||
# The multi-picture object combines multiple JPEG images (with a modified EXIF
|
||||
# data format) into a single file. While it can theoretically be used much like
|
||||
# a GIF animation, it is commonly used to represent 3D photographs and is (as
|
||||
# of this writing) the most commonly used format by 3D cameras.
|
||||
#
|
||||
# History:
|
||||
# 2014-03-13 Feneric Created
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import os
|
||||
import struct
|
||||
|
||||
from . import (
|
||||
ExifTags,
|
||||
Image,
|
||||
ImageFile,
|
||||
ImageSequence,
|
||||
JpegImagePlugin,
|
||||
TiffImagePlugin,
|
||||
)
|
||||
from ._binary import i16be as i16
|
||||
from ._binary import o32le
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
JpegImagePlugin._save(im, fp, filename)
|
||||
|
||||
|
||||
def _save_all(im, fp, filename):
|
||||
append_images = im.encoderinfo.get("append_images", [])
|
||||
if not append_images:
|
||||
try:
|
||||
animated = im.is_animated
|
||||
except AttributeError:
|
||||
animated = False
|
||||
if not animated:
|
||||
_save(im, fp, filename)
|
||||
return
|
||||
|
||||
mpf_offset = 28
|
||||
offsets = []
|
||||
for imSequence in itertools.chain([im], append_images):
|
||||
for im_frame in ImageSequence.Iterator(imSequence):
|
||||
if not offsets:
|
||||
# APP2 marker
|
||||
im_frame.encoderinfo["extra"] = (
|
||||
b"\xFF\xE2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82
|
||||
)
|
||||
exif = im_frame.encoderinfo.get("exif")
|
||||
if isinstance(exif, Image.Exif):
|
||||
exif = exif.tobytes()
|
||||
im_frame.encoderinfo["exif"] = exif
|
||||
if exif:
|
||||
mpf_offset += 4 + len(exif)
|
||||
|
||||
JpegImagePlugin._save(im_frame, fp, filename)
|
||||
offsets.append(fp.tell())
|
||||
else:
|
||||
im_frame.save(fp, "JPEG")
|
||||
offsets.append(fp.tell() - offsets[-1])
|
||||
|
||||
ifd = TiffImagePlugin.ImageFileDirectory_v2()
|
||||
ifd[0xB000] = b"0100"
|
||||
ifd[0xB001] = len(offsets)
|
||||
|
||||
mpentries = b""
|
||||
data_offset = 0
|
||||
for i, size in enumerate(offsets):
|
||||
if i == 0:
|
||||
mptype = 0x030000 # Baseline MP Primary Image
|
||||
else:
|
||||
mptype = 0x000000 # Undefined
|
||||
mpentries += struct.pack("<LLLHH", mptype, size, data_offset, 0, 0)
|
||||
if i == 0:
|
||||
data_offset -= mpf_offset
|
||||
data_offset += size
|
||||
ifd[0xB002] = mpentries
|
||||
|
||||
fp.seek(mpf_offset)
|
||||
fp.write(b"II\x2A\x00" + o32le(8) + ifd.tobytes(8))
|
||||
fp.seek(0, os.SEEK_END)
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for MPO images.
|
||||
|
||||
|
||||
class MpoImageFile(JpegImagePlugin.JpegImageFile):
|
||||
format = "MPO"
|
||||
format_description = "MPO (CIPA DC-007)"
|
||||
_close_exclusive_fp_after_loading = False
|
||||
|
||||
def _open(self):
|
||||
self.fp.seek(0) # prep the fp in order to pass the JPEG test
|
||||
JpegImagePlugin.JpegImageFile._open(self)
|
||||
self._after_jpeg_open()
|
||||
|
||||
def _after_jpeg_open(self, mpheader=None):
|
||||
self._initial_size = self.size
|
||||
self.mpinfo = mpheader if mpheader is not None else self._getmp()
|
||||
self.n_frames = self.mpinfo[0xB001]
|
||||
self.__mpoffsets = [
|
||||
mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002]
|
||||
]
|
||||
self.__mpoffsets[0] = 0
|
||||
# Note that the following assertion will only be invalid if something
|
||||
# gets broken within JpegImagePlugin.
|
||||
assert self.n_frames == len(self.__mpoffsets)
|
||||
del self.info["mpoffset"] # no longer needed
|
||||
self.is_animated = self.n_frames > 1
|
||||
self._fp = self.fp # FIXME: hack
|
||||
self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame
|
||||
self.__frame = 0
|
||||
self.offset = 0
|
||||
# for now we can only handle reading and individual frame extraction
|
||||
self.readonly = 1
|
||||
|
||||
def load_seek(self, pos):
|
||||
self._fp.seek(pos)
|
||||
|
||||
def seek(self, frame):
|
||||
if not self._seek_check(frame):
|
||||
return
|
||||
self.fp = self._fp
|
||||
self.offset = self.__mpoffsets[frame]
|
||||
|
||||
self.fp.seek(self.offset + 2) # skip SOI marker
|
||||
segment = self.fp.read(2)
|
||||
if not segment:
|
||||
msg = "No data found for frame"
|
||||
raise ValueError(msg)
|
||||
self._size = self._initial_size
|
||||
if i16(segment) == 0xFFE1: # APP1
|
||||
n = i16(self.fp.read(2)) - 2
|
||||
self.info["exif"] = ImageFile._safe_read(self.fp, n)
|
||||
self._reload_exif()
|
||||
|
||||
mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"]
|
||||
if mptype.startswith("Large Thumbnail"):
|
||||
exif = self.getexif().get_ifd(ExifTags.IFD.Exif)
|
||||
if 40962 in exif and 40963 in exif:
|
||||
self._size = (exif[40962], exif[40963])
|
||||
elif "exif" in self.info:
|
||||
del self.info["exif"]
|
||||
self._reload_exif()
|
||||
|
||||
self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))]
|
||||
self.__frame = frame
|
||||
|
||||
def tell(self):
|
||||
return self.__frame
|
||||
|
||||
@staticmethod
|
||||
def adopt(jpeg_instance, mpheader=None):
|
||||
"""
|
||||
Transform the instance of JpegImageFile into
|
||||
an instance of MpoImageFile.
|
||||
After the call, the JpegImageFile is extended
|
||||
to be an MpoImageFile.
|
||||
|
||||
This is essentially useful when opening a JPEG
|
||||
file that reveals itself as an MPO, to avoid
|
||||
double call to _open.
|
||||
"""
|
||||
jpeg_instance.__class__ = MpoImageFile
|
||||
jpeg_instance._after_jpeg_open(mpheader)
|
||||
return jpeg_instance
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
# Note that since MPO shares a factory with JPEG, we do not need to do a
|
||||
# separate registration for it here.
|
||||
# Image.register_open(MpoImageFile.format,
|
||||
# JpegImagePlugin.jpeg_factory, _accept)
|
||||
Image.register_save(MpoImageFile.format, _save)
|
||||
Image.register_save_all(MpoImageFile.format, _save_all)
|
||||
|
||||
Image.register_extension(MpoImageFile.format, ".mpo")
|
||||
|
||||
Image.register_mime(MpoImageFile.format, "image/mpo")
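# Illustrative sketch (not part of the original module): reading the frames of
# an MPO through the seek/tell interface defined above. The file name is
# hypothetical; jpeg_factory in JpegImagePlugin hands such files to this class.
def _example_mpo_frames():
    with Image.open("example.mpo") as im:
        sizes = []
        for frame in range(getattr(im, "n_frames", 1)):
            im.seek(frame)
            sizes.append(im.size)
        return sizes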
|
@ -1,195 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
#
|
||||
# MSP file handling
|
||||
#
|
||||
# This is the format used by the Paint program in Windows 1 and 2.
|
||||
#
|
||||
# History:
|
||||
# 95-09-05 fl Created
|
||||
# 97-01-03 fl Read/write MSP images
|
||||
# 17-02-21 es Fixed RLE interpretation
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1995-97.
|
||||
# Copyright (c) Eric Soroos 2017.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
# More info on this format: https://archive.org/details/gg243631
|
||||
# Page 313:
|
||||
# Figure 205. Windows Paint Version 1: "DanM" Format
|
||||
# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03
|
||||
#
|
||||
# See also: https://www.fileformat.info/format/mspaint/egff.htm
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import struct
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import i16le as i16
|
||||
from ._binary import o16le as o16
|
||||
|
||||
#
|
||||
# read MSP files
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] in [b"DanM", b"LinS"]
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for Windows MSP images. This plugin supports both
|
||||
# uncompressed (Windows 1.0) and RLE-compressed (Windows 2.0) images.
|
||||
|
||||
|
||||
class MspImageFile(ImageFile.ImageFile):
|
||||
format = "MSP"
|
||||
format_description = "Windows Paint"
|
||||
|
||||
def _open(self):
|
||||
# Header
|
||||
s = self.fp.read(32)
|
||||
if not _accept(s):
|
||||
msg = "not an MSP file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
# Header checksum
|
||||
checksum = 0
|
||||
for i in range(0, 32, 2):
|
||||
checksum = checksum ^ i16(s, i)
|
||||
if checksum != 0:
|
||||
msg = "bad MSP checksum"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
self._mode = "1"
|
||||
self._size = i16(s, 4), i16(s, 6)
|
||||
|
||||
if s[:4] == b"DanM":
|
||||
self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))]
|
||||
else:
|
||||
self.tile = [("MSP", (0, 0) + self.size, 32, None)]
|
||||
|
||||
|
||||
class MspDecoder(ImageFile.PyDecoder):
|
||||
# The algo for the MSP decoder is from
|
||||
# https://www.fileformat.info/format/mspaint/egff.htm
|
||||
# cc-by-attribution -- the material on that page is taken from the
|
||||
# Encyclopedia of Graphics File Formats and is licensed by
|
||||
# O'Reilly under the Creative Commons Attribution license
|
||||
#
|
||||
# For RLE encoded files, the 32byte header is followed by a scan
|
||||
# line map, encoded as one 16bit word of encoded byte length per
|
||||
# line.
|
||||
#
|
||||
# NOTE: the encoded length of the line can be 0. This was not
|
||||
# handled in the previous version of this encoder, and there's no
|
||||
# mention of how to handle it in the documentation. From the few
|
||||
# examples I've seen, I've assumed that it is a fill of the
|
||||
# background color, in this case, white.
|
||||
#
|
||||
#
|
||||
# Pseudocode of the decoder:
|
||||
# Read a BYTE value as the RunType
|
||||
# If the RunType value is zero
|
||||
# Read next byte as the RunCount
|
||||
# Read the next byte as the RunValue
|
||||
# Write the RunValue byte RunCount times
|
||||
# If the RunType value is non-zero
|
||||
# Use this value as the RunCount
|
||||
# Read and write the next RunCount bytes literally
|
||||
#
|
||||
# e.g.:
|
||||
# 0x00 03 ff 05 00 01 02 03 04
|
||||
# would yield the bytes:
|
||||
# 0xff ff ff 00 01 02 03 04
|
||||
#
|
||||
# which are then interpreted as a bit packed mode '1' image
|
||||
|
||||
_pulls_fd = True
|
||||
|
||||
def decode(self, buffer):
|
||||
img = io.BytesIO()
|
||||
blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8))
|
||||
try:
|
||||
self.fd.seek(32)
|
||||
rowmap = struct.unpack_from(
|
||||
f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2)
|
||||
)
|
||||
except struct.error as e:
|
||||
msg = "Truncated MSP file in row map"
|
||||
raise OSError(msg) from e
|
||||
|
||||
for x, rowlen in enumerate(rowmap):
|
||||
try:
|
||||
if rowlen == 0:
|
||||
img.write(blank_line)
|
||||
continue
|
||||
row = self.fd.read(rowlen)
|
||||
if len(row) != rowlen:
|
||||
msg = f"Truncated MSP file, expected {rowlen} bytes on row {x}"
|
||||
raise OSError(msg)
|
||||
idx = 0
|
||||
while idx < rowlen:
|
||||
runtype = row[idx]
|
||||
idx += 1
|
||||
if runtype == 0:
|
||||
(runcount, runval) = struct.unpack_from("Bc", row, idx)
|
||||
img.write(runval * runcount)
|
||||
idx += 2
|
||||
else:
|
||||
runcount = runtype
|
||||
img.write(row[idx : idx + runcount])
|
||||
idx += runcount
|
||||
|
||||
except struct.error as e:
|
||||
msg = f"Corrupted MSP file in row {x}"
|
||||
raise OSError(msg) from e
|
||||
|
||||
self.set_as_raw(img.getvalue(), ("1", 0, 1))
|
||||
|
||||
return -1, 0
|
||||
|
||||
|
||||
Image.register_decoder("MSP", MspDecoder)
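# Illustrative sketch (not part of the original module): a standalone version
# of the RLE scheme described in the pseudocode comments inside MspDecoder
# (the helper name is hypothetical).
def _msp_rle_row_sketch(row):
    out = bytearray()
    idx = 0
    while idx < len(row):
        runtype = row[idx]
        idx += 1
        if runtype == 0:
            runcount, runval = row[idx], row[idx + 1 : idx + 2]
            out += runval * runcount         # repeat RunValue RunCount times
            idx += 2
        else:
            out += row[idx : idx + runtype]  # copy RunType bytes literally
            idx += runtype
    return bytes(out)
# Applied to the example bytes from the comments above:
# _msp_rle_row_sketch(bytes.fromhex("0003ff050001020304"))
#   == bytes.fromhex("ffffff0001020304")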
|
||||
|
||||
|
||||
#
|
||||
# write MSP files (uncompressed only)
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if im.mode != "1":
|
||||
msg = f"cannot write mode {im.mode} as MSP"
|
||||
raise OSError(msg)
|
||||
|
||||
# create MSP header
|
||||
header = [0] * 16
|
||||
|
||||
header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
|
||||
header[2], header[3] = im.size
|
||||
header[4], header[5] = 1, 1
|
||||
header[6], header[7] = 1, 1
|
||||
header[8], header[9] = im.size
|
||||
|
||||
checksum = 0
|
||||
for h in header:
|
||||
checksum = checksum ^ h
|
||||
header[12] = checksum # FIXME: is this the right field?
|
||||
|
||||
# header
|
||||
for h in header:
|
||||
fp.write(o16(h))
|
||||
|
||||
# image body
|
||||
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))])
|
||||
|
||||
|
||||
#
|
||||
# registry
|
||||
|
||||
Image.register_open(MspImageFile.format, MspImageFile, _accept)
|
||||
Image.register_save(MspImageFile.format, _save)
|
||||
|
||||
Image.register_extension(MspImageFile.format, ".msp")
|
@ -1,230 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# Simple PostScript graphics interface
|
||||
#
|
||||
# History:
|
||||
# 1996-04-20 fl Created
|
||||
# 1999-01-10 fl Added gsave/grestore to image method
|
||||
# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
|
||||
#
|
||||
# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
|
||||
# Copyright (c) 1996 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
from . import EpsImagePlugin
|
||||
|
||||
##
|
||||
# Simple PostScript graphics interface.
|
||||
|
||||
|
||||
class PSDraw:
|
||||
"""
|
||||
Sets up printing to the given file. If ``fp`` is omitted,
|
||||
``sys.stdout.buffer`` or ``sys.stdout`` is assumed.
|
||||
"""
|
||||
|
||||
def __init__(self, fp=None):
|
||||
if not fp:
|
||||
try:
|
||||
fp = sys.stdout.buffer
|
||||
except AttributeError:
|
||||
fp = sys.stdout
|
||||
self.fp = fp
|
||||
|
||||
def begin_document(self, id=None):
|
||||
"""Set up printing of a document. (Write PostScript DSC header.)"""
|
||||
# FIXME: incomplete
|
||||
self.fp.write(
|
||||
b"%!PS-Adobe-3.0\n"
|
||||
b"save\n"
|
||||
b"/showpage { } def\n"
|
||||
b"%%EndComments\n"
|
||||
b"%%BeginDocument\n"
|
||||
)
|
||||
# self.fp.write(ERROR_PS) # debugging!
|
||||
self.fp.write(EDROFF_PS)
|
||||
self.fp.write(VDI_PS)
|
||||
self.fp.write(b"%%EndProlog\n")
|
||||
self.isofont = {}
|
||||
|
||||
def end_document(self):
|
||||
"""Ends printing. (Write PostScript DSC footer.)"""
|
||||
self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n")
|
||||
if hasattr(self.fp, "flush"):
|
||||
self.fp.flush()
|
||||
|
||||
def setfont(self, font, size):
|
||||
"""
|
||||
Selects which font to use.
|
||||
|
||||
:param font: A PostScript font name
|
||||
:param size: Size in points.
|
||||
"""
|
||||
font = bytes(font, "UTF-8")
|
||||
if font not in self.isofont:
|
||||
# reencode font
|
||||
self.fp.write(b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font))
|
||||
self.isofont[font] = 1
|
||||
# rough
|
||||
self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font))
|
||||
|
||||
def line(self, xy0, xy1):
|
||||
"""
|
||||
Draws a line between the two points. Coordinates are given in
|
||||
PostScript point coordinates (72 points per inch, (0, 0) is the lower
|
||||
left corner of the page).
|
||||
"""
|
||||
self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1))
|
||||
|
||||
def rectangle(self, box):
|
||||
"""
|
||||
Draws a rectangle.
|
||||
|
||||
:param box: A tuple of four integers, specifying left, bottom, width and
|
||||
height.
|
||||
"""
|
||||
self.fp.write(b"%d %d M 0 %d %d Vr\n" % box)
|
||||
|
||||
def text(self, xy, text):
|
||||
"""
|
||||
Draws text at the given position. You must use
|
||||
:py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
|
||||
"""
|
||||
text = bytes(text, "UTF-8")
|
||||
text = b"\\(".join(text.split(b"("))
|
||||
text = b"\\)".join(text.split(b")"))
|
||||
xy += (text,)
|
||||
self.fp.write(b"%d %d M (%s) S\n" % xy)
|
||||
|
||||
def image(self, box, im, dpi=None):
|
||||
"""Draw a PIL image, centered in the given box."""
|
||||
# default resolution depends on mode
|
||||
if not dpi:
|
||||
if im.mode == "1":
|
||||
dpi = 200 # fax
|
||||
else:
|
||||
dpi = 100 # grayscale
|
||||
# image size (on paper)
|
||||
x = im.size[0] * 72 / dpi
|
||||
y = im.size[1] * 72 / dpi
|
||||
# max allowed size
|
||||
xmax = float(box[2] - box[0])
|
||||
ymax = float(box[3] - box[1])
|
||||
if x > xmax:
|
||||
y = y * xmax / x
|
||||
x = xmax
|
||||
if y > ymax:
|
||||
x = x * ymax / y
|
||||
y = ymax
|
||||
dx = (xmax - x) / 2 + box[0]
|
||||
dy = (ymax - y) / 2 + box[1]
|
||||
self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy))
|
||||
if (x, y) != im.size:
|
||||
# EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
|
||||
sx = x / im.size[0]
|
||||
sy = y / im.size[1]
|
||||
self.fp.write(b"%f %f scale\n" % (sx, sy))
|
||||
EpsImagePlugin._save(im, self.fp, None, 0)
|
||||
self.fp.write(b"\ngrestore\n")
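# Illustrative sketch (not part of the original module): typical use of the
# PSDraw class defined above. The output path is hypothetical; image() could
# be added to the sequence with a PIL image and a target box in the same way.
def _example_psdraw():
    with open("example.ps", "wb") as fp:
        ps = PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica", 12)
        ps.text((72, 720), "Hello from PSDraw")  # PostScript points, origin bottom left
        ps.line((72, 700), (540, 700))
        ps.rectangle((72, 600, 100, 50))         # left, bottom, width, height
        ps.end_document()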
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# PostScript driver
|
||||
|
||||
#
|
||||
# EDROFF.PS -- PostScript driver for Edroff 2
|
||||
#
|
||||
# History:
|
||||
# 94-01-25 fl: created (edroff 2.04)
|
||||
#
|
||||
# Copyright (c) Fredrik Lundh 1994.
|
||||
#
|
||||
|
||||
|
||||
EDROFF_PS = b"""\
|
||||
/S { show } bind def
|
||||
/P { moveto show } bind def
|
||||
/M { moveto } bind def
|
||||
/X { 0 rmoveto } bind def
|
||||
/Y { 0 exch rmoveto } bind def
|
||||
/E { findfont
|
||||
dup maxlength dict begin
|
||||
{
|
||||
1 index /FID ne { def } { pop pop } ifelse
|
||||
} forall
|
||||
/Encoding exch def
|
||||
dup /FontName exch def
|
||||
currentdict end definefont pop
|
||||
} bind def
|
||||
/F { findfont exch scalefont dup setfont
|
||||
[ exch /setfont cvx ] cvx bind def
|
||||
} bind def
|
||||
"""
|
||||
|
||||
#
|
||||
# VDI.PS -- PostScript driver for VDI meta commands
|
||||
#
|
||||
# History:
|
||||
# 94-01-25 fl: created (edroff 2.04)
|
||||
#
|
||||
# Copyright (c) Fredrik Lundh 1994.
|
||||
#
|
||||
|
||||
VDI_PS = b"""\
|
||||
/Vm { moveto } bind def
|
||||
/Va { newpath arcn stroke } bind def
|
||||
/Vl { moveto lineto stroke } bind def
|
||||
/Vc { newpath 0 360 arc closepath } bind def
|
||||
/Vr { exch dup 0 rlineto
|
||||
exch dup 0 exch rlineto
|
||||
exch neg 0 rlineto
|
||||
0 exch neg rlineto
|
||||
setgray fill } bind def
|
||||
/Tm matrix def
|
||||
/Ve { Tm currentmatrix pop
|
||||
translate scale newpath 0 0 .5 0 360 arc closepath
|
||||
Tm setmatrix
|
||||
} bind def
|
||||
/Vf { currentgray exch setgray fill setgray } bind def
|
||||
"""
|
||||
|
||||
#
|
||||
# ERROR.PS -- Error handler
|
||||
#
|
||||
# History:
|
||||
# 89-11-21 fl: created (pslist 1.10)
|
||||
#
|
||||
|
||||
ERROR_PS = b"""\
|
||||
/landscape false def
|
||||
/errorBUF 200 string def
|
||||
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
|
||||
errordict begin /handleerror {
|
||||
initmatrix /Courier findfont 10 scalefont setfont
|
||||
newpath 72 720 moveto $error begin /newerror false def
|
||||
(PostScript Error) show errorNL errorNL
|
||||
(Error: ) show
|
||||
/errorname load errorBUF cvs show errorNL errorNL
|
||||
(Command: ) show
|
||||
/command load dup type /stringtype ne { errorBUF cvs } if show
|
||||
errorNL errorNL
|
||||
(VMstatus: ) show
|
||||
vmstatus errorBUF cvs show ( bytes available, ) show
|
||||
errorBUF cvs show ( bytes used at level ) show
|
||||
errorBUF cvs show errorNL errorNL
|
||||
(Operand stack: ) show errorNL /ostack load {
|
||||
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
|
||||
} forall errorNL
|
||||
(Execution stack: ) show errorNL /estack load {
|
||||
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
|
||||
} forall
|
||||
end showpage
|
||||
} def end
|
||||
"""
|
@ -1,52 +0,0 @@
|
||||
#
|
||||
# Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# stuff to read simple, teragon-style palette files
|
||||
#
|
||||
# History:
|
||||
# 97-08-23 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
from ._binary import o8
|
||||
|
||||
|
||||
class PaletteFile:
|
||||
"""File handler for Teragon-style palette files."""
|
||||
|
||||
rawmode = "RGB"
|
||||
|
||||
def __init__(self, fp):
|
||||
self.palette = [o8(i) * 3 for i in range(256)]  # grayscale defaults, as bytes
|
||||
|
||||
while True:
|
||||
s = fp.readline()
|
||||
|
||||
if not s:
|
||||
break
|
||||
if s[:1] == b"#":
|
||||
continue
|
||||
if len(s) > 100:
|
||||
msg = "bad palette file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
v = [int(x) for x in s.split()]
|
||||
try:
|
||||
[i, r, g, b] = v
|
||||
except ValueError:
|
||||
[i, r] = v
|
||||
g = b = r
|
||||
|
||||
if 0 <= i <= 255:
|
||||
self.palette[i] = o8(r) + o8(g) + o8(b)
|
||||
|
||||
self.palette = b"".join(self.palette)
|
||||
|
||||
def getpalette(self):
|
||||
return self.palette, self.rawmode
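# Illustrative sketch (not part of the original module): feeding PaletteFile a
# small Teragon-style palette, one "index r g b" (or "index gray") line per
# entry, with "#" comment lines skipped as in the parser above.
def _example_palettefile():
    import io

    text = b"# sample palette\n0 255 0 0\n1 0 255 0\n2 128\n"
    palette, rawmode = PaletteFile(io.BytesIO(text)).getpalette()
    return palette[:9], rawmode  # raw RGB bytes of the first three entries, "RGB"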
|
@ -1,226 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
|
||||
##
|
||||
# Image plugin for Palm pixmap images (output only).
|
||||
##
|
||||
from __future__ import annotations
|
||||
|
||||
from . import Image, ImageFile
|
||||
from ._binary import o8
|
||||
from ._binary import o16be as o16b
|
||||
|
||||
# fmt: off
|
||||
_Palm8BitColormapValues = (
|
||||
(255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255),
|
||||
(255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204),
|
||||
(255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204),
|
||||
(255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153),
|
||||
(255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255),
|
||||
(204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255),
|
||||
(204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204),
|
||||
(204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153),
|
||||
(204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153),
|
||||
(153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255),
|
||||
(153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204),
|
||||
(153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204),
|
||||
(153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153),
|
||||
(153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255),
|
||||
(102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255),
|
||||
(102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204),
|
||||
(102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153),
|
||||
(102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153),
|
||||
(51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255),
|
||||
(51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204),
|
||||
(51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204),
|
||||
(51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153),
|
||||
(51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255),
|
||||
(0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255),
|
||||
(0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204),
|
||||
(0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153),
|
||||
    (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153),
    (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102),
    (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51),
    (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51),
    (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0),
    (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102),
    (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102),
    (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51),
    (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0),
    (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0),
    (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102),
    (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51),
    (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51),
    (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0),
    (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102),
    (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102),
    (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51),
    (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0),
    (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0),
    (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102),
    (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51),
    (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51),
    (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0),
    (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102),
    (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102),
    (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51),
    (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0),
    (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17),
    (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119),
    (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221),
    (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128),
    (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0))
# fmt: on


# so build a prototype image to be used for palette resampling
def build_prototype_image():
    image = Image.new("L", (1, len(_Palm8BitColormapValues)))
    image.putdata(list(range(len(_Palm8BitColormapValues))))
    palettedata = ()
    for colormapValue in _Palm8BitColormapValues:
        palettedata += colormapValue
    palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues))
    image.putpalette(palettedata)
    return image


Palm8BitColormapImage = build_prototype_image()

# OK, we now have in Palm8BitColormapImage,
# a "P"-mode image with the right palette
#
# --------------------------------------------------------------------

_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000}

_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00}


#
# --------------------------------------------------------------------

##
# (Internal) Image save plugin for the Palm format.

def _save(im, fp, filename):
    if im.mode == "P":
        # we assume this is a color Palm image with the standard colormap,
        # unless the "info" dict has a "custom-colormap" field

        rawmode = "P"
        bpp = 8
        version = 1

    elif im.mode == "L":
        if im.encoderinfo.get("bpp") in (1, 2, 4):
            # this is 8-bit grayscale, so we shift it to get the high-order bits,
            # and invert it because
            # Palm does grayscale from white (0) to black (1)
            bpp = im.encoderinfo["bpp"]
            im = im.point(
                lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift)
            )
        elif im.info.get("bpp") in (1, 2, 4):
            # here we assume that even though the inherent mode is 8-bit grayscale,
            # only the lower bpp bits are significant.
            # We invert them to match the Palm.
            bpp = im.info["bpp"]
            im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval))
        else:
            msg = f"cannot write mode {im.mode} as Palm"
            raise OSError(msg)

        # we ignore the palette here
        im.mode = "P"
        rawmode = "P;" + str(bpp)
        version = 1

    elif im.mode == "1":
        # monochrome -- write it inverted, as is the Palm standard
        rawmode = "1;I"
        bpp = 1
        version = 0

    else:
        msg = f"cannot write mode {im.mode} as Palm"
        raise OSError(msg)

    #
    # make sure image data is available
    im.load()

    # write header

    cols = im.size[0]
    rows = im.size[1]

    rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2
    transparent_index = 0
    compression_type = _COMPRESSION_TYPES["none"]

    flags = 0
    if im.mode == "P" and "custom-colormap" in im.info:
        flags = flags & _FLAGS["custom-colormap"]
        colormapsize = 4 * 256 + 2
        colormapmode = im.palette.mode
        colormap = im.getdata().getpalette()
    else:
        colormapsize = 0

    if "offset" in im.info:
        offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4
    else:
        offset = 0

    fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags))
    fp.write(o8(bpp))
    fp.write(o8(version))
    fp.write(o16b(offset))
    fp.write(o8(transparent_index))
    fp.write(o8(compression_type))
    fp.write(o16b(0))  # reserved by Palm

    # now write colormap if necessary

    if colormapsize > 0:
        fp.write(o16b(256))
        for i in range(256):
            fp.write(o8(i))
            if colormapmode == "RGB":
                fp.write(
                    o8(colormap[3 * i])
                    + o8(colormap[3 * i + 1])
                    + o8(colormap[3 * i + 2])
                )
            elif colormapmode == "RGBA":
                fp.write(
                    o8(colormap[4 * i])
                    + o8(colormap[4 * i + 1])
                    + o8(colormap[4 * i + 2])
                )

    # now convert data to raw form
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))])

    if hasattr(fp, "flush"):
        fp.flush()


#
# --------------------------------------------------------------------

Image.register_save("Palm", _save)

Image.register_extension("Palm", ".palm")

Image.register_mime("Palm", "image/palm")
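A minimal usage sketch for the save path above (not part of the original module; the file names are placeholders). Keyword arguments to Image.save() end up in im.encoderinfo, so passing bpp selects the 1/2/4-bit grayscale branch of _save():

from PIL import Image

im = Image.open("photo.png").convert("L")  # hypothetical input file
# bpp in (1, 2, 4) takes the shift-and-invert grayscale branch of _save();
# "P" images are written with the standard 8-bit Palm colormap instead.
im.save("photo.palm", format="Palm", bpp=4)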
@ -1,62 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# PCD file handling
#
# History:
# 96-05-10 fl Created
# 96-05-27 fl Added draft mode (128x192, 256x384)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile

##
# Image plugin for PhotoCD images. This plugin only reads the 768x512
# image from the file; higher resolutions are encoded in a proprietary
# encoding.


class PcdImageFile(ImageFile.ImageFile):
    format = "PCD"
    format_description = "Kodak PhotoCD"

    def _open(self):
        # rough
        self.fp.seek(2048)
        s = self.fp.read(2048)

        if s[:4] != b"PCD_":
            msg = "not a PCD file"
            raise SyntaxError(msg)

        orientation = s[1538] & 3
        self.tile_post_rotate = None
        if orientation == 1:
            self.tile_post_rotate = 90
        elif orientation == 3:
            self.tile_post_rotate = -90

        self._mode = "RGB"
        self._size = 768, 512  # FIXME: not correct for rotated images!
        self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)]

    def load_end(self):
        if self.tile_post_rotate:
            # Handle rotated PCDs
            self.im = self.im.rotate(self.tile_post_rotate)
            self._size = self.im.size


#
# registry

Image.register_open(PcdImageFile.format, PcdImageFile)

Image.register_extension(PcdImageFile.format, ".pcd")
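For orientation, a small hypothetical read example for the plugin above; only the 768x512 base image is decoded, and load_end() applies the rotation recorded at byte 1538 of the header:

from PIL import Image

with Image.open("scan.pcd") as im:   # "scan.pcd" is a placeholder path
    im.load()                        # decoding triggers load_end() and any rotation
    print(im.mode, im.size)          # "RGB", (768, 512) -- or (512, 768) once rotated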
@ -1,254 +0,0 @@
|
||||
#
|
||||
# THIS IS WORK IN PROGRESS
|
||||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# portable compiled font file parser
|
||||
#
|
||||
# history:
|
||||
# 1997-08-19 fl created
|
||||
# 2003-09-13 fl fixed loading of unicode fonts
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1997-2003 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
from typing import BinaryIO, Callable
|
||||
|
||||
from . import FontFile, Image
|
||||
from ._binary import i8
|
||||
from ._binary import i16be as b16
|
||||
from ._binary import i16le as l16
|
||||
from ._binary import i32be as b32
|
||||
from ._binary import i32le as l32
|
||||
|
||||
# --------------------------------------------------------------------
# declarations

PCF_MAGIC = 0x70636601  # "\x01fcp"

PCF_PROPERTIES = 1 << 0
PCF_ACCELERATORS = 1 << 1
PCF_METRICS = 1 << 2
PCF_BITMAPS = 1 << 3
PCF_INK_METRICS = 1 << 4
PCF_BDF_ENCODINGS = 1 << 5
PCF_SWIDTHS = 1 << 6
PCF_GLYPH_NAMES = 1 << 7
PCF_BDF_ACCELERATORS = 1 << 8

BYTES_PER_ROW: list[Callable[[int], int]] = [
    lambda bits: ((bits + 7) >> 3),
    lambda bits: ((bits + 15) >> 3) & ~1,
    lambda bits: ((bits + 31) >> 3) & ~3,
    lambda bits: ((bits + 63) >> 3) & ~7,
]


def sz(s: bytes, o: int) -> bytes:
    return s[o : s.index(b"\0", o)]

class PcfFontFile(FontFile.FontFile):
|
||||
"""Font file plugin for the X11 PCF format."""
|
||||
|
||||
name = "name"
|
||||
|
||||
def __init__(self, fp: BinaryIO, charset_encoding: str = "iso8859-1"):
|
||||
self.charset_encoding = charset_encoding
|
||||
|
||||
magic = l32(fp.read(4))
|
||||
if magic != PCF_MAGIC:
|
||||
msg = "not a PCF file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
super().__init__()
|
||||
|
||||
count = l32(fp.read(4))
|
||||
self.toc = {}
|
||||
for i in range(count):
|
||||
type = l32(fp.read(4))
|
||||
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
|
||||
|
||||
self.fp = fp
|
||||
|
||||
self.info = self._load_properties()
|
||||
|
||||
metrics = self._load_metrics()
|
||||
bitmaps = self._load_bitmaps(metrics)
|
||||
encoding = self._load_encoding()
|
||||
|
||||
#
|
||||
# create glyph structure
|
||||
|
||||
for ch, ix in enumerate(encoding):
|
||||
if ix is not None:
|
||||
(
|
||||
xsize,
|
||||
ysize,
|
||||
left,
|
||||
right,
|
||||
width,
|
||||
ascent,
|
||||
descent,
|
||||
attributes,
|
||||
) = metrics[ix]
|
||||
self.glyph[ch] = (
|
||||
(width, 0),
|
||||
(left, descent - ysize, xsize + left, descent),
|
||||
(0, 0, xsize, ysize),
|
||||
bitmaps[ix],
|
||||
)
|
||||
|
||||
def _getformat(
|
||||
self, tag: int
|
||||
) -> tuple[BinaryIO, int, Callable[[bytes], int], Callable[[bytes], int]]:
|
||||
format, size, offset = self.toc[tag]
|
||||
|
||||
fp = self.fp
|
||||
fp.seek(offset)
|
||||
|
||||
format = l32(fp.read(4))
|
||||
|
||||
if format & 4:
|
||||
i16, i32 = b16, b32
|
||||
else:
|
||||
i16, i32 = l16, l32
|
||||
|
||||
return fp, format, i16, i32
|
||||
|
||||
def _load_properties(self) -> dict[bytes, bytes | int]:
|
||||
#
|
||||
# font properties
|
||||
|
||||
properties = {}
|
||||
|
||||
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
|
||||
|
||||
nprops = i32(fp.read(4))
|
||||
|
||||
# read property description
|
||||
p = [(i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))) for _ in range(nprops)]
|
||||
|
||||
if nprops & 3:
|
||||
fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad
|
||||
|
||||
data = fp.read(i32(fp.read(4)))
|
||||
|
||||
for k, s, v in p:
|
||||
property_value: bytes | int = sz(data, v) if s else v
|
||||
properties[sz(data, k)] = property_value
|
||||
|
||||
return properties
|
||||
|
||||
def _load_metrics(self) -> list[tuple[int, int, int, int, int, int, int, int]]:
|
||||
#
|
||||
# font metrics
|
||||
|
||||
metrics: list[tuple[int, int, int, int, int, int, int, int]] = []
|
||||
|
||||
fp, format, i16, i32 = self._getformat(PCF_METRICS)
|
||||
|
||||
append = metrics.append
|
||||
|
||||
if (format & 0xFF00) == 0x100:
|
||||
# "compressed" metrics
|
||||
for i in range(i16(fp.read(2))):
|
||||
left = i8(fp.read(1)) - 128
|
||||
right = i8(fp.read(1)) - 128
|
||||
width = i8(fp.read(1)) - 128
|
||||
ascent = i8(fp.read(1)) - 128
|
||||
descent = i8(fp.read(1)) - 128
|
||||
xsize = right - left
|
||||
ysize = ascent + descent
|
||||
append((xsize, ysize, left, right, width, ascent, descent, 0))
|
||||
|
||||
else:
|
||||
# "jumbo" metrics
|
||||
for i in range(i32(fp.read(4))):
|
||||
left = i16(fp.read(2))
|
||||
right = i16(fp.read(2))
|
||||
width = i16(fp.read(2))
|
||||
ascent = i16(fp.read(2))
|
||||
descent = i16(fp.read(2))
|
||||
attributes = i16(fp.read(2))
|
||||
xsize = right - left
|
||||
ysize = ascent + descent
|
||||
append((xsize, ysize, left, right, width, ascent, descent, attributes))
|
||||
|
||||
return metrics
|
||||
|
||||
def _load_bitmaps(
|
||||
self, metrics: list[tuple[int, int, int, int, int, int, int, int]]
|
||||
) -> list[Image.Image]:
|
||||
#
|
||||
# bitmap data
|
||||
|
||||
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
|
||||
|
||||
nbitmaps = i32(fp.read(4))
|
||||
|
||||
if nbitmaps != len(metrics):
|
||||
msg = "Wrong number of bitmaps"
|
||||
raise OSError(msg)
|
||||
|
||||
offsets = [i32(fp.read(4)) for _ in range(nbitmaps)]
|
||||
|
||||
bitmap_sizes = [i32(fp.read(4)) for _ in range(4)]
|
||||
|
||||
# byteorder = format & 4 # non-zero => MSB
|
||||
bitorder = format & 8 # non-zero => MSB
|
||||
padindex = format & 3
|
||||
|
||||
bitmapsize = bitmap_sizes[padindex]
|
||||
offsets.append(bitmapsize)
|
||||
|
||||
data = fp.read(bitmapsize)
|
||||
|
||||
pad = BYTES_PER_ROW[padindex]
|
||||
mode = "1;R"
|
||||
if bitorder:
|
||||
mode = "1"
|
||||
|
||||
bitmaps = []
|
||||
for i in range(nbitmaps):
|
||||
xsize, ysize = metrics[i][:2]
|
||||
b, e = offsets[i : i + 2]
|
||||
bitmaps.append(
|
||||
Image.frombytes("1", (xsize, ysize), data[b:e], "raw", mode, pad(xsize))
|
||||
)
|
||||
|
||||
return bitmaps
|
||||
|
||||
def _load_encoding(self) -> list[int | None]:
|
||||
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
|
||||
|
||||
first_col, last_col = i16(fp.read(2)), i16(fp.read(2))
|
||||
first_row, last_row = i16(fp.read(2)), i16(fp.read(2))
|
||||
|
||||
i16(fp.read(2)) # default
|
||||
|
||||
nencoding = (last_col - first_col + 1) * (last_row - first_row + 1)
|
||||
|
||||
# map character code to bitmap index
|
||||
encoding: list[int | None] = [None] * min(256, nencoding)
|
||||
|
||||
encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)]
|
||||
|
||||
for i in range(first_col, len(encoding)):
|
||||
try:
|
||||
encoding_offset = encoding_offsets[
|
||||
ord(bytearray([i]).decode(self.charset_encoding))
|
||||
]
|
||||
if encoding_offset != 0xFFFF:
|
||||
encoding[i] = encoding_offset
|
||||
except UnicodeDecodeError:
|
||||
# character is not supported in selected encoding
|
||||
pass
|
||||
|
||||
return encoding
|
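A sketch of how this parser is usually driven, assuming the generic FontFile.save() helper from the same package; the font path and output name are illustrative:

from PIL import ImageFont, PcfFontFile

with open("7x13.pcf", "rb") as fp:     # hypothetical X11 bitmap font
    font_file = PcfFontFile.PcfFontFile(fp, charset_encoding="iso8859-1")
    font_file.save("7x13")             # writes the .pil metrics file plus a bitmap

font = ImageFont.load("7x13.pil")      # ready for use with ImageDraw.text()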
@ -1,222 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# PCX file handling
|
||||
#
|
||||
# This format was originally used by ZSoft's popular PaintBrush
|
||||
# program for the IBM PC. It is also supported by many MS-DOS and
|
||||
# Windows applications, including the Windows PaintBrush program in
|
||||
# Windows 3.
|
||||
#
|
||||
# history:
|
||||
# 1995-09-01 fl Created
|
||||
# 1996-05-20 fl Fixed RGB support
|
||||
# 1997-01-03 fl Fixed 2-bit and 4-bit support
|
||||
# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1)
|
||||
# 1999-02-07 fl Added write support
|
||||
# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust
|
||||
# 2002-07-30 fl Seek from to current position, not beginning of file
|
||||
# 2003-06-03 fl Extract DPI settings (info["dpi"])
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2003 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import logging
|
||||
|
||||
from . import Image, ImageFile, ImagePalette
|
||||
from ._binary import i16le as i16
|
||||
from ._binary import o8
|
||||
from ._binary import o16le as o16
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[0] == 10 and prefix[1] in [0, 2, 3, 5]
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for Paintbrush images.
|
||||
|
||||
|
||||
class PcxImageFile(ImageFile.ImageFile):
|
||||
format = "PCX"
|
||||
format_description = "Paintbrush"
|
||||
|
||||
def _open(self):
|
||||
# header
|
||||
s = self.fp.read(128)
|
||||
if not _accept(s):
|
||||
msg = "not a PCX file"
|
||||
raise SyntaxError(msg)
|
||||
|
||||
# image
|
||||
bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1
|
||||
if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
|
||||
msg = "bad PCX image size"
|
||||
raise SyntaxError(msg)
|
||||
logger.debug("BBox: %s %s %s %s", *bbox)
|
||||
|
||||
# format
|
||||
version = s[1]
|
||||
bits = s[3]
|
||||
planes = s[65]
|
||||
provided_stride = i16(s, 66)
|
||||
logger.debug(
|
||||
"PCX version %s, bits %s, planes %s, stride %s",
|
||||
version,
|
||||
bits,
|
||||
planes,
|
||||
provided_stride,
|
||||
)
|
||||
|
||||
self.info["dpi"] = i16(s, 12), i16(s, 14)
|
||||
|
||||
if bits == 1 and planes == 1:
|
||||
mode = rawmode = "1"
|
||||
|
||||
elif bits == 1 and planes in (2, 4):
|
||||
mode = "P"
|
||||
rawmode = "P;%dL" % planes
|
||||
self.palette = ImagePalette.raw("RGB", s[16:64])
|
||||
|
||||
elif version == 5 and bits == 8 and planes == 1:
|
||||
mode = rawmode = "L"
|
||||
# FIXME: hey, this doesn't work with the incremental loader !!!
|
||||
self.fp.seek(-769, io.SEEK_END)
|
||||
s = self.fp.read(769)
|
||||
if len(s) == 769 and s[0] == 12:
|
||||
# check if the palette is linear grayscale
|
||||
for i in range(256):
|
||||
if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3:
|
||||
mode = rawmode = "P"
|
||||
break
|
||||
if mode == "P":
|
||||
self.palette = ImagePalette.raw("RGB", s[1:])
|
||||
self.fp.seek(128)
|
||||
|
||||
elif version == 5 and bits == 8 and planes == 3:
|
||||
mode = "RGB"
|
||||
rawmode = "RGB;L"
|
||||
|
||||
else:
|
||||
msg = "unknown PCX mode"
|
||||
raise OSError(msg)
|
||||
|
||||
self._mode = mode
|
||||
self._size = bbox[2] - bbox[0], bbox[3] - bbox[1]
|
||||
|
||||
# Don't trust the passed in stride.
|
||||
# Calculate the approximate position for ourselves.
|
||||
# CVE-2020-35653
|
||||
stride = (self._size[0] * bits + 7) // 8
|
||||
|
||||
# While the specification states that this must be even,
|
||||
# not all images follow this
|
||||
if provided_stride != stride:
|
||||
stride += stride % 2
|
||||
|
||||
bbox = (0, 0) + self.size
|
||||
logger.debug("size: %sx%s", *self.size)
|
||||
|
||||
self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))]
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# save PCX files
|
||||
|
||||
|
||||
SAVE = {
    # mode: (version, bits, planes, raw mode)
    "1": (2, 1, 1, "1"),
    "L": (5, 8, 1, "L"),
    "P": (5, 8, 1, "P"),
    "RGB": (5, 8, 3, "RGB;L"),
}
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
try:
|
||||
version, bits, planes, rawmode = SAVE[im.mode]
|
||||
except KeyError as e:
|
||||
msg = f"Cannot save {im.mode} images as PCX"
|
||||
raise ValueError(msg) from e
|
||||
|
||||
# bytes per plane
|
||||
stride = (im.size[0] * bits + 7) // 8
|
||||
# stride should be even
|
||||
stride += stride % 2
|
||||
# Stride needs to be kept in sync with the PcxEncode.c version.
|
||||
# Ideally it should be passed in in the state, but the bytes value
|
||||
# gets overwritten.
|
||||
|
||||
logger.debug(
|
||||
"PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d",
|
||||
im.size[0],
|
||||
bits,
|
||||
stride,
|
||||
)
|
||||
|
||||
# under windows, we could determine the current screen size with
|
||||
# "Image.core.display_mode()[1]", but I think that's overkill...
|
||||
|
||||
screen = im.size
|
||||
|
||||
dpi = 100, 100
|
||||
|
||||
# PCX header
|
||||
fp.write(
|
||||
o8(10)
|
||||
+ o8(version)
|
||||
+ o8(1)
|
||||
+ o8(bits)
|
||||
+ o16(0)
|
||||
+ o16(0)
|
||||
+ o16(im.size[0] - 1)
|
||||
+ o16(im.size[1] - 1)
|
||||
+ o16(dpi[0])
|
||||
+ o16(dpi[1])
|
||||
+ b"\0" * 24
|
||||
+ b"\xFF" * 24
|
||||
+ b"\0"
|
||||
+ o8(planes)
|
||||
+ o16(stride)
|
||||
+ o16(1)
|
||||
+ o16(screen[0])
|
||||
+ o16(screen[1])
|
||||
+ b"\0" * 54
|
||||
)
|
||||
|
||||
assert fp.tell() == 128
|
||||
|
||||
ImageFile._save(im, fp, [("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))])
|
||||
|
||||
if im.mode == "P":
|
||||
# colour palette
|
||||
fp.write(o8(12))
|
||||
palette = im.im.getpalette("RGB", "RGB")
|
||||
palette += b"\x00" * (768 - len(palette))
|
||||
fp.write(palette) # 768 bytes
|
||||
elif im.mode == "L":
|
||||
# grayscale palette
|
||||
fp.write(o8(12))
|
||||
for i in range(256):
|
||||
fp.write(o8(i) * 3)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
# registry


Image.register_open(PcxImageFile.format, PcxImageFile, _accept)
Image.register_save(PcxImageFile.format, _save)

Image.register_extension(PcxImageFile.format, ".pcx")

Image.register_mime(PcxImageFile.format, "image/x-pcx")
|
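To tie the SAVE table and the header writer above together, a brief round-trip sketch (file names are placeholders); the image mode picks the (version, bits, planes, rawmode) row used by _save():

from PIL import Image

im = Image.open("input.png").convert("P")   # "P" -> version 5, 8 bits, 1 plane
im.save("output.pcx")                       # 128-byte header, RLE scanlines, trailing palette

with Image.open("output.pcx") as back:
    print(back.mode, back.size, back.info.get("dpi"))   # dpi is read back from the header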
@ -1,303 +0,0 @@
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# PDF (Acrobat) file handling
|
||||
#
|
||||
# History:
|
||||
# 1996-07-16 fl Created
|
||||
# 1997-01-18 fl Fixed header
|
||||
# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.
|
||||
# 2004-02-24 fl Fixes for 1 and P images.
|
||||
#
|
||||
# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.
|
||||
# Copyright (c) 1996-1997 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
##
|
||||
# Image plugin for PDF images (output only).
|
||||
##
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
import time
|
||||
|
||||
from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
# object ids:
|
||||
# 1. catalogue
|
||||
# 2. pages
|
||||
# 3. image
|
||||
# 4. page
|
||||
# 5. page contents
|
||||
|
||||
|
||||
def _save_all(im, fp, filename):
|
||||
_save(im, fp, filename, save_all=True)
|
||||
|
||||
|
||||
##
|
||||
# (Internal) Image save plugin for the PDF format.
|
||||
|
||||
|
||||
def _write_image(im, filename, existing_pdf, image_refs):
|
||||
# FIXME: Should replace ASCIIHexDecode with RunLengthDecode
|
||||
# (packbits) or LZWDecode (tiff/lzw compression). Note that
|
||||
# PDF 1.2 also supports Flatedecode (zip compression).
|
||||
|
||||
params = None
|
||||
decode = None
|
||||
|
||||
#
|
||||
# Get image characteristics
|
||||
|
||||
width, height = im.size
|
||||
|
||||
dict_obj = {"BitsPerComponent": 8}
|
||||
if im.mode == "1":
|
||||
if features.check("libtiff"):
|
||||
filter = "CCITTFaxDecode"
|
||||
dict_obj["BitsPerComponent"] = 1
|
||||
params = PdfParser.PdfArray(
|
||||
[
|
||||
PdfParser.PdfDict(
|
||||
{
|
||||
"K": -1,
|
||||
"BlackIs1": True,
|
||||
"Columns": width,
|
||||
"Rows": height,
|
||||
}
|
||||
)
|
||||
]
|
||||
)
|
||||
else:
|
||||
filter = "DCTDecode"
|
||||
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
|
||||
procset = "ImageB" # grayscale
|
||||
elif im.mode == "L":
|
||||
filter = "DCTDecode"
|
||||
# params = f"<< /Predictor 15 /Columns {width-2} >>"
|
||||
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
|
||||
procset = "ImageB" # grayscale
|
||||
elif im.mode == "LA":
|
||||
filter = "JPXDecode"
|
||||
# params = f"<< /Predictor 15 /Columns {width-2} >>"
|
||||
procset = "ImageB" # grayscale
|
||||
dict_obj["SMaskInData"] = 1
|
||||
elif im.mode == "P":
|
||||
filter = "ASCIIHexDecode"
|
||||
palette = im.getpalette()
|
||||
dict_obj["ColorSpace"] = [
|
||||
PdfParser.PdfName("Indexed"),
|
||||
PdfParser.PdfName("DeviceRGB"),
|
||||
len(palette) // 3 - 1,
|
||||
PdfParser.PdfBinary(palette),
|
||||
]
|
||||
procset = "ImageI" # indexed color
|
||||
|
||||
if "transparency" in im.info:
|
||||
smask = im.convert("LA").getchannel("A")
|
||||
smask.encoderinfo = {}
|
||||
|
||||
image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]
|
||||
dict_obj["SMask"] = image_ref
|
||||
elif im.mode == "RGB":
|
||||
filter = "DCTDecode"
|
||||
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceRGB")
|
||||
procset = "ImageC" # color images
|
||||
elif im.mode == "RGBA":
|
||||
filter = "JPXDecode"
|
||||
procset = "ImageC" # color images
|
||||
dict_obj["SMaskInData"] = 1
|
||||
elif im.mode == "CMYK":
|
||||
filter = "DCTDecode"
|
||||
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceCMYK")
|
||||
procset = "ImageC" # color images
|
||||
decode = [1, 0, 1, 0, 1, 0, 1, 0]
|
||||
else:
|
||||
msg = f"cannot save mode {im.mode}"
|
||||
raise ValueError(msg)
|
||||
|
||||
#
|
||||
# image
|
||||
|
||||
op = io.BytesIO()
|
||||
|
||||
if filter == "ASCIIHexDecode":
|
||||
ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)])
|
||||
elif filter == "CCITTFaxDecode":
|
||||
im.save(
|
||||
op,
|
||||
"TIFF",
|
||||
compression="group4",
|
||||
# use a single strip
|
||||
strip_size=math.ceil(width / 8) * height,
|
||||
)
|
||||
elif filter == "DCTDecode":
|
||||
Image.SAVE["JPEG"](im, op, filename)
|
||||
elif filter == "JPXDecode":
|
||||
del dict_obj["BitsPerComponent"]
|
||||
Image.SAVE["JPEG2000"](im, op, filename)
|
||||
else:
|
||||
msg = f"unsupported PDF filter ({filter})"
|
||||
raise ValueError(msg)
|
||||
|
||||
stream = op.getvalue()
|
||||
if filter == "CCITTFaxDecode":
|
||||
stream = stream[8:]
|
||||
filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])
|
||||
else:
|
||||
filter = PdfParser.PdfName(filter)
|
||||
|
||||
image_ref = image_refs.pop(0)
|
||||
existing_pdf.write_obj(
|
||||
image_ref,
|
||||
stream=stream,
|
||||
Type=PdfParser.PdfName("XObject"),
|
||||
Subtype=PdfParser.PdfName("Image"),
|
||||
Width=width, # * 72.0 / x_resolution,
|
||||
Height=height, # * 72.0 / y_resolution,
|
||||
Filter=filter,
|
||||
Decode=decode,
|
||||
DecodeParms=params,
|
||||
**dict_obj,
|
||||
)
|
||||
|
||||
return image_ref, procset
|
||||
|
||||
|
||||
def _save(im, fp, filename, save_all=False):
|
||||
is_appending = im.encoderinfo.get("append", False)
|
||||
if is_appending:
|
||||
existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b")
|
||||
else:
|
||||
existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b")
|
||||
|
||||
dpi = im.encoderinfo.get("dpi")
|
||||
if dpi:
|
||||
x_resolution = dpi[0]
|
||||
y_resolution = dpi[1]
|
||||
else:
|
||||
x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0)
|
||||
|
||||
info = {
|
||||
"title": None
|
||||
if is_appending
|
||||
else os.path.splitext(os.path.basename(filename))[0],
|
||||
"author": None,
|
||||
"subject": None,
|
||||
"keywords": None,
|
||||
"creator": None,
|
||||
"producer": None,
|
||||
"creationDate": None if is_appending else time.gmtime(),
|
||||
"modDate": None if is_appending else time.gmtime(),
|
||||
}
|
||||
for k, default in info.items():
|
||||
v = im.encoderinfo.get(k) if k in im.encoderinfo else default
|
||||
if v:
|
||||
existing_pdf.info[k[0].upper() + k[1:]] = v
|
||||
|
||||
#
|
||||
# make sure image data is available
|
||||
im.load()
|
||||
|
||||
existing_pdf.start_writing()
|
||||
existing_pdf.write_header()
|
||||
existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver")
|
||||
|
||||
#
|
||||
# pages
|
||||
ims = [im]
|
||||
if save_all:
|
||||
append_images = im.encoderinfo.get("append_images", [])
|
||||
for append_im in append_images:
|
||||
append_im.encoderinfo = im.encoderinfo.copy()
|
||||
ims.append(append_im)
|
||||
number_of_pages = 0
|
||||
image_refs = []
|
||||
page_refs = []
|
||||
contents_refs = []
|
||||
for im in ims:
|
||||
im_number_of_pages = 1
|
||||
if save_all:
|
||||
try:
|
||||
im_number_of_pages = im.n_frames
|
||||
except AttributeError:
|
||||
# Image format does not have n_frames.
|
||||
# It is a single frame image
|
||||
pass
|
||||
number_of_pages += im_number_of_pages
|
||||
for i in range(im_number_of_pages):
|
||||
image_refs.append(existing_pdf.next_object_id(0))
|
||||
if im.mode == "P" and "transparency" in im.info:
|
||||
image_refs.append(existing_pdf.next_object_id(0))
|
||||
|
||||
page_refs.append(existing_pdf.next_object_id(0))
|
||||
contents_refs.append(existing_pdf.next_object_id(0))
|
||||
existing_pdf.pages.append(page_refs[-1])
|
||||
|
||||
#
|
||||
# catalog and list of pages
|
||||
existing_pdf.write_catalog()
|
||||
|
||||
page_number = 0
|
||||
for im_sequence in ims:
|
||||
im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
|
||||
for im in im_pages:
|
||||
image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)
|
||||
|
||||
#
|
||||
# page
|
||||
|
||||
existing_pdf.write_page(
|
||||
page_refs[page_number],
|
||||
Resources=PdfParser.PdfDict(
|
||||
ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
|
||||
XObject=PdfParser.PdfDict(image=image_ref),
|
||||
),
|
||||
MediaBox=[
|
||||
0,
|
||||
0,
|
||||
im.width * 72.0 / x_resolution,
|
||||
im.height * 72.0 / y_resolution,
|
||||
],
|
||||
Contents=contents_refs[page_number],
|
||||
)
|
||||
|
||||
#
|
||||
# page contents
|
||||
|
||||
page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % (
|
||||
im.width * 72.0 / x_resolution,
|
||||
im.height * 72.0 / y_resolution,
|
||||
)
|
||||
|
||||
existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)
|
||||
|
||||
page_number += 1
|
||||
|
||||
#
|
||||
# trailer
|
||||
existing_pdf.write_xref_and_trailer()
|
||||
if hasattr(fp, "flush"):
|
||||
fp.flush()
|
||||
existing_pdf.close()
|
||||
|
||||
|
||||
#
# --------------------------------------------------------------------


Image.register_save("PDF", _save)
Image.register_save_all("PDF", _save_all)

Image.register_extension("PDF", ".pdf")

Image.register_mime("PDF", "application/pdf")
|
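As a usage note for the save plugin above: the encoderinfo keys it reads (dpi or resolution, append, append_images, title, author, ...) are all passed straight through Image.save(). A short multi-page example with placeholder file names:

from PIL import Image

pages = [Image.open(name).convert("RGB") for name in ("p1.png", "p2.png", "p3.png")]
pages[0].save(
    "out.pdf",
    save_all=True,             # routes through _save_all(), one PDF page per frame/image
    append_images=pages[1:],   # additional images appended after the first
    resolution=150.0,          # scales pixels to points in the MediaBox computation
    title="Example document",
)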
@ -1,998 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import calendar
|
||||
import codecs
|
||||
import collections
|
||||
import mmap
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import zlib
|
||||
|
||||
|
||||
# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set
|
||||
# on page 656
|
||||
def encode_text(s):
|
||||
return codecs.BOM_UTF16_BE + s.encode("utf_16_be")
|
||||
|
||||
|
||||
PDFDocEncoding = {
|
||||
0x16: "\u0017",
|
||||
0x18: "\u02D8",
|
||||
0x19: "\u02C7",
|
||||
0x1A: "\u02C6",
|
||||
0x1B: "\u02D9",
|
||||
0x1C: "\u02DD",
|
||||
0x1D: "\u02DB",
|
||||
0x1E: "\u02DA",
|
||||
0x1F: "\u02DC",
|
||||
0x80: "\u2022",
|
||||
0x81: "\u2020",
|
||||
0x82: "\u2021",
|
||||
0x83: "\u2026",
|
||||
0x84: "\u2014",
|
||||
0x85: "\u2013",
|
||||
0x86: "\u0192",
|
||||
0x87: "\u2044",
|
||||
0x88: "\u2039",
|
||||
0x89: "\u203A",
|
||||
0x8A: "\u2212",
|
||||
0x8B: "\u2030",
|
||||
0x8C: "\u201E",
|
||||
0x8D: "\u201C",
|
||||
0x8E: "\u201D",
|
||||
0x8F: "\u2018",
|
||||
0x90: "\u2019",
|
||||
0x91: "\u201A",
|
||||
0x92: "\u2122",
|
||||
0x93: "\uFB01",
|
||||
0x94: "\uFB02",
|
||||
0x95: "\u0141",
|
||||
0x96: "\u0152",
|
||||
0x97: "\u0160",
|
||||
0x98: "\u0178",
|
||||
0x99: "\u017D",
|
||||
0x9A: "\u0131",
|
||||
0x9B: "\u0142",
|
||||
0x9C: "\u0153",
|
||||
0x9D: "\u0161",
|
||||
0x9E: "\u017E",
|
||||
0xA0: "\u20AC",
|
||||
}
|
||||
|
||||
|
||||
def decode_text(b):
    if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE:
        return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be")
    else:
        return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b)
|
||||
|
||||
|
||||
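A tiny round-trip check of the two text helpers above, assuming they are in scope; the commented values follow directly from their definitions:

encoded = encode_text("Hi")              # b"\xfe\xff\x00H\x00i" (UTF-16BE BOM + text)
assert decode_text(encoded) == "Hi"      # BOM detected, decoded as UTF-16BE
assert decode_text(b"\x83") == "\u2026"  # no BOM: byte mapped through PDFDocEncoding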
class PdfFormatError(RuntimeError):
|
||||
"""An error that probably indicates a syntactic or semantic error in the
|
||||
PDF file structure"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def check_format_condition(condition, error_message):
|
||||
if not condition:
|
||||
raise PdfFormatError(error_message)
|
||||
|
||||
|
||||
class IndirectReference(
|
||||
collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"])
|
||||
):
|
||||
def __str__(self):
|
||||
return f"{self.object_id} {self.generation} R"
|
||||
|
||||
def __bytes__(self):
|
||||
return self.__str__().encode("us-ascii")
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
other.__class__ is self.__class__
|
||||
and other.object_id == self.object_id
|
||||
and other.generation == self.generation
|
||||
)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.object_id, self.generation))
|
||||
|
||||
|
||||
class IndirectObjectDef(IndirectReference):
|
||||
def __str__(self):
|
||||
return f"{self.object_id} {self.generation} obj"
|
||||
|
||||
|
||||
class XrefTable:
|
||||
def __init__(self):
|
||||
self.existing_entries = {} # object ID => (offset, generation)
|
||||
self.new_entries = {} # object ID => (offset, generation)
|
||||
self.deleted_entries = {0: 65536} # object ID => generation
|
||||
self.reading_finished = False
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if self.reading_finished:
|
||||
self.new_entries[key] = value
|
||||
else:
|
||||
self.existing_entries[key] = value
|
||||
if key in self.deleted_entries:
|
||||
del self.deleted_entries[key]
|
||||
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return self.new_entries[key]
|
||||
except KeyError:
|
||||
return self.existing_entries[key]
|
||||
|
||||
def __delitem__(self, key):
|
||||
if key in self.new_entries:
|
||||
generation = self.new_entries[key][1] + 1
|
||||
del self.new_entries[key]
|
||||
self.deleted_entries[key] = generation
|
||||
elif key in self.existing_entries:
|
||||
generation = self.existing_entries[key][1] + 1
|
||||
self.deleted_entries[key] = generation
|
||||
elif key in self.deleted_entries:
|
||||
generation = self.deleted_entries[key]
|
||||
else:
|
||||
msg = (
|
||||
"object ID " + str(key) + " cannot be deleted because it doesn't exist"
|
||||
)
|
||||
raise IndexError(msg)
|
||||
|
||||
def __contains__(self, key):
|
||||
return key in self.existing_entries or key in self.new_entries
|
||||
|
||||
def __len__(self):
|
||||
return len(
|
||||
set(self.existing_entries.keys())
|
||||
| set(self.new_entries.keys())
|
||||
| set(self.deleted_entries.keys())
|
||||
)
|
||||
|
||||
def keys(self):
|
||||
return (
|
||||
set(self.existing_entries.keys()) - set(self.deleted_entries.keys())
|
||||
) | set(self.new_entries.keys())
|
||||
|
||||
def write(self, f):
|
||||
keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys()))
|
||||
deleted_keys = sorted(set(self.deleted_entries.keys()))
|
||||
startxref = f.tell()
|
||||
f.write(b"xref\n")
|
||||
while keys:
|
||||
# find a contiguous sequence of object IDs
|
||||
prev = None
|
||||
for index, key in enumerate(keys):
|
||||
if prev is None or prev + 1 == key:
|
||||
prev = key
|
||||
else:
|
||||
contiguous_keys = keys[:index]
|
||||
keys = keys[index:]
|
||||
break
|
||||
else:
|
||||
contiguous_keys = keys
|
||||
keys = None
|
||||
f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys)))
|
||||
for object_id in contiguous_keys:
|
||||
if object_id in self.new_entries:
|
||||
f.write(b"%010d %05d n \n" % self.new_entries[object_id])
|
||||
else:
|
||||
this_deleted_object_id = deleted_keys.pop(0)
|
||||
check_format_condition(
|
||||
object_id == this_deleted_object_id,
|
||||
f"expected the next deleted object ID to be {object_id}, "
|
||||
f"instead found {this_deleted_object_id}",
|
||||
)
|
||||
try:
|
||||
next_in_linked_list = deleted_keys[0]
|
||||
except IndexError:
|
||||
next_in_linked_list = 0
|
||||
f.write(
|
||||
b"%010d %05d f \n"
|
||||
% (next_in_linked_list, self.deleted_entries[object_id])
|
||||
)
|
||||
return startxref
|
||||
|
||||
|
||||
class PdfName:
|
||||
def __init__(self, name):
|
||||
if isinstance(name, PdfName):
|
||||
self.name = name.name
|
||||
elif isinstance(name, bytes):
|
||||
self.name = name
|
||||
else:
|
||||
self.name = name.encode("us-ascii")
|
||||
|
||||
def name_as_str(self):
|
||||
return self.name.decode("us-ascii")
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
isinstance(other, PdfName) and other.name == self.name
|
||||
) or other == self.name
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.name)
|
||||
|
||||
def __repr__(self):
|
||||
return f"PdfName({repr(self.name)})"
|
||||
|
||||
@classmethod
|
||||
def from_pdf_stream(cls, data):
|
||||
return cls(PdfParser.interpret_name(data))
|
||||
|
||||
allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"}
|
||||
|
||||
def __bytes__(self):
|
||||
result = bytearray(b"/")
|
||||
for b in self.name:
|
||||
if b in self.allowed_chars:
|
||||
result.append(b)
|
||||
else:
|
||||
result.extend(b"#%02X" % b)
|
||||
return bytes(result)
|
||||
|
||||
|
||||
class PdfArray(list):
|
||||
def __bytes__(self):
|
||||
return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]"
|
||||
|
||||
|
||||
class PdfDict(collections.UserDict):
|
||||
def __setattr__(self, key, value):
|
||||
if key == "data":
|
||||
collections.UserDict.__setattr__(self, key, value)
|
||||
else:
|
||||
self[key.encode("us-ascii")] = value
|
||||
|
||||
def __getattr__(self, key):
|
||||
try:
|
||||
value = self[key.encode("us-ascii")]
|
||||
except KeyError as e:
|
||||
raise AttributeError(key) from e
|
||||
if isinstance(value, bytes):
|
||||
value = decode_text(value)
|
||||
if key.endswith("Date"):
|
||||
if value.startswith("D:"):
|
||||
value = value[2:]
|
||||
|
||||
relationship = "Z"
|
||||
if len(value) > 17:
|
||||
relationship = value[14]
|
||||
offset = int(value[15:17]) * 60
|
||||
if len(value) > 20:
|
||||
offset += int(value[18:20])
|
||||
|
||||
format = "%Y%m%d%H%M%S"[: len(value) - 2]
|
||||
value = time.strptime(value[: len(format) + 2], format)
|
||||
if relationship in ["+", "-"]:
|
||||
offset *= 60
|
||||
if relationship == "+":
|
||||
offset *= -1
|
||||
value = time.gmtime(calendar.timegm(value) + offset)
|
||||
return value
|
||||
|
||||
def __bytes__(self):
|
||||
out = bytearray(b"<<")
|
||||
for key, value in self.items():
|
||||
if value is None:
|
||||
continue
|
||||
value = pdf_repr(value)
|
||||
out.extend(b"\n")
|
||||
out.extend(bytes(PdfName(key)))
|
||||
out.extend(b" ")
|
||||
out.extend(value)
|
||||
out.extend(b"\n>>")
|
||||
return bytes(out)
|
||||
|
||||
|
||||
class PdfBinary:
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
|
||||
def __bytes__(self):
|
||||
return b"<%s>" % b"".join(b"%02X" % b for b in self.data)
|
||||
|
||||
|
||||
class PdfStream:
|
||||
def __init__(self, dictionary, buf):
|
||||
self.dictionary = dictionary
|
||||
self.buf = buf
|
||||
|
||||
def decode(self):
|
||||
try:
|
||||
filter = self.dictionary.Filter
|
||||
except AttributeError:
|
||||
return self.buf
|
||||
if filter == b"FlateDecode":
|
||||
try:
|
||||
expected_length = self.dictionary.DL
|
||||
except AttributeError:
|
||||
expected_length = self.dictionary.Length
|
||||
return zlib.decompress(self.buf, bufsize=int(expected_length))
|
||||
else:
|
||||
msg = f"stream filter {repr(self.dictionary.Filter)} unknown/unsupported"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
def pdf_repr(x):
    if x is True:
        return b"true"
    elif x is False:
        return b"false"
    elif x is None:
        return b"null"
    elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)):
        return bytes(x)
    elif isinstance(x, (int, float)):
        return str(x).encode("us-ascii")
    elif isinstance(x, time.struct_time):
        return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")"
    elif isinstance(x, dict):
        return bytes(PdfDict(x))
    elif isinstance(x, list):
        return bytes(PdfArray(x))
    elif isinstance(x, str):
        return pdf_repr(encode_text(x))
    elif isinstance(x, bytes):
        # XXX escape more chars? handle binary garbage
        x = x.replace(b"\\", b"\\\\")
        x = x.replace(b"(", b"\\(")
        x = x.replace(b")", b"\\)")
        return b"(" + x + b")"
    else:
        return bytes(x)
|
||||
|
||||
|
||||
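For reference, a few concrete values produced by pdf_repr() as defined above; each line follows one branch of the function:

assert pdf_repr(True) == b"true"
assert pdf_repr(42) == b"42"
assert pdf_repr(PdfName("Type")) == b"/Type"
assert pdf_repr([1, 2.5, None]) == b"[ 1 2.5 null ]"
assert pdf_repr(b"a(b)c") == b"(a\\(b\\)c)"   # parentheses escaped, wrapped in ( )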
class PdfParser:
|
||||
"""Based on
|
||||
https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
|
||||
Supports PDF up to 1.4
|
||||
"""
|
||||
|
||||
def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"):
|
||||
if buf and f:
|
||||
msg = "specify buf or f or filename, but not both buf and f"
|
||||
raise RuntimeError(msg)
|
||||
self.filename = filename
|
||||
self.buf = buf
|
||||
self.f = f
|
||||
self.start_offset = start_offset
|
||||
self.should_close_buf = False
|
||||
self.should_close_file = False
|
||||
if filename is not None and f is None:
|
||||
self.f = f = open(filename, mode)
|
||||
self.should_close_file = True
|
||||
if f is not None:
|
||||
self.buf = buf = self.get_buf_from_file(f)
|
||||
self.should_close_buf = True
|
||||
if not filename and hasattr(f, "name"):
|
||||
self.filename = f.name
|
||||
self.cached_objects = {}
|
||||
if buf:
|
||||
self.read_pdf_info()
|
||||
else:
|
||||
self.file_size_total = self.file_size_this = 0
|
||||
self.root = PdfDict()
|
||||
self.root_ref = None
|
||||
self.info = PdfDict()
|
||||
self.info_ref = None
|
||||
self.page_tree_root = {}
|
||||
self.pages = []
|
||||
self.orig_pages = []
|
||||
self.pages_ref = None
|
||||
self.last_xref_section_offset = None
|
||||
self.trailer_dict = {}
|
||||
self.xref_table = XrefTable()
|
||||
self.xref_table.reading_finished = True
|
||||
if f:
|
||||
self.seek_end()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False # do not suppress exceptions
|
||||
|
||||
def start_writing(self):
|
||||
self.close_buf()
|
||||
self.seek_end()
|
||||
|
||||
def close_buf(self):
|
||||
try:
|
||||
self.buf.close()
|
||||
except AttributeError:
|
||||
pass
|
||||
self.buf = None
|
||||
|
||||
def close(self):
|
||||
if self.should_close_buf:
|
||||
self.close_buf()
|
||||
if self.f is not None and self.should_close_file:
|
||||
self.f.close()
|
||||
self.f = None
|
||||
|
||||
def seek_end(self):
|
||||
self.f.seek(0, os.SEEK_END)
|
||||
|
||||
def write_header(self):
|
||||
self.f.write(b"%PDF-1.4\n")
|
||||
|
||||
def write_comment(self, s):
|
||||
self.f.write(f"% {s}\n".encode())
|
||||
|
||||
def write_catalog(self):
|
||||
self.del_root()
|
||||
self.root_ref = self.next_object_id(self.f.tell())
|
||||
self.pages_ref = self.next_object_id(0)
|
||||
self.rewrite_pages()
|
||||
self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref)
|
||||
self.write_obj(
|
||||
self.pages_ref,
|
||||
Type=PdfName(b"Pages"),
|
||||
Count=len(self.pages),
|
||||
Kids=self.pages,
|
||||
)
|
||||
return self.root_ref
|
||||
|
||||
def rewrite_pages(self):
|
||||
pages_tree_nodes_to_delete = []
|
||||
for i, page_ref in enumerate(self.orig_pages):
|
||||
page_info = self.cached_objects[page_ref]
|
||||
del self.xref_table[page_ref.object_id]
|
||||
pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")])
|
||||
if page_ref not in self.pages:
|
||||
# the page has been deleted
|
||||
continue
|
||||
# make dict keys into strings for passing to write_page
|
||||
stringified_page_info = {}
|
||||
for key, value in page_info.items():
|
||||
# key should be a PdfName
|
||||
stringified_page_info[key.name_as_str()] = value
|
||||
stringified_page_info["Parent"] = self.pages_ref
|
||||
new_page_ref = self.write_page(None, **stringified_page_info)
|
||||
for j, cur_page_ref in enumerate(self.pages):
|
||||
if cur_page_ref == page_ref:
|
||||
# replace the page reference with the new one
|
||||
self.pages[j] = new_page_ref
|
||||
# delete redundant Pages tree nodes from xref table
|
||||
for pages_tree_node_ref in pages_tree_nodes_to_delete:
|
||||
while pages_tree_node_ref:
|
||||
pages_tree_node = self.cached_objects[pages_tree_node_ref]
|
||||
if pages_tree_node_ref.object_id in self.xref_table:
|
||||
del self.xref_table[pages_tree_node_ref.object_id]
|
||||
pages_tree_node_ref = pages_tree_node.get(b"Parent", None)
|
||||
self.orig_pages = []
|
||||
|
||||
def write_xref_and_trailer(self, new_root_ref=None):
|
||||
if new_root_ref:
|
||||
self.del_root()
|
||||
self.root_ref = new_root_ref
|
||||
if self.info:
|
||||
self.info_ref = self.write_obj(None, self.info)
|
||||
start_xref = self.xref_table.write(self.f)
|
||||
num_entries = len(self.xref_table)
|
||||
trailer_dict = {b"Root": self.root_ref, b"Size": num_entries}
|
||||
if self.last_xref_section_offset is not None:
|
||||
trailer_dict[b"Prev"] = self.last_xref_section_offset
|
||||
if self.info:
|
||||
trailer_dict[b"Info"] = self.info_ref
|
||||
self.last_xref_section_offset = start_xref
|
||||
self.f.write(
|
||||
b"trailer\n"
|
||||
+ bytes(PdfDict(trailer_dict))
|
||||
+ b"\nstartxref\n%d\n%%%%EOF" % start_xref
|
||||
)
|
||||
|
||||
def write_page(self, ref, *objs, **dict_obj):
|
||||
if isinstance(ref, int):
|
||||
ref = self.pages[ref]
|
||||
if "Type" not in dict_obj:
|
||||
dict_obj["Type"] = PdfName(b"Page")
|
||||
if "Parent" not in dict_obj:
|
||||
dict_obj["Parent"] = self.pages_ref
|
||||
return self.write_obj(ref, *objs, **dict_obj)
|
||||
|
||||
def write_obj(self, ref, *objs, **dict_obj):
|
||||
f = self.f
|
||||
if ref is None:
|
||||
ref = self.next_object_id(f.tell())
|
||||
else:
|
||||
self.xref_table[ref.object_id] = (f.tell(), ref.generation)
|
||||
f.write(bytes(IndirectObjectDef(*ref)))
|
||||
stream = dict_obj.pop("stream", None)
|
||||
if stream is not None:
|
||||
dict_obj["Length"] = len(stream)
|
||||
if dict_obj:
|
||||
f.write(pdf_repr(dict_obj))
|
||||
for obj in objs:
|
||||
f.write(pdf_repr(obj))
|
||||
if stream is not None:
|
||||
f.write(b"stream\n")
|
||||
f.write(stream)
|
||||
f.write(b"\nendstream\n")
|
||||
f.write(b"endobj\n")
|
||||
return ref
|
||||
|
||||
def del_root(self):
|
||||
if self.root_ref is None:
|
||||
return
|
||||
del self.xref_table[self.root_ref.object_id]
|
||||
del self.xref_table[self.root[b"Pages"].object_id]
|
||||
|
||||
@staticmethod
|
||||
def get_buf_from_file(f):
|
||||
if hasattr(f, "getbuffer"):
|
||||
return f.getbuffer()
|
||||
elif hasattr(f, "getvalue"):
|
||||
return f.getvalue()
|
||||
else:
|
||||
try:
|
||||
return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
|
||||
except ValueError: # cannot mmap an empty file
|
||||
return b""
|
||||
|
||||
def read_pdf_info(self):
|
||||
self.file_size_total = len(self.buf)
|
||||
self.file_size_this = self.file_size_total - self.start_offset
|
||||
self.read_trailer()
|
||||
self.root_ref = self.trailer_dict[b"Root"]
|
||||
self.info_ref = self.trailer_dict.get(b"Info", None)
|
||||
self.root = PdfDict(self.read_indirect(self.root_ref))
|
||||
if self.info_ref is None:
|
||||
self.info = PdfDict()
|
||||
else:
|
||||
self.info = PdfDict(self.read_indirect(self.info_ref))
|
||||
check_format_condition(b"Type" in self.root, "/Type missing in Root")
|
||||
check_format_condition(
|
||||
self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog"
|
||||
)
|
||||
check_format_condition(b"Pages" in self.root, "/Pages missing in Root")
|
||||
check_format_condition(
|
||||
isinstance(self.root[b"Pages"], IndirectReference),
|
||||
"/Pages in Root is not an indirect reference",
|
||||
)
|
||||
self.pages_ref = self.root[b"Pages"]
|
||||
self.page_tree_root = self.read_indirect(self.pages_ref)
|
||||
self.pages = self.linearize_page_tree(self.page_tree_root)
|
||||
# save the original list of page references
|
||||
# in case the user modifies, adds or deletes some pages
|
||||
# and we need to rewrite the pages and their list
|
||||
self.orig_pages = self.pages[:]
|
||||
|
||||
def next_object_id(self, offset=None):
|
||||
try:
|
||||
# TODO: support reuse of deleted objects
|
||||
reference = IndirectReference(max(self.xref_table.keys()) + 1, 0)
|
||||
except ValueError:
|
||||
reference = IndirectReference(1, 0)
|
||||
if offset is not None:
|
||||
self.xref_table[reference.object_id] = (offset, 0)
|
||||
return reference
|
||||
|
||||
delimiter = rb"[][()<>{}/%]"
|
||||
delimiter_or_ws = rb"[][()<>{}/%\000\011\012\014\015\040]"
|
||||
whitespace = rb"[\000\011\012\014\015\040]"
|
||||
whitespace_or_hex = rb"[\000\011\012\014\015\0400-9a-fA-F]"
|
||||
whitespace_optional = whitespace + b"*"
|
||||
whitespace_mandatory = whitespace + b"+"
|
||||
# No "\012" aka "\n" or "\015" aka "\r":
|
||||
whitespace_optional_no_nl = rb"[\000\011\014\040]*"
|
||||
newline_only = rb"[\r\n]+"
|
||||
newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl
|
||||
re_trailer_end = re.compile(
|
||||
whitespace_mandatory
|
||||
+ rb"trailer"
|
||||
+ whitespace_optional
|
||||
+ rb"<<(.*>>)"
|
||||
+ newline
|
||||
+ rb"startxref"
|
||||
+ newline
|
||||
+ rb"([0-9]+)"
|
||||
+ newline
|
||||
+ rb"%%EOF"
|
||||
+ whitespace_optional
|
||||
+ rb"$",
|
||||
re.DOTALL,
|
||||
)
|
||||
re_trailer_prev = re.compile(
|
||||
whitespace_optional
|
||||
+ rb"trailer"
|
||||
+ whitespace_optional
|
||||
+ rb"<<(.*?>>)"
|
||||
+ newline
|
||||
+ rb"startxref"
|
||||
+ newline
|
||||
+ rb"([0-9]+)"
|
||||
+ newline
|
||||
+ rb"%%EOF"
|
||||
+ whitespace_optional,
|
||||
re.DOTALL,
|
||||
)
|
||||
|
||||
def read_trailer(self):
|
||||
search_start_offset = len(self.buf) - 16384
|
||||
if search_start_offset < self.start_offset:
|
||||
search_start_offset = self.start_offset
|
||||
m = self.re_trailer_end.search(self.buf, search_start_offset)
|
||||
check_format_condition(m, "trailer end not found")
|
||||
# make sure we found the LAST trailer
|
||||
last_match = m
|
||||
while m:
|
||||
last_match = m
|
||||
m = self.re_trailer_end.search(self.buf, m.start() + 16)
|
||||
if not m:
|
||||
m = last_match
|
||||
trailer_data = m.group(1)
|
||||
self.last_xref_section_offset = int(m.group(2))
|
||||
self.trailer_dict = self.interpret_trailer(trailer_data)
|
||||
self.xref_table = XrefTable()
|
||||
self.read_xref_table(xref_section_offset=self.last_xref_section_offset)
|
||||
if b"Prev" in self.trailer_dict:
|
||||
self.read_prev_trailer(self.trailer_dict[b"Prev"])
|
||||
|
||||
def read_prev_trailer(self, xref_section_offset):
|
||||
trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset)
|
||||
m = self.re_trailer_prev.search(
|
||||
self.buf[trailer_offset : trailer_offset + 16384]
|
||||
)
|
||||
check_format_condition(m, "previous trailer not found")
|
||||
trailer_data = m.group(1)
|
||||
check_format_condition(
|
||||
int(m.group(2)) == xref_section_offset,
|
||||
"xref section offset in previous trailer doesn't match what was expected",
|
||||
)
|
||||
trailer_dict = self.interpret_trailer(trailer_data)
|
||||
if b"Prev" in trailer_dict:
|
||||
self.read_prev_trailer(trailer_dict[b"Prev"])
|
||||
|
||||
re_whitespace_optional = re.compile(whitespace_optional)
|
||||
re_name = re.compile(
|
||||
whitespace_optional
|
||||
+ rb"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?="
|
||||
+ delimiter_or_ws
|
||||
+ rb")"
|
||||
)
|
||||
re_dict_start = re.compile(whitespace_optional + rb"<<")
|
||||
re_dict_end = re.compile(whitespace_optional + rb">>" + whitespace_optional)
|
||||
|
||||
@classmethod
|
||||
def interpret_trailer(cls, trailer_data):
|
||||
trailer = {}
|
||||
offset = 0
|
||||
while True:
|
||||
m = cls.re_name.match(trailer_data, offset)
|
||||
if not m:
|
||||
m = cls.re_dict_end.match(trailer_data, offset)
|
||||
check_format_condition(
|
||||
m and m.end() == len(trailer_data),
|
||||
"name not found in trailer, remaining data: "
|
||||
+ repr(trailer_data[offset:]),
|
||||
)
|
||||
break
|
||||
key = cls.interpret_name(m.group(1))
|
||||
value, offset = cls.get_value(trailer_data, m.end())
|
||||
trailer[key] = value
|
||||
check_format_condition(
|
||||
b"Size" in trailer and isinstance(trailer[b"Size"], int),
|
||||
"/Size not in trailer or not an integer",
|
||||
)
|
||||
check_format_condition(
|
||||
b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference),
|
||||
"/Root not in trailer or not an indirect reference",
|
||||
)
|
||||
return trailer
|
||||
|
||||
re_hashes_in_name = re.compile(rb"([^#]*)(#([0-9a-fA-F]{2}))?")
|
||||
|
||||
@classmethod
|
||||
def interpret_name(cls, raw, as_text=False):
|
||||
name = b""
|
||||
for m in cls.re_hashes_in_name.finditer(raw):
|
||||
if m.group(3):
|
||||
name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii"))
|
||||
else:
|
||||
name += m.group(1)
|
||||
if as_text:
|
||||
return name.decode("utf-8")
|
||||
else:
|
||||
return bytes(name)
|
||||
|
||||
re_null = re.compile(whitespace_optional + rb"null(?=" + delimiter_or_ws + rb")")
|
||||
re_true = re.compile(whitespace_optional + rb"true(?=" + delimiter_or_ws + rb")")
|
||||
re_false = re.compile(whitespace_optional + rb"false(?=" + delimiter_or_ws + rb")")
|
||||
re_int = re.compile(
|
||||
whitespace_optional + rb"([-+]?[0-9]+)(?=" + delimiter_or_ws + rb")"
|
||||
)
|
||||
re_real = re.compile(
|
||||
whitespace_optional
|
||||
+ rb"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?="
|
||||
+ delimiter_or_ws
|
||||
+ rb")"
|
||||
)
|
||||
re_array_start = re.compile(whitespace_optional + rb"\[")
|
||||
re_array_end = re.compile(whitespace_optional + rb"]")
|
||||
re_string_hex = re.compile(
|
||||
whitespace_optional + rb"<(" + whitespace_or_hex + rb"*)>"
|
||||
)
|
||||
re_string_lit = re.compile(whitespace_optional + rb"\(")
|
||||
re_indirect_reference = re.compile(
|
||||
whitespace_optional
|
||||
+ rb"([-+]?[0-9]+)"
|
||||
+ whitespace_mandatory
|
||||
+ rb"([-+]?[0-9]+)"
|
||||
+ whitespace_mandatory
|
||||
+ rb"R(?="
|
||||
+ delimiter_or_ws
|
||||
+ rb")"
|
||||
)
|
||||
re_indirect_def_start = re.compile(
|
||||
whitespace_optional
|
||||
+ rb"([-+]?[0-9]+)"
|
||||
+ whitespace_mandatory
|
||||
+ rb"([-+]?[0-9]+)"
|
||||
+ whitespace_mandatory
|
||||
+ rb"obj(?="
|
||||
+ delimiter_or_ws
|
||||
+ rb")"
|
||||
)
|
||||
re_indirect_def_end = re.compile(
|
||||
whitespace_optional + rb"endobj(?=" + delimiter_or_ws + rb")"
|
||||
)
|
||||
re_comment = re.compile(
|
||||
rb"(" + whitespace_optional + rb"%[^\r\n]*" + newline + rb")*"
|
||||
)
|
||||
re_stream_start = re.compile(whitespace_optional + rb"stream\r?\n")
|
||||
re_stream_end = re.compile(
|
||||
whitespace_optional + rb"endstream(?=" + delimiter_or_ws + rb")"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1):
|
||||
if max_nesting == 0:
|
||||
return None, None
|
||||
m = cls.re_comment.match(data, offset)
|
||||
if m:
|
||||
offset = m.end()
|
||||
m = cls.re_indirect_def_start.match(data, offset)
|
||||
if m:
|
||||
check_format_condition(
|
||||
int(m.group(1)) > 0,
|
||||
"indirect object definition: object ID must be greater than 0",
|
||||
)
|
||||
check_format_condition(
|
||||
int(m.group(2)) >= 0,
|
||||
"indirect object definition: generation must be non-negative",
|
||||
)
|
||||
check_format_condition(
|
||||
expect_indirect is None
|
||||
or expect_indirect
|
||||
== IndirectReference(int(m.group(1)), int(m.group(2))),
|
||||
"indirect object definition different than expected",
|
||||
)
|
||||
object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1)
|
||||
if offset is None:
|
||||
                return object, None
            m = cls.re_indirect_def_end.match(data, offset)
            check_format_condition(m, "indirect object definition end not found")
            return object, m.end()
        check_format_condition(
            not expect_indirect, "indirect object definition not found"
        )
        m = cls.re_indirect_reference.match(data, offset)
        if m:
            check_format_condition(
                int(m.group(1)) > 0,
                "indirect object reference: object ID must be greater than 0",
            )
            check_format_condition(
                int(m.group(2)) >= 0,
                "indirect object reference: generation must be non-negative",
            )
            return IndirectReference(int(m.group(1)), int(m.group(2))), m.end()
        m = cls.re_dict_start.match(data, offset)
        if m:
            offset = m.end()
            result = {}
            m = cls.re_dict_end.match(data, offset)
            while not m:
                key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                if offset is None:
                    return result, None
                value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                result[key] = value
                if offset is None:
                    return result, None
                m = cls.re_dict_end.match(data, offset)
            offset = m.end()
            m = cls.re_stream_start.match(data, offset)
            if m:
                try:
                    stream_len = int(result[b"Length"])
                except (TypeError, KeyError, ValueError) as e:
                    msg = "bad or missing Length in stream dict (%r)" % result.get(
                        b"Length", None
                    )
                    raise PdfFormatError(msg) from e
                stream_data = data[m.end() : m.end() + stream_len]
                m = cls.re_stream_end.match(data, m.end() + stream_len)
                check_format_condition(m, "stream end not found")
                offset = m.end()
                result = PdfStream(PdfDict(result), stream_data)
            else:
                result = PdfDict(result)
            return result, offset
        m = cls.re_array_start.match(data, offset)
        if m:
            offset = m.end()
            result = []
            m = cls.re_array_end.match(data, offset)
            while not m:
                value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                result.append(value)
                if offset is None:
                    return result, None
                m = cls.re_array_end.match(data, offset)
            return result, m.end()
        m = cls.re_null.match(data, offset)
        if m:
            return None, m.end()
        m = cls.re_true.match(data, offset)
        if m:
            return True, m.end()
        m = cls.re_false.match(data, offset)
        if m:
            return False, m.end()
        m = cls.re_name.match(data, offset)
        if m:
            return PdfName(cls.interpret_name(m.group(1))), m.end()
        m = cls.re_int.match(data, offset)
        if m:
            return int(m.group(1)), m.end()
        m = cls.re_real.match(data, offset)
        if m:
            # XXX Decimal instead of float???
            return float(m.group(1)), m.end()
        m = cls.re_string_hex.match(data, offset)
        if m:
            # filter out whitespace
            hex_string = bytearray(
                b for b in m.group(1) if b in b"0123456789abcdefABCDEF"
            )
            if len(hex_string) % 2 == 1:
                # append a 0 if the length is not even - yes, at the end
                hex_string.append(ord(b"0"))
            return bytearray.fromhex(hex_string.decode("us-ascii")), m.end()
        m = cls.re_string_lit.match(data, offset)
        if m:
            return cls.get_literal_string(data, m.end())
        # return None, offset  # fallback (only for debugging)
        msg = "unrecognized object: " + repr(data[offset : offset + 32])
        raise PdfFormatError(msg)

    re_lit_str_token = re.compile(
        rb"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))"
    )
    escaped_chars = {
        b"n": b"\n",
        b"r": b"\r",
        b"t": b"\t",
        b"b": b"\b",
        b"f": b"\f",
        b"(": b"(",
        b")": b")",
        b"\\": b"\\",
        ord(b"n"): b"\n",
        ord(b"r"): b"\r",
        ord(b"t"): b"\t",
        ord(b"b"): b"\b",
        ord(b"f"): b"\f",
        ord(b"("): b"(",
        ord(b")"): b")",
        ord(b"\\"): b"\\",
    }

    @classmethod
    def get_literal_string(cls, data, offset):
        nesting_depth = 0
        result = bytearray()
        for m in cls.re_lit_str_token.finditer(data, offset):
            result.extend(data[offset : m.start()])
            if m.group(1):
                result.extend(cls.escaped_chars[m.group(1)[1]])
            elif m.group(2):
                result.append(int(m.group(2)[1:], 8))
            elif m.group(3):
                pass
            elif m.group(5):
                result.extend(b"\n")
            elif m.group(6):
                result.extend(b"(")
                nesting_depth += 1
            elif m.group(7):
                if nesting_depth == 0:
                    return bytes(result), m.end()
                result.extend(b")")
                nesting_depth -= 1
            offset = m.end()
        msg = "unfinished literal string"
        raise PdfFormatError(msg)

    re_xref_section_start = re.compile(whitespace_optional + rb"xref" + newline)
    re_xref_subsection_start = re.compile(
        whitespace_optional
        + rb"([0-9]+)"
        + whitespace_mandatory
        + rb"([0-9]+)"
        + whitespace_optional
        + newline_only
    )
    re_xref_entry = re.compile(rb"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)")

    def read_xref_table(self, xref_section_offset):
        subsection_found = False
        m = self.re_xref_section_start.match(
            self.buf, xref_section_offset + self.start_offset
        )
        check_format_condition(m, "xref section start not found")
        offset = m.end()
        while True:
            m = self.re_xref_subsection_start.match(self.buf, offset)
            if not m:
                check_format_condition(
                    subsection_found, "xref subsection start not found"
                )
                break
            subsection_found = True
            offset = m.end()
            first_object = int(m.group(1))
            num_objects = int(m.group(2))
            for i in range(first_object, first_object + num_objects):
                m = self.re_xref_entry.match(self.buf, offset)
                check_format_condition(m, "xref entry not found")
                offset = m.end()
                is_free = m.group(3) == b"f"
                if not is_free:
                    generation = int(m.group(2))
                    new_entry = (int(m.group(1)), generation)
                    if i not in self.xref_table:
                        self.xref_table[i] = new_entry
        return offset

    def read_indirect(self, ref, max_nesting=-1):
        offset, generation = self.xref_table[ref[0]]
        check_format_condition(
            generation == ref[1],
            f"expected to find generation {ref[1]} for object ID {ref[0]} in xref "
            f"table, instead found generation {generation} at offset {offset}",
        )
        value = self.get_value(
            self.buf,
            offset + self.start_offset,
            expect_indirect=IndirectReference(*ref),
            max_nesting=max_nesting,
        )[0]
        self.cached_objects[ref] = value
        return value

    def linearize_page_tree(self, node=None):
        if node is None:
            node = self.page_tree_root
        check_format_condition(
            node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages"
        )
        pages = []
        for kid in node[b"Kids"]:
            kid_object = self.read_indirect(kid)
            if kid_object[b"Type"] == b"Page":
                pages.append(kid)
            else:
                pages.extend(self.linearize_page_tree(node=kid_object))
        return pages
@ -1,70 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# PIXAR raster support for PIL
#
# history:
# 97-01-29 fl Created
#
# notes:
# This is incomplete; it is based on a few samples created with
# Photoshop 2.5 and 3.0, and a summary description provided by
# Greg Coats <gcoats@labiris.er.usgs.gov>. Hopefully, "L" and
# "RGBA" support will be added in future versions.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile
from ._binary import i16le as i16

#
# helpers


def _accept(prefix):
    return prefix[:4] == b"\200\350\000\000"


##
# Image plugin for PIXAR raster images.


class PixarImageFile(ImageFile.ImageFile):
    format = "PIXAR"
    format_description = "PIXAR raster image"

    def _open(self):
        # assuming a 4-byte magic label
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a PIXAR file"
            raise SyntaxError(msg)

        # read rest of header
        s = s + self.fp.read(508)

        self._size = i16(s, 418), i16(s, 416)

        # get channel/depth descriptions
        mode = i16(s, 424), i16(s, 426)

        if mode == (14, 2):
            self._mode = "RGB"
        # FIXME: to be continued...

        # create tile descriptor (assuming "dumped")
        self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))]


#
# --------------------------------------------------------------------

Image.register_open(PixarImageFile.format, PixarImageFile, _accept)

Image.register_extension(PixarImageFile.format, ".pxr")
File diff suppressed because it is too large
@ -1,344 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# PPM support for PIL
#
# History:
# 96-03-24 fl Created
# 98-03-06 fl Write RGBA images (as RGB, that is)
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image, ImageFile
from ._binary import i16be as i16
from ._binary import o8
from ._binary import o32le as o32

#
# --------------------------------------------------------------------

b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d"

MODES = {
    # standard
    b"P1": "1",
    b"P2": "L",
    b"P3": "RGB",
    b"P4": "1",
    b"P5": "L",
    b"P6": "RGB",
    # extensions
    b"P0CMYK": "CMYK",
    # PIL extensions (for test purposes only)
    b"PyP": "P",
    b"PyRGBA": "RGBA",
    b"PyCMYK": "CMYK",
}


def _accept(prefix):
    return prefix[0:1] == b"P" and prefix[1] in b"0123456y"


##
# Image plugin for PBM, PGM, and PPM images.


class PpmImageFile(ImageFile.ImageFile):
    format = "PPM"
    format_description = "Pbmplus image"

    def _read_magic(self):
        magic = b""
        # read until whitespace or longest available magic number
        for _ in range(6):
            c = self.fp.read(1)
            if not c or c in b_whitespace:
                break
            magic += c
        return magic

    def _read_token(self):
        token = b""
        while len(token) <= 10:  # read until next whitespace or limit of 10 characters
            c = self.fp.read(1)
            if not c:
                break
            elif c in b_whitespace:  # token ended
                if not token:
                    # skip whitespace at start
                    continue
                break
            elif c == b"#":
                # ignores rest of the line; stops at CR, LF or EOF
                while self.fp.read(1) not in b"\r\n":
                    pass
                continue
            token += c
        if not token:
            # Token was not even 1 byte
            msg = "Reached EOF while reading header"
            raise ValueError(msg)
        elif len(token) > 10:
            msg = f"Token too long in file header: {token.decode()}"
            raise ValueError(msg)
        return token

    def _open(self):
        magic_number = self._read_magic()
        try:
            mode = MODES[magic_number]
        except KeyError:
            msg = "not a PPM file"
            raise SyntaxError(msg)

        if magic_number in (b"P1", b"P4"):
            self.custom_mimetype = "image/x-portable-bitmap"
        elif magic_number in (b"P2", b"P5"):
            self.custom_mimetype = "image/x-portable-graymap"
        elif magic_number in (b"P3", b"P6"):
            self.custom_mimetype = "image/x-portable-pixmap"

        maxval = None
        decoder_name = "raw"
        if magic_number in (b"P1", b"P2", b"P3"):
            decoder_name = "ppm_plain"
        for ix in range(3):
            token = int(self._read_token())
            if ix == 0:  # token is the x size
                xsize = token
            elif ix == 1:  # token is the y size
                ysize = token
                if mode == "1":
                    self._mode = "1"
                    rawmode = "1;I"
                    break
                else:
                    self._mode = rawmode = mode
            elif ix == 2:  # token is maxval
                maxval = token
                if not 0 < maxval < 65536:
                    msg = "maxval must be greater than 0 and less than 65536"
                    raise ValueError(msg)
                if maxval > 255 and mode == "L":
                    self._mode = "I"

        if decoder_name != "ppm_plain":
            # If maxval matches a bit depth, use the raw decoder directly
            if maxval == 65535 and mode == "L":
                rawmode = "I;16B"
            elif maxval != 255:
                decoder_name = "ppm"

        args = (rawmode, 0, 1) if decoder_name == "raw" else (rawmode, maxval)
        self._size = xsize, ysize
        self.tile = [(decoder_name, (0, 0, xsize, ysize), self.fp.tell(), args)]


#
# --------------------------------------------------------------------


class PpmPlainDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def _read_block(self):
        return self.fd.read(ImageFile.SAFEBLOCK)

    def _find_comment_end(self, block, start=0):
        a = block.find(b"\n", start)
        b = block.find(b"\r", start)
        return min(a, b) if a * b > 0 else max(a, b)  # lowest nonnegative index (or -1)

    def _ignore_comments(self, block):
        if self._comment_spans:
            # Finish current comment
            while block:
                comment_end = self._find_comment_end(block)
                if comment_end != -1:
                    # Comment ends in this block
                    # Delete tail of comment
                    block = block[comment_end + 1 :]
                    break
                else:
                    # Comment spans whole block
                    # So read the next block, looking for the end
                    block = self._read_block()

        # Search for any further comments
        self._comment_spans = False
        while True:
            comment_start = block.find(b"#")
            if comment_start == -1:
                # No comment found
                break
            comment_end = self._find_comment_end(block, comment_start)
            if comment_end != -1:
                # Comment ends in this block
                # Delete comment
                block = block[:comment_start] + block[comment_end + 1 :]
            else:
                # Comment continues to next block(s)
                block = block[:comment_start]
                self._comment_spans = True
                break
        return block

    def _decode_bitonal(self):
        """
        This is a separate method because in the plain PBM format, all data tokens are
        exactly one byte, so the inter-token whitespace is optional.
        """
        data = bytearray()
        total_bytes = self.state.xsize * self.state.ysize

        while len(data) != total_bytes:
            block = self._read_block()  # read next block
            if not block:
                # eof
                break

            block = self._ignore_comments(block)

            tokens = b"".join(block.split())
            for token in tokens:
                if token not in (48, 49):
                    msg = b"Invalid token for this mode: %s" % bytes([token])
                    raise ValueError(msg)
            data = (data + tokens)[:total_bytes]
        invert = bytes.maketrans(b"01", b"\xFF\x00")
        return data.translate(invert)

    def _decode_blocks(self, maxval):
        data = bytearray()
        max_len = 10
        out_byte_count = 4 if self.mode == "I" else 1
        out_max = 65535 if self.mode == "I" else 255
        bands = Image.getmodebands(self.mode)
        total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count

        half_token = False
        while len(data) != total_bytes:
            block = self._read_block()  # read next block
            if not block:
                if half_token:
                    block = bytearray(b" ")  # flush half_token
                else:
                    # eof
                    break

            block = self._ignore_comments(block)

            if half_token:
                block = half_token + block  # stitch half_token to new block
                half_token = False

            tokens = block.split()

            if block and not block[-1:].isspace():  # block might split token
                half_token = tokens.pop()  # save half token for later
                if len(half_token) > max_len:  # prevent buildup of half_token
                    msg = (
                        b"Token too long found in data: %s" % half_token[: max_len + 1]
                    )
                    raise ValueError(msg)

            for token in tokens:
                if len(token) > max_len:
                    msg = b"Token too long found in data: %s" % token[: max_len + 1]
                    raise ValueError(msg)
                value = int(token)
                if value > maxval:
                    msg = f"Channel value too large for this mode: {value}"
                    raise ValueError(msg)
                value = round(value / maxval * out_max)
                data += o32(value) if self.mode == "I" else o8(value)
                if len(data) == total_bytes:  # finished!
                    break
        return data

    def decode(self, buffer):
        self._comment_spans = False
        if self.mode == "1":
            data = self._decode_bitonal()
            rawmode = "1;8"
        else:
            maxval = self.args[-1]
            data = self._decode_blocks(maxval)
            rawmode = "I;32" if self.mode == "I" else self.mode
        self.set_as_raw(bytes(data), rawmode)
        return -1, 0


class PpmDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer):
        data = bytearray()
        maxval = self.args[-1]
        in_byte_count = 1 if maxval < 256 else 2
        out_byte_count = 4 if self.mode == "I" else 1
        out_max = 65535 if self.mode == "I" else 255
        bands = Image.getmodebands(self.mode)
        while len(data) < self.state.xsize * self.state.ysize * bands * out_byte_count:
            pixels = self.fd.read(in_byte_count * bands)
            if len(pixels) < in_byte_count * bands:
                # eof
                break
            for b in range(bands):
                value = (
                    pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count)
                )
                value = min(out_max, round(value / maxval * out_max))
                data += o32(value) if self.mode == "I" else o8(value)
        rawmode = "I;32" if self.mode == "I" else self.mode
        self.set_as_raw(bytes(data), rawmode)
        return -1, 0


#
# --------------------------------------------------------------------


def _save(im, fp, filename):
    if im.mode == "1":
        rawmode, head = "1;I", b"P4"
    elif im.mode == "L":
        rawmode, head = "L", b"P5"
    elif im.mode == "I":
        rawmode, head = "I;16B", b"P5"
    elif im.mode in ("RGB", "RGBA"):
        rawmode, head = "RGB", b"P6"
    else:
        msg = f"cannot write mode {im.mode} as PPM"
        raise OSError(msg)
    fp.write(head + b"\n%d %d\n" % im.size)
    if head == b"P6":
        fp.write(b"255\n")
    elif head == b"P5":
        if rawmode == "L":
            fp.write(b"255\n")
        else:
            fp.write(b"65535\n")
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])


#
# --------------------------------------------------------------------


Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
Image.register_save(PpmImageFile.format, _save)

Image.register_decoder("ppm", PpmDecoder)
Image.register_decoder("ppm_plain", PpmPlainDecoder)

Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"])

Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
Some files were not shown because too many files have changed in this diff