Raid de discos
This commit is contained in:
parent
02bbe6d260
commit
f03c9e50f5
|
|
@ -2,6 +2,15 @@
|
||||||
|
|
||||||
## Administración de bases de datos.
|
## Administración de bases de datos.
|
||||||
## Sistemas de almacenamiento y su virtualización.
|
## Sistemas de almacenamiento y su virtualización.
|
||||||
|
### RAID
|
||||||
|
| Nivel | Descripción | Ventajas | Desventajas | Requisitos mínimos |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| RAID 0 | Striping | Velocidad máxima | No hay redundancia; si un disco falla se pierden todos los datos | 2 discos |
|
||||||
|
| RAID 1 | Mirroring | Redundancia total; seguridad alta | Capacidad útil = 1 disco; coste alto | 2 discos |
|
||||||
|
| RAID 5 | Striping con paridad | Seguridad + velocidad equilibrada | Solo tolera el fallo de un disco; reconstrucción lenta | 3 discos |
|
||||||
|
| RAID 6 | Igual que RAID 5, pero con doble paridad | Puede soportar el fallo de dos discos | Más lento al escribir y más caro | 4 discos mínimo |
|
||||||
|
| RAID 10 | RAID 1 + RAID 0 | Alta velocidad + redundancia | Necesita muchos discos = coste alto | 4 discos |
|
||||||
|
|
||||||
## Políticas, sistemas y procedimientos de backup y su recuperación.
|
## Políticas, sistemas y procedimientos de backup y su recuperación.
|
||||||
## Backup de sistemas físicos y virtuales.
|
## Backup de sistemas físicos y virtuales.
|
||||||
## Virtualización de sistemas y virtualización de puestos de usuario.
|
## Virtualización de sistemas y virtualización de puestos de usuario.
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import PyPDF2
|
||||||
|
from gtts import gTTS
|
||||||
|
|
||||||
|
def convertir_a_audio(pdf_input, *, lang='es', min_chars=10):
    """Extract the text of a PDF file and render it as an MP3 audio file.

    The output file is written next to the input, with the same base name
    and a ``.mp3`` extension (e.g. ``apuntes.pdf`` -> ``apuntes.mp3``).

    Args:
        pdf_input: Path to the PDF file to convert.
        lang: gTTS language code for the synthesized voice (default Spanish).
        min_chars: Minimum number of extracted characters required to
            consider the PDF readable (scanned/image PDFs often yield none).

    Returns:
        The path of the generated MP3 on success, or ``None`` when the PDF
        had no usable text or any step failed.
    """
    # Creamos el nombre del mp3 a partir del nombre del pdf
    base_name = os.path.splitext(pdf_input)[0]
    audio_output = f"{base_name}.mp3"

    print(f"--- Procesando: {pdf_input} ---")

    try:
        fragmentos = []
        with open(pdf_input, 'rb') as f:
            lector = PyPDF2.PdfReader(f)
            # Recorremos cada página para sacar el texto
            for pagina in lector.pages:
                txt = pagina.extract_text()
                if txt:
                    # Limpiamos saltos de línea para que gTTS no se líe
                    fragmentos.append(txt.replace('\n', ' '))
        # str.join avoids the quadratic cost of repeated `+=` concatenation.
        texto_completo = " ".join(fragmentos)

        # Comprobamos que haya algo que leer
        longitud = len(texto_completo.strip())
        if longitud < min_chars:
            print("Error: No se ha podido extraer texto suficiente del PDF.")
            return None

        print(f"Texto extraído correctamente ({longitud} caracteres).")
        print("Conectando con Google para generar el audio (paciencia)...")

        # Generamos el audio (gTTS performs a network request here).
        tts = gTTS(text=texto_completo, lang=lang)
        tts.save(audio_output)
        print(f"¡Éxito! Ya tienes tu audio en: {audio_output}")
        return audio_output
    except Exception as e:
        # Best-effort CLI tool: report the failure instead of crashing.
        print(f"Vaya, algo ha fallado: {e}")
        return None
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # CLI entry point: expects the PDF path as the first argument.
    if len(sys.argv) < 2:
        print("Uso: python3 pdf_a_audio.py <tu_archivo.pdf>")
    else:
        fichero = sys.argv[1]
        # Only hand the file to the converter if it is actually there.
        if not os.path.exists(fichero):
            print(f"Error: El archivo '{fichero}' no existe en esa ruta.")
        else:
            convertir_a_audio(fichero)
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,247 @@
|
||||||
|
<#
|
||||||
|
.Synopsis
|
||||||
|
Activate a Python virtual environment for the current PowerShell session.
|
||||||
|
|
||||||
|
.Description
|
||||||
|
Pushes the python executable for a virtual environment to the front of the
|
||||||
|
$Env:PATH environment variable and sets the prompt to signify that you are
|
||||||
|
in a Python virtual environment. Makes use of the command line switches as
|
||||||
|
well as the `pyvenv.cfg` file values present in the virtual environment.
|
||||||
|
|
||||||
|
.Parameter VenvDir
|
||||||
|
Path to the directory that contains the virtual environment to activate. The
|
||||||
|
default value for this is the parent of the directory that the Activate.ps1
|
||||||
|
script is located within.
|
||||||
|
|
||||||
|
.Parameter Prompt
|
||||||
|
The prompt prefix to display when this virtual environment is activated. By
|
||||||
|
default, this prompt is the name of the virtual environment folder (VenvDir)
|
||||||
|
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
|
||||||
|
|
||||||
|
.Example
|
||||||
|
Activate.ps1
|
||||||
|
Activates the Python virtual environment that contains the Activate.ps1 script.
|
||||||
|
|
||||||
|
.Example
|
||||||
|
Activate.ps1 -Verbose
|
||||||
|
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||||
|
and shows extra information about the activation as it executes.
|
||||||
|
|
||||||
|
.Example
|
||||||
|
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
|
||||||
|
Activates the Python virtual environment located in the specified location.
|
||||||
|
|
||||||
|
.Example
|
||||||
|
Activate.ps1 -Prompt "MyPython"
|
||||||
|
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||||
|
and prefixes the current prompt with the specified string (surrounded in
|
||||||
|
parentheses) while the virtual environment is active.
|
||||||
|
|
||||||
|
.Notes
|
||||||
|
On Windows, it may be required to enable this Activate.ps1 script by setting the
|
||||||
|
execution policy for the user. You can do this by issuing the following PowerShell
|
||||||
|
command:
|
||||||
|
|
||||||
|
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||||
|
|
||||||
|
For more information on Execution Policies:
|
||||||
|
https://go.microsoft.com/fwlink/?LinkID=135170
|
||||||
|
|
||||||
|
#>
|
||||||
|
Param(
|
||||||
|
[Parameter(Mandatory = $false)]
|
||||||
|
[String]
|
||||||
|
$VenvDir,
|
||||||
|
[Parameter(Mandatory = $false)]
|
||||||
|
[String]
|
||||||
|
$Prompt
|
||||||
|
)
|
||||||
|
|
||||||
|
<# Function declarations --------------------------------------------------- #>
|
||||||
|
|
||||||
|
<#
|
||||||
|
.Synopsis
|
||||||
|
Remove all shell session elements added by the Activate script, including the
|
||||||
|
addition of the virtual environment's Python executable from the beginning of
|
||||||
|
the PATH variable.
|
||||||
|
|
||||||
|
.Parameter NonDestructive
|
||||||
|
If present, do not remove this function from the global namespace for the
|
||||||
|
session.
|
||||||
|
|
||||||
|
#>
|
||||||
|
function global:deactivate ([switch]$NonDestructive) {
|
||||||
|
# Revert to original values
|
||||||
|
|
||||||
|
# The prior prompt:
|
||||||
|
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
|
||||||
|
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
|
||||||
|
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
|
||||||
|
}
|
||||||
|
|
||||||
|
# The prior PYTHONHOME:
|
||||||
|
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
|
||||||
|
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
|
||||||
|
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
|
||||||
|
}
|
||||||
|
|
||||||
|
# The prior PATH:
|
||||||
|
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
|
||||||
|
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
|
||||||
|
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
|
||||||
|
}
|
||||||
|
|
||||||
|
# Just remove the VIRTUAL_ENV altogether:
|
||||||
|
if (Test-Path -Path Env:VIRTUAL_ENV) {
|
||||||
|
Remove-Item -Path env:VIRTUAL_ENV
|
||||||
|
}
|
||||||
|
|
||||||
|
# Just remove VIRTUAL_ENV_PROMPT altogether.
|
||||||
|
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
|
||||||
|
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
|
||||||
|
}
|
||||||
|
|
||||||
|
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
|
||||||
|
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
|
||||||
|
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
|
||||||
|
}
|
||||||
|
|
||||||
|
# Leave deactivate function in the global namespace if requested:
|
||||||
|
if (-not $NonDestructive) {
|
||||||
|
Remove-Item -Path function:deactivate
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
<#
|
||||||
|
.Description
|
||||||
|
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
|
||||||
|
given folder, and returns them in a map.
|
||||||
|
|
||||||
|
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
|
||||||
|
two strings separated by `=` (with any amount of whitespace surrounding the =)
|
||||||
|
then it is considered a `key = value` line. The left hand string is the key,
|
||||||
|
the right hand is the value.
|
||||||
|
|
||||||
|
If the value starts with a `'` or a `"` then the first and last character is
|
||||||
|
stripped from the value before being captured.
|
||||||
|
|
||||||
|
.Parameter ConfigDir
|
||||||
|
Path to the directory that contains the `pyvenv.cfg` file.
|
||||||
|
#>
|
||||||
|
function Get-PyVenvConfig(
|
||||||
|
[String]
|
||||||
|
$ConfigDir
|
||||||
|
) {
|
||||||
|
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
|
||||||
|
|
||||||
|
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
|
||||||
|
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
|
||||||
|
|
||||||
|
# An empty map will be returned if no config file is found.
|
||||||
|
$pyvenvConfig = @{ }
|
||||||
|
|
||||||
|
if ($pyvenvConfigPath) {
|
||||||
|
|
||||||
|
Write-Verbose "File exists, parse `key = value` lines"
|
||||||
|
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
|
||||||
|
|
||||||
|
$pyvenvConfigContent | ForEach-Object {
|
||||||
|
$keyval = $PSItem -split "\s*=\s*", 2
|
||||||
|
if ($keyval[0] -and $keyval[1]) {
|
||||||
|
$val = $keyval[1]
|
||||||
|
|
||||||
|
# Remove extraneous quotations around a string value.
|
||||||
|
if ("'""".Contains($val.Substring(0, 1))) {
|
||||||
|
$val = $val.Substring(1, $val.Length - 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
$pyvenvConfig[$keyval[0]] = $val
|
||||||
|
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return $pyvenvConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
<# Begin Activate script --------------------------------------------------- #>
|
||||||
|
|
||||||
|
# Determine the containing directory of this script
|
||||||
|
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
||||||
|
$VenvExecDir = Get-Item -Path $VenvExecPath
|
||||||
|
|
||||||
|
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
|
||||||
|
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
|
||||||
|
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
|
||||||
|
|
||||||
|
# Set values required in priority: CmdLine, ConfigFile, Default
|
||||||
|
# First, get the location of the virtual environment, it might not be
|
||||||
|
# VenvExecDir if specified on the command line.
|
||||||
|
if ($VenvDir) {
|
||||||
|
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
|
||||||
|
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
|
||||||
|
Write-Verbose "VenvDir=$VenvDir"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Next, read the `pyvenv.cfg` file to determine any required value such
|
||||||
|
# as `prompt`.
|
||||||
|
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
|
||||||
|
|
||||||
|
# Next, set the prompt from the command line, or the config file, or
|
||||||
|
# just use the name of the virtual environment folder.
|
||||||
|
if ($Prompt) {
|
||||||
|
Write-Verbose "Prompt specified as argument, using '$Prompt'"
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
|
||||||
|
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
|
||||||
|
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
|
||||||
|
$Prompt = $pyvenvCfg['prompt'];
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
|
||||||
|
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
|
||||||
|
$Prompt = Split-Path -Path $venvDir -Leaf
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Write-Verbose "Prompt = '$Prompt'"
|
||||||
|
Write-Verbose "VenvDir='$VenvDir'"
|
||||||
|
|
||||||
|
# Deactivate any currently active virtual environment, but leave the
|
||||||
|
# deactivate function in place.
|
||||||
|
deactivate -nondestructive
|
||||||
|
|
||||||
|
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
|
||||||
|
# that there is an activated venv.
|
||||||
|
$env:VIRTUAL_ENV = $VenvDir
|
||||||
|
|
||||||
|
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
|
||||||
|
|
||||||
|
Write-Verbose "Setting prompt to '$Prompt'"
|
||||||
|
|
||||||
|
# Set the prompt to include the env name
|
||||||
|
# Make sure _OLD_VIRTUAL_PROMPT is global
|
||||||
|
function global:_OLD_VIRTUAL_PROMPT { "" }
|
||||||
|
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
|
||||||
|
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
|
||||||
|
|
||||||
|
function global:prompt {
|
||||||
|
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
|
||||||
|
_OLD_VIRTUAL_PROMPT
|
||||||
|
}
|
||||||
|
$env:VIRTUAL_ENV_PROMPT = $Prompt
|
||||||
|
}
|
||||||
|
|
||||||
|
# Clear PYTHONHOME
|
||||||
|
if (Test-Path -Path Env:PYTHONHOME) {
|
||||||
|
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
|
||||||
|
Remove-Item -Path Env:PYTHONHOME
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add the venv to the PATH
|
||||||
|
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
|
||||||
|
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
|
||||||
|
|
@ -0,0 +1,70 @@
|
||||||
|
# This file must be used with "source bin/activate" *from bash*
|
||||||
|
# You cannot run it directly
|
||||||
|
|
||||||
|
deactivate () {
|
||||||
|
# reset old environment variables
|
||||||
|
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
|
||||||
|
PATH="${_OLD_VIRTUAL_PATH:-}"
|
||||||
|
export PATH
|
||||||
|
unset _OLD_VIRTUAL_PATH
|
||||||
|
fi
|
||||||
|
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
|
||||||
|
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
|
||||||
|
export PYTHONHOME
|
||||||
|
unset _OLD_VIRTUAL_PYTHONHOME
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Call hash to forget past commands. Without forgetting
|
||||||
|
# past commands the $PATH changes we made may not be respected
|
||||||
|
hash -r 2> /dev/null
|
||||||
|
|
||||||
|
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
|
||||||
|
PS1="${_OLD_VIRTUAL_PS1:-}"
|
||||||
|
export PS1
|
||||||
|
unset _OLD_VIRTUAL_PS1
|
||||||
|
fi
|
||||||
|
|
||||||
|
unset VIRTUAL_ENV
|
||||||
|
unset VIRTUAL_ENV_PROMPT
|
||||||
|
if [ ! "${1:-}" = "nondestructive" ] ; then
|
||||||
|
# Self destruct!
|
||||||
|
unset -f deactivate
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# unset irrelevant variables
|
||||||
|
deactivate nondestructive
|
||||||
|
|
||||||
|
# on Windows, a path can contain colons and backslashes and has to be converted:
|
||||||
|
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
|
||||||
|
# transform D:\path\to\venv to /d/path/to/venv on MSYS
|
||||||
|
# and to /cygdrive/d/path/to/venv on Cygwin
|
||||||
|
export VIRTUAL_ENV=$(cygpath /home/tatvil/trabajo/Oposiciones/TAI/2025/venv)
|
||||||
|
else
|
||||||
|
# use the path as-is
|
||||||
|
export VIRTUAL_ENV=/home/tatvil/trabajo/Oposiciones/TAI/2025/venv
|
||||||
|
fi
|
||||||
|
|
||||||
|
_OLD_VIRTUAL_PATH="$PATH"
|
||||||
|
PATH="$VIRTUAL_ENV/"bin":$PATH"
|
||||||
|
export PATH
|
||||||
|
|
||||||
|
# unset PYTHONHOME if set
|
||||||
|
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
|
||||||
|
# could use `if (set -u; : $PYTHONHOME) ;` in bash
|
||||||
|
if [ -n "${PYTHONHOME:-}" ] ; then
|
||||||
|
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
|
||||||
|
unset PYTHONHOME
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
|
||||||
|
_OLD_VIRTUAL_PS1="${PS1:-}"
|
||||||
|
PS1='(venv) '"${PS1:-}"
|
||||||
|
export PS1
|
||||||
|
VIRTUAL_ENV_PROMPT='(venv) '
|
||||||
|
export VIRTUAL_ENV_PROMPT
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Call hash to forget past commands. Without forgetting
|
||||||
|
# past commands the $PATH changes we made may not be respected
|
||||||
|
hash -r 2> /dev/null
|
||||||
|
|
@ -0,0 +1,27 @@
|
||||||
|
# This file must be used with "source bin/activate.csh" *from csh*.
|
||||||
|
# You cannot run it directly.
|
||||||
|
|
||||||
|
# Created by Davide Di Blasi <davidedb@gmail.com>.
|
||||||
|
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
|
||||||
|
|
||||||
|
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
|
||||||
|
|
||||||
|
# Unset irrelevant variables.
|
||||||
|
deactivate nondestructive
|
||||||
|
|
||||||
|
setenv VIRTUAL_ENV /home/tatvil/trabajo/Oposiciones/TAI/2025/venv
|
||||||
|
|
||||||
|
set _OLD_VIRTUAL_PATH="$PATH"
|
||||||
|
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
|
||||||
|
|
||||||
|
|
||||||
|
set _OLD_VIRTUAL_PROMPT="$prompt"
|
||||||
|
|
||||||
|
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
|
||||||
|
set prompt = '(venv) '"$prompt"
|
||||||
|
setenv VIRTUAL_ENV_PROMPT '(venv) '
|
||||||
|
endif
|
||||||
|
|
||||||
|
alias pydoc python -m pydoc
|
||||||
|
|
||||||
|
rehash
|
||||||
|
|
@ -0,0 +1,69 @@
|
||||||
|
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
|
||||||
|
# (https://fishshell.com/). You cannot run it directly.
|
||||||
|
|
||||||
|
function deactivate -d "Exit virtual environment and return to normal shell environment"
|
||||||
|
# reset old environment variables
|
||||||
|
if test -n "$_OLD_VIRTUAL_PATH"
|
||||||
|
set -gx PATH $_OLD_VIRTUAL_PATH
|
||||||
|
set -e _OLD_VIRTUAL_PATH
|
||||||
|
end
|
||||||
|
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
|
||||||
|
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
|
||||||
|
set -e _OLD_VIRTUAL_PYTHONHOME
|
||||||
|
end
|
||||||
|
|
||||||
|
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
|
||||||
|
set -e _OLD_FISH_PROMPT_OVERRIDE
|
||||||
|
# prevents error when using nested fish instances (Issue #93858)
|
||||||
|
if functions -q _old_fish_prompt
|
||||||
|
functions -e fish_prompt
|
||||||
|
functions -c _old_fish_prompt fish_prompt
|
||||||
|
functions -e _old_fish_prompt
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
set -e VIRTUAL_ENV
|
||||||
|
set -e VIRTUAL_ENV_PROMPT
|
||||||
|
if test "$argv[1]" != "nondestructive"
|
||||||
|
# Self-destruct!
|
||||||
|
functions -e deactivate
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Unset irrelevant variables.
|
||||||
|
deactivate nondestructive
|
||||||
|
|
||||||
|
set -gx VIRTUAL_ENV /home/tatvil/trabajo/Oposiciones/TAI/2025/venv
|
||||||
|
|
||||||
|
set -gx _OLD_VIRTUAL_PATH $PATH
|
||||||
|
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
|
||||||
|
|
||||||
|
# Unset PYTHONHOME if set.
|
||||||
|
if set -q PYTHONHOME
|
||||||
|
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
|
||||||
|
set -e PYTHONHOME
|
||||||
|
end
|
||||||
|
|
||||||
|
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
|
||||||
|
# fish uses a function instead of an env var to generate the prompt.
|
||||||
|
|
||||||
|
# Save the current fish_prompt function as the function _old_fish_prompt.
|
||||||
|
functions -c fish_prompt _old_fish_prompt
|
||||||
|
|
||||||
|
# With the original prompt function renamed, we can override with our own.
|
||||||
|
function fish_prompt
|
||||||
|
# Save the return status of the last command.
|
||||||
|
set -l old_status $status
|
||||||
|
|
||||||
|
# Output the venv prompt; color taken from the blue of the Python logo.
|
||||||
|
printf "%s%s%s" (set_color 4B8BBE) '(venv) ' (set_color normal)
|
||||||
|
|
||||||
|
# Restore the return status of the previous command.
|
||||||
|
echo "exit $old_status" | .
|
||||||
|
# Output the original/"old" prompt.
|
||||||
|
_old_fish_prompt
|
||||||
|
end
|
||||||
|
|
||||||
|
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
|
||||||
|
set -gx VIRTUAL_ENV_PROMPT '(venv) '
|
||||||
|
end
|
||||||
|
|
@ -0,0 +1,8 @@
|
||||||
|
#!/home/tatvil/trabajo/Oposiciones/TAI/2025/venv/bin/python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from gtts.cli import tts_cli
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||||
|
sys.exit(tts_cli())
|
||||||
|
|
@ -0,0 +1,8 @@
|
||||||
|
#!/home/tatvil/trabajo/Oposiciones/TAI/2025/venv/bin/python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from charset_normalizer.cli import cli_detect
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||||
|
sys.exit(cli_detect())
|
||||||
|
|
@ -0,0 +1,8 @@
|
||||||
|
#!/home/tatvil/trabajo/Oposiciones/TAI/2025/venv/bin/python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from pip._internal.cli.main import main
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||||
|
sys.exit(main())
|
||||||
|
|
@ -0,0 +1,8 @@
|
||||||
|
#!/home/tatvil/trabajo/Oposiciones/TAI/2025/venv/bin/python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from pip._internal.cli.main import main
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||||
|
sys.exit(main())
|
||||||
|
|
@ -0,0 +1,8 @@
|
||||||
|
#!/home/tatvil/trabajo/Oposiciones/TAI/2025/venv/bin/python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from pip._internal.cli.main import main
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||||
|
sys.exit(main())
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
python3
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
/usr/bin/python3
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
python3
|
||||||
|
|
@ -0,0 +1,41 @@
|
||||||
|
"""
|
||||||
|
PyPDF2 is a free and open-source pure-python PDF library capable of splitting,
|
||||||
|
merging, cropping, and transforming the pages of PDF files. It can also add
|
||||||
|
custom data, viewing options, and passwords to PDF files. PyPDF2 can retrieve
|
||||||
|
text and metadata from PDFs as well.
|
||||||
|
|
||||||
|
You can read the full docs at https://pypdf2.readthedocs.io/.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from ._encryption import PasswordType
|
||||||
|
from ._merger import PdfFileMerger, PdfMerger
|
||||||
|
from ._page import PageObject, Transformation
|
||||||
|
from ._reader import DocumentInformation, PdfFileReader, PdfReader
|
||||||
|
from ._version import __version__
|
||||||
|
from ._writer import PdfFileWriter, PdfWriter
|
||||||
|
from .pagerange import PageRange, parse_filename_page_ranges
|
||||||
|
from .papersizes import PaperSize
|
||||||
|
|
||||||
|
warnings.warn(
|
||||||
|
message="PyPDF2 is deprecated. Please move to the pypdf library instead.",
|
||||||
|
category=DeprecationWarning,
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"__version__",
|
||||||
|
"PageRange",
|
||||||
|
"PaperSize",
|
||||||
|
"DocumentInformation",
|
||||||
|
"parse_filename_page_ranges",
|
||||||
|
"PdfFileMerger", # will be removed in PyPDF2 3.0.0; use PdfMerger instead
|
||||||
|
"PdfFileReader", # will be removed in PyPDF2 3.0.0; use PdfReader instead
|
||||||
|
"PdfFileWriter", # will be removed in PyPDF2 3.0.0; use PdfWriter instead
|
||||||
|
"PdfMerger",
|
||||||
|
"PdfReader",
|
||||||
|
"PdfWriter",
|
||||||
|
"Transformation",
|
||||||
|
"PageObject",
|
||||||
|
"PasswordType",
|
||||||
|
]
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,413 @@
|
||||||
|
import warnings
|
||||||
|
from binascii import unhexlify
|
||||||
|
from math import ceil
|
||||||
|
from typing import Any, Dict, List, Tuple, Union, cast
|
||||||
|
|
||||||
|
from ._codecs import adobe_glyphs, charset_encoding
|
||||||
|
from ._utils import logger_warning
|
||||||
|
from .errors import PdfReadWarning
|
||||||
|
from .generic import DecodedStreamObject, DictionaryObject, StreamObject
|
||||||
|
|
||||||
|
|
||||||
|
# code freely inspired from @twiggy ; see #711
|
||||||
|
def build_char_map(
|
||||||
|
font_name: str, space_width: float, obj: DictionaryObject
|
||||||
|
) -> Tuple[
|
||||||
|
str, float, Union[str, Dict[int, str]], Dict, DictionaryObject
|
||||||
|
]: # font_type,space_width /2, encoding, cmap
|
||||||
|
"""Determine information about a font.
|
||||||
|
|
||||||
|
This function returns a tuple consisting of:
|
||||||
|
font sub-type, space_width/2, encoding, map character-map, font-dictionary.
|
||||||
|
The font-dictionary itself is suitable for the curious."""
|
||||||
|
ft: DictionaryObject = obj["/Resources"]["/Font"][font_name] # type: ignore
|
||||||
|
font_type: str = cast(str, ft["/Subtype"])
|
||||||
|
|
||||||
|
space_code = 32
|
||||||
|
encoding, space_code = parse_encoding(ft, space_code)
|
||||||
|
map_dict, space_code, int_entry = parse_to_unicode(ft, space_code)
|
||||||
|
|
||||||
|
# encoding can be either a string for decode (on 1,2 or a variable number of bytes) of a char table (for 1 byte only for me)
|
||||||
|
# if empty string, it means it is than encoding field is not present and we have to select the good encoding from cmap input data
|
||||||
|
if encoding == "":
|
||||||
|
if -1 not in map_dict or map_dict[-1] == 1:
|
||||||
|
# I have not been able to find any rule for no /Encoding nor /ToUnicode
|
||||||
|
# One example shows /Symbol,bold I consider 8 bits encoding default
|
||||||
|
encoding = "charmap"
|
||||||
|
else:
|
||||||
|
encoding = "utf-16-be"
|
||||||
|
# apply rule from PDF ref 1.7 §5.9.1, 1st bullet : if cmap not empty encoding should be discarded (here transformed into identity for those characters)
|
||||||
|
# if encoding is an str it is expected to be a identity translation
|
||||||
|
elif isinstance(encoding, dict):
|
||||||
|
for x in int_entry:
|
||||||
|
if x <= 255:
|
||||||
|
encoding[x] = chr(x)
|
||||||
|
try:
|
||||||
|
# override space_width with new params
|
||||||
|
space_width = _default_fonts_space_width[cast(str, ft["/BaseFont"])]
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
# I conside the space_code is available on one byte
|
||||||
|
if isinstance(space_code, str):
|
||||||
|
try: # one byte
|
||||||
|
sp = space_code.encode("charmap")[0]
|
||||||
|
except Exception:
|
||||||
|
sp = space_code.encode("utf-16-be")
|
||||||
|
sp = sp[0] + 256 * sp[1]
|
||||||
|
else:
|
||||||
|
sp = space_code
|
||||||
|
sp_width = compute_space_width(ft, sp, space_width)
|
||||||
|
|
||||||
|
return (
|
||||||
|
font_type,
|
||||||
|
float(sp_width / 2),
|
||||||
|
encoding,
|
||||||
|
# https://github.com/python/mypy/issues/4374
|
||||||
|
map_dict,
|
||||||
|
ft,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# used when missing data, e.g. font def missing
|
||||||
|
unknown_char_map: Tuple[str, float, Union[str, Dict[int, str]], Dict[Any, Any]] = (
|
||||||
|
"Unknown",
|
||||||
|
9999,
|
||||||
|
dict(zip(range(256), ["<EFBFBD>"] * 256)),
|
||||||
|
{},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
_predefined_cmap: Dict[str, str] = {
|
||||||
|
"/Identity-H": "utf-16-be",
|
||||||
|
"/Identity-V": "utf-16-be",
|
||||||
|
"/GB-EUC-H": "gbk", # TBC
|
||||||
|
"/GB-EUC-V": "gbk", # TBC
|
||||||
|
"/GBpc-EUC-H": "gb2312", # TBC
|
||||||
|
"/GBpc-EUC-V": "gb2312", # TBC
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# manually extracted from http://mirrors.ctan.org/fonts/adobe/afm/Adobe-Core35_AFMs-229.tar.gz
|
||||||
|
_default_fonts_space_width: Dict[str, int] = {
|
||||||
|
"/Courrier": 600,
|
||||||
|
"/Courier-Bold": 600,
|
||||||
|
"/Courier-BoldOblique": 600,
|
||||||
|
"/Courier-Oblique": 600,
|
||||||
|
"/Helvetica": 278,
|
||||||
|
"/Helvetica-Bold": 278,
|
||||||
|
"/Helvetica-BoldOblique": 278,
|
||||||
|
"/Helvetica-Oblique": 278,
|
||||||
|
"/Helvetica-Narrow": 228,
|
||||||
|
"/Helvetica-NarrowBold": 228,
|
||||||
|
"/Helvetica-NarrowBoldOblique": 228,
|
||||||
|
"/Helvetica-NarrowOblique": 228,
|
||||||
|
"/Times-Roman": 250,
|
||||||
|
"/Times-Bold": 250,
|
||||||
|
"/Times-BoldItalic": 250,
|
||||||
|
"/Times-Italic": 250,
|
||||||
|
"/Symbol": 250,
|
||||||
|
"/ZapfDingbats": 278,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def parse_encoding(
|
||||||
|
ft: DictionaryObject, space_code: int
|
||||||
|
) -> Tuple[Union[str, Dict[int, str]], int]:
|
||||||
|
encoding: Union[str, List[str], Dict[int, str]] = []
|
||||||
|
if "/Encoding" not in ft:
|
||||||
|
try:
|
||||||
|
if "/BaseFont" in ft and cast(str, ft["/BaseFont"]) in charset_encoding:
|
||||||
|
encoding = dict(
|
||||||
|
zip(range(256), charset_encoding[cast(str, ft["/BaseFont"])])
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
encoding = "charmap"
|
||||||
|
return encoding, _default_fonts_space_width[cast(str, ft["/BaseFont"])]
|
||||||
|
except Exception:
|
||||||
|
if cast(str, ft["/Subtype"]) == "/Type1":
|
||||||
|
return "charmap", space_code
|
||||||
|
else:
|
||||||
|
return "", space_code
|
||||||
|
enc: Union(str, DictionaryObject) = ft["/Encoding"].get_object() # type: ignore
|
||||||
|
if isinstance(enc, str):
|
||||||
|
try:
|
||||||
|
# allready done : enc = NameObject.unnumber(enc.encode()).decode() # for #xx decoding
|
||||||
|
if enc in charset_encoding:
|
||||||
|
encoding = charset_encoding[enc].copy()
|
||||||
|
elif enc in _predefined_cmap:
|
||||||
|
encoding = _predefined_cmap[enc]
|
||||||
|
else:
|
||||||
|
raise Exception("not found")
|
||||||
|
except Exception:
|
||||||
|
warnings.warn(
|
||||||
|
f"Advanced encoding {enc} not implemented yet",
|
||||||
|
PdfReadWarning,
|
||||||
|
)
|
||||||
|
encoding = enc
|
||||||
|
elif isinstance(enc, DictionaryObject) and "/BaseEncoding" in enc:
|
||||||
|
try:
|
||||||
|
encoding = charset_encoding[cast(str, enc["/BaseEncoding"])].copy()
|
||||||
|
except Exception:
|
||||||
|
warnings.warn(
|
||||||
|
f"Advanced encoding {encoding} not implemented yet",
|
||||||
|
PdfReadWarning,
|
||||||
|
)
|
||||||
|
encoding = charset_encoding["/StandardCoding"].copy()
|
||||||
|
else:
|
||||||
|
encoding = charset_encoding["/StandardCoding"].copy()
|
||||||
|
if "/Differences" in enc:
|
||||||
|
x: int = 0
|
||||||
|
o: Union[int, str]
|
||||||
|
for o in cast(DictionaryObject, cast(DictionaryObject, enc)["/Differences"]):
|
||||||
|
if isinstance(o, int):
|
||||||
|
x = o
|
||||||
|
else: # isinstance(o,str):
|
||||||
|
try:
|
||||||
|
encoding[x] = adobe_glyphs[o] # type: ignore
|
||||||
|
except Exception:
|
||||||
|
encoding[x] = o # type: ignore
|
||||||
|
if o == " ":
|
||||||
|
space_code = x
|
||||||
|
x += 1
|
||||||
|
if isinstance(encoding, list):
|
||||||
|
encoding = dict(zip(range(256), encoding))
|
||||||
|
return encoding, space_code
|
||||||
|
|
||||||
|
|
||||||
|
def parse_to_unicode(
|
||||||
|
ft: DictionaryObject, space_code: int
|
||||||
|
) -> Tuple[Dict[Any, Any], int, List[int]]:
|
||||||
|
# will store all translation code
|
||||||
|
# and map_dict[-1] we will have the number of bytes to convert
|
||||||
|
map_dict: Dict[Any, Any] = {}
|
||||||
|
|
||||||
|
# will provide the list of cmap keys as int to correct encoding
|
||||||
|
int_entry: List[int] = []
|
||||||
|
|
||||||
|
if "/ToUnicode" not in ft:
|
||||||
|
return {}, space_code, []
|
||||||
|
process_rg: bool = False
|
||||||
|
process_char: bool = False
|
||||||
|
multiline_rg: Union[
|
||||||
|
None, Tuple[int, int]
|
||||||
|
] = None # tuple = (current_char, remaining size) ; cf #1285 for example of file
|
||||||
|
cm = prepare_cm(ft)
|
||||||
|
for l in cm.split(b"\n"):
|
||||||
|
process_rg, process_char, multiline_rg = process_cm_line(
|
||||||
|
l.strip(b" "), process_rg, process_char, multiline_rg, map_dict, int_entry
|
||||||
|
)
|
||||||
|
|
||||||
|
for a, value in map_dict.items():
|
||||||
|
if value == " ":
|
||||||
|
space_code = a
|
||||||
|
return map_dict, space_code, int_entry
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_cm(ft: DictionaryObject) -> bytes:
|
||||||
|
tu = ft["/ToUnicode"]
|
||||||
|
cm: bytes
|
||||||
|
if isinstance(tu, StreamObject):
|
||||||
|
cm = cast(DecodedStreamObject, ft["/ToUnicode"]).get_data()
|
||||||
|
elif isinstance(tu, str) and tu.startswith("/Identity"):
|
||||||
|
cm = b"beginbfrange\n<0000> <0001> <0000>\nendbfrange" # the full range 0000-FFFF will be processed
|
||||||
|
if isinstance(cm, str):
|
||||||
|
cm = cm.encode()
|
||||||
|
# we need to prepare cm before due to missing return line in pdf printed to pdf from word
|
||||||
|
cm = (
|
||||||
|
cm.strip()
|
||||||
|
.replace(b"beginbfchar", b"\nbeginbfchar\n")
|
||||||
|
.replace(b"endbfchar", b"\nendbfchar\n")
|
||||||
|
.replace(b"beginbfrange", b"\nbeginbfrange\n")
|
||||||
|
.replace(b"endbfrange", b"\nendbfrange\n")
|
||||||
|
.replace(b"<<", b"\n{\n") # text between << and >> not used but
|
||||||
|
.replace(b">>", b"\n}\n") # some solution to find it back
|
||||||
|
)
|
||||||
|
ll = cm.split(b"<")
|
||||||
|
for i in range(len(ll)):
|
||||||
|
j = ll[i].find(b">")
|
||||||
|
if j >= 0:
|
||||||
|
if j == 0:
|
||||||
|
# string is empty: stash a placeholder here (see below)
|
||||||
|
# see https://github.com/py-pdf/PyPDF2/issues/1111
|
||||||
|
content = b"."
|
||||||
|
else:
|
||||||
|
content = ll[i][:j].replace(b" ", b"")
|
||||||
|
ll[i] = content + b" " + ll[i][j + 1 :]
|
||||||
|
cm = (
|
||||||
|
(b" ".join(ll))
|
||||||
|
.replace(b"[", b" [ ")
|
||||||
|
.replace(b"]", b" ]\n ")
|
||||||
|
.replace(b"\r", b"\n")
|
||||||
|
)
|
||||||
|
return cm
|
||||||
|
|
||||||
|
|
||||||
|
def process_cm_line(
|
||||||
|
l: bytes,
|
||||||
|
process_rg: bool,
|
||||||
|
process_char: bool,
|
||||||
|
multiline_rg: Union[None, Tuple[int, int]],
|
||||||
|
map_dict: Dict[Any, Any],
|
||||||
|
int_entry: List[int],
|
||||||
|
) -> Tuple[bool, bool, Union[None, Tuple[int, int]]]:
|
||||||
|
if l in (b"", b" ") or l[0] == 37: # 37 = %
|
||||||
|
return process_rg, process_char, multiline_rg
|
||||||
|
if b"beginbfrange" in l:
|
||||||
|
process_rg = True
|
||||||
|
elif b"endbfrange" in l:
|
||||||
|
process_rg = False
|
||||||
|
elif b"beginbfchar" in l:
|
||||||
|
process_char = True
|
||||||
|
elif b"endbfchar" in l:
|
||||||
|
process_char = False
|
||||||
|
elif process_rg:
|
||||||
|
multiline_rg = parse_bfrange(l, map_dict, int_entry, multiline_rg)
|
||||||
|
elif process_char:
|
||||||
|
parse_bfchar(l, map_dict, int_entry)
|
||||||
|
return process_rg, process_char, multiline_rg
|
||||||
|
|
||||||
|
|
||||||
|
def parse_bfrange(
|
||||||
|
l: bytes,
|
||||||
|
map_dict: Dict[Any, Any],
|
||||||
|
int_entry: List[int],
|
||||||
|
multiline_rg: Union[None, Tuple[int, int]],
|
||||||
|
) -> Union[None, Tuple[int, int]]:
|
||||||
|
lst = [x for x in l.split(b" ") if x]
|
||||||
|
closure_found = False
|
||||||
|
nbi = max(len(lst[0]), len(lst[1]))
|
||||||
|
map_dict[-1] = ceil(nbi / 2)
|
||||||
|
fmt = b"%%0%dX" % (map_dict[-1] * 2)
|
||||||
|
if multiline_rg is not None:
|
||||||
|
a = multiline_rg[0] # a, b not in the current line
|
||||||
|
b = multiline_rg[1]
|
||||||
|
for sq in lst[1:]:
|
||||||
|
if sq == b"]":
|
||||||
|
closure_found = True
|
||||||
|
break
|
||||||
|
map_dict[
|
||||||
|
unhexlify(fmt % a).decode(
|
||||||
|
"charmap" if map_dict[-1] == 1 else "utf-16-be",
|
||||||
|
"surrogatepass",
|
||||||
|
)
|
||||||
|
] = unhexlify(sq).decode("utf-16-be", "surrogatepass")
|
||||||
|
int_entry.append(a)
|
||||||
|
a += 1
|
||||||
|
else:
|
||||||
|
a = int(lst[0], 16)
|
||||||
|
b = int(lst[1], 16)
|
||||||
|
if lst[2] == b"[":
|
||||||
|
for sq in lst[3:]:
|
||||||
|
if sq == b"]":
|
||||||
|
closure_found = True
|
||||||
|
break
|
||||||
|
map_dict[
|
||||||
|
unhexlify(fmt % a).decode(
|
||||||
|
"charmap" if map_dict[-1] == 1 else "utf-16-be",
|
||||||
|
"surrogatepass",
|
||||||
|
)
|
||||||
|
] = unhexlify(sq).decode("utf-16-be", "surrogatepass")
|
||||||
|
int_entry.append(a)
|
||||||
|
a += 1
|
||||||
|
else: # case without list
|
||||||
|
c = int(lst[2], 16)
|
||||||
|
fmt2 = b"%%0%dX" % max(4, len(lst[2]))
|
||||||
|
closure_found = True
|
||||||
|
while a <= b:
|
||||||
|
map_dict[
|
||||||
|
unhexlify(fmt % a).decode(
|
||||||
|
"charmap" if map_dict[-1] == 1 else "utf-16-be",
|
||||||
|
"surrogatepass",
|
||||||
|
)
|
||||||
|
] = unhexlify(fmt2 % c).decode("utf-16-be", "surrogatepass")
|
||||||
|
int_entry.append(a)
|
||||||
|
a += 1
|
||||||
|
c += 1
|
||||||
|
return None if closure_found else (a, b)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_bfchar(l: bytes, map_dict: Dict[Any, Any], int_entry: List[int]) -> None:
|
||||||
|
lst = [x for x in l.split(b" ") if x]
|
||||||
|
map_dict[-1] = len(lst[0]) // 2
|
||||||
|
while len(lst) > 1:
|
||||||
|
map_to = ""
|
||||||
|
# placeholder (see above) means empty string
|
||||||
|
if lst[1] != b".":
|
||||||
|
map_to = unhexlify(lst[1]).decode(
|
||||||
|
"charmap" if len(lst[1]) < 4 else "utf-16-be", "surrogatepass"
|
||||||
|
) # join is here as some cases where the code was split
|
||||||
|
map_dict[
|
||||||
|
unhexlify(lst[0]).decode(
|
||||||
|
"charmap" if map_dict[-1] == 1 else "utf-16-be", "surrogatepass"
|
||||||
|
)
|
||||||
|
] = map_to
|
||||||
|
int_entry.append(int(lst[0], 16))
|
||||||
|
lst = lst[2:]
|
||||||
|
|
||||||
|
|
||||||
|
def compute_space_width(
|
||||||
|
ft: DictionaryObject, space_code: int, space_width: float
|
||||||
|
) -> float:
|
||||||
|
sp_width: float = space_width * 2 # default value
|
||||||
|
w = []
|
||||||
|
w1 = {}
|
||||||
|
st: int = 0
|
||||||
|
if "/DescendantFonts" in ft: # ft["/Subtype"].startswith("/CIDFontType"):
|
||||||
|
ft1 = ft["/DescendantFonts"][0].get_object() # type: ignore
|
||||||
|
try:
|
||||||
|
w1[-1] = cast(float, ft1["/DW"])
|
||||||
|
except Exception:
|
||||||
|
w1[-1] = 1000.0
|
||||||
|
if "/W" in ft1:
|
||||||
|
w = list(ft1["/W"])
|
||||||
|
else:
|
||||||
|
w = []
|
||||||
|
while len(w) > 0:
|
||||||
|
st = w[0]
|
||||||
|
second = w[1]
|
||||||
|
if isinstance(second, int):
|
||||||
|
for x in range(st, second):
|
||||||
|
w1[x] = w[2]
|
||||||
|
w = w[3:]
|
||||||
|
elif isinstance(second, list):
|
||||||
|
for y in second:
|
||||||
|
w1[st] = y
|
||||||
|
st += 1
|
||||||
|
w = w[2:]
|
||||||
|
else:
|
||||||
|
logger_warning(
|
||||||
|
"unknown widths : \n" + (ft1["/W"]).__repr__(),
|
||||||
|
__name__,
|
||||||
|
)
|
||||||
|
break
|
||||||
|
try:
|
||||||
|
sp_width = w1[space_code]
|
||||||
|
except Exception:
|
||||||
|
sp_width = (
|
||||||
|
w1[-1] / 2.0
|
||||||
|
) # if using default we consider space will be only half size
|
||||||
|
elif "/Widths" in ft:
|
||||||
|
w = list(ft["/Widths"]) # type: ignore
|
||||||
|
try:
|
||||||
|
st = cast(int, ft["/FirstChar"])
|
||||||
|
en: int = cast(int, ft["/LastChar"])
|
||||||
|
if st > space_code or en < space_code:
|
||||||
|
raise Exception("Not in range")
|
||||||
|
if w[space_code - st] == 0:
|
||||||
|
raise Exception("null width")
|
||||||
|
sp_width = w[space_code - st]
|
||||||
|
except Exception:
|
||||||
|
if "/FontDescriptor" in ft and "/MissingWidth" in cast(
|
||||||
|
DictionaryObject, ft["/FontDescriptor"]
|
||||||
|
):
|
||||||
|
sp_width = ft["/FontDescriptor"]["/MissingWidth"] # type: ignore
|
||||||
|
else:
|
||||||
|
# will consider width of char as avg(width)/2
|
||||||
|
m = 0
|
||||||
|
cpt = 0
|
||||||
|
for x in w:
|
||||||
|
if x > 0:
|
||||||
|
m += x
|
||||||
|
cpt += 1
|
||||||
|
sp_width = m / max(1, cpt) / 2
|
||||||
|
return sp_width
|
||||||
|
|
@ -0,0 +1,63 @@
|
||||||
|
from typing import Dict, List
|
||||||
|
|
||||||
|
from .adobe_glyphs import adobe_glyphs
|
||||||
|
from .pdfdoc import _pdfdoc_encoding
|
||||||
|
from .std import _std_encoding
|
||||||
|
from .symbol import _symbol_encoding
|
||||||
|
from .zapfding import _zapfding_encoding
|
||||||
|
|
||||||
|
|
||||||
|
def fill_from_encoding(enc: str) -> List[str]:
|
||||||
|
lst: List[str] = []
|
||||||
|
for x in range(256):
|
||||||
|
try:
|
||||||
|
lst += (bytes((x,)).decode(enc),)
|
||||||
|
except Exception:
|
||||||
|
lst += (chr(x),)
|
||||||
|
return lst
|
||||||
|
|
||||||
|
|
||||||
|
def rev_encoding(enc: List[str]) -> Dict[str, int]:
|
||||||
|
rev: Dict[str, int] = {}
|
||||||
|
for i in range(256):
|
||||||
|
char = enc[i]
|
||||||
|
if char == "\u0000":
|
||||||
|
continue
|
||||||
|
assert char not in rev, (
|
||||||
|
str(char) + " at " + str(i) + " already at " + str(rev[char])
|
||||||
|
)
|
||||||
|
rev[char] = i
|
||||||
|
return rev
|
||||||
|
|
||||||
|
|
||||||
|
_win_encoding = fill_from_encoding("cp1252")
|
||||||
|
_mac_encoding = fill_from_encoding("mac_roman")
|
||||||
|
|
||||||
|
|
||||||
|
_win_encoding_rev: Dict[str, int] = rev_encoding(_win_encoding)
|
||||||
|
_mac_encoding_rev: Dict[str, int] = rev_encoding(_mac_encoding)
|
||||||
|
_symbol_encoding_rev: Dict[str, int] = rev_encoding(_symbol_encoding)
|
||||||
|
_zapfding_encoding_rev: Dict[str, int] = rev_encoding(_zapfding_encoding)
|
||||||
|
_pdfdoc_encoding_rev: Dict[str, int] = rev_encoding(_pdfdoc_encoding)
|
||||||
|
|
||||||
|
|
||||||
|
charset_encoding: Dict[str, List[str]] = {
|
||||||
|
"/StandardCoding": _std_encoding,
|
||||||
|
"/WinAnsiEncoding": _win_encoding,
|
||||||
|
"/MacRomanEncoding": _mac_encoding,
|
||||||
|
"/PDFDocEncoding": _pdfdoc_encoding,
|
||||||
|
"/Symbol": _symbol_encoding,
|
||||||
|
"/ZapfDingbats": _zapfding_encoding,
|
||||||
|
}
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"adobe_glyphs",
|
||||||
|
"_std_encoding",
|
||||||
|
"_symbol_encoding",
|
||||||
|
"_zapfding_encoding",
|
||||||
|
"_pdfdoc_encoding",
|
||||||
|
"_pdfdoc_encoding_rev",
|
||||||
|
"_win_encoding",
|
||||||
|
"_mac_encoding",
|
||||||
|
"charset_encoding",
|
||||||
|
]
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,264 @@
|
||||||
|
# PDFDocEncoding Character Set: Table D.2 of PDF Reference 1.7
|
||||||
|
# C.1 Predefined encodings sorted by character name of another PDF reference
|
||||||
|
# Some indices have '\u0000' although they should have something else:
|
||||||
|
# 22: should be '\u0017'
|
||||||
|
_pdfdoc_encoding = [
|
||||||
|
"\u0000",
|
||||||
|
"\u0001",
|
||||||
|
"\u0002",
|
||||||
|
"\u0003",
|
||||||
|
"\u0004",
|
||||||
|
"\u0005",
|
||||||
|
"\u0006",
|
||||||
|
"\u0007", # 0 - 7
|
||||||
|
"\u0008",
|
||||||
|
"\u0009",
|
||||||
|
"\u000a",
|
||||||
|
"\u000b",
|
||||||
|
"\u000c",
|
||||||
|
"\u000d",
|
||||||
|
"\u000e",
|
||||||
|
"\u000f", # 8 - 15
|
||||||
|
"\u0010",
|
||||||
|
"\u0011",
|
||||||
|
"\u0012",
|
||||||
|
"\u0013",
|
||||||
|
"\u0014",
|
||||||
|
"\u0015",
|
||||||
|
"\u0000",
|
||||||
|
"\u0017", # 16 - 23
|
||||||
|
"\u02d8",
|
||||||
|
"\u02c7",
|
||||||
|
"\u02c6",
|
||||||
|
"\u02d9",
|
||||||
|
"\u02dd",
|
||||||
|
"\u02db",
|
||||||
|
"\u02da",
|
||||||
|
"\u02dc", # 24 - 31
|
||||||
|
"\u0020",
|
||||||
|
"\u0021",
|
||||||
|
"\u0022",
|
||||||
|
"\u0023",
|
||||||
|
"\u0024",
|
||||||
|
"\u0025",
|
||||||
|
"\u0026",
|
||||||
|
"\u0027", # 32 - 39
|
||||||
|
"\u0028",
|
||||||
|
"\u0029",
|
||||||
|
"\u002a",
|
||||||
|
"\u002b",
|
||||||
|
"\u002c",
|
||||||
|
"\u002d",
|
||||||
|
"\u002e",
|
||||||
|
"\u002f", # 40 - 47
|
||||||
|
"\u0030",
|
||||||
|
"\u0031",
|
||||||
|
"\u0032",
|
||||||
|
"\u0033",
|
||||||
|
"\u0034",
|
||||||
|
"\u0035",
|
||||||
|
"\u0036",
|
||||||
|
"\u0037", # 48 - 55
|
||||||
|
"\u0038",
|
||||||
|
"\u0039",
|
||||||
|
"\u003a",
|
||||||
|
"\u003b",
|
||||||
|
"\u003c",
|
||||||
|
"\u003d",
|
||||||
|
"\u003e",
|
||||||
|
"\u003f", # 56 - 63
|
||||||
|
"\u0040",
|
||||||
|
"\u0041",
|
||||||
|
"\u0042",
|
||||||
|
"\u0043",
|
||||||
|
"\u0044",
|
||||||
|
"\u0045",
|
||||||
|
"\u0046",
|
||||||
|
"\u0047", # 64 - 71
|
||||||
|
"\u0048",
|
||||||
|
"\u0049",
|
||||||
|
"\u004a",
|
||||||
|
"\u004b",
|
||||||
|
"\u004c",
|
||||||
|
"\u004d",
|
||||||
|
"\u004e",
|
||||||
|
"\u004f", # 72 - 79
|
||||||
|
"\u0050",
|
||||||
|
"\u0051",
|
||||||
|
"\u0052",
|
||||||
|
"\u0053",
|
||||||
|
"\u0054",
|
||||||
|
"\u0055",
|
||||||
|
"\u0056",
|
||||||
|
"\u0057", # 80 - 87
|
||||||
|
"\u0058",
|
||||||
|
"\u0059",
|
||||||
|
"\u005a",
|
||||||
|
"\u005b",
|
||||||
|
"\u005c",
|
||||||
|
"\u005d",
|
||||||
|
"\u005e",
|
||||||
|
"\u005f", # 88 - 95
|
||||||
|
"\u0060",
|
||||||
|
"\u0061",
|
||||||
|
"\u0062",
|
||||||
|
"\u0063",
|
||||||
|
"\u0064",
|
||||||
|
"\u0065",
|
||||||
|
"\u0066",
|
||||||
|
"\u0067", # 96 - 103
|
||||||
|
"\u0068",
|
||||||
|
"\u0069",
|
||||||
|
"\u006a",
|
||||||
|
"\u006b",
|
||||||
|
"\u006c",
|
||||||
|
"\u006d",
|
||||||
|
"\u006e",
|
||||||
|
"\u006f", # 104 - 111
|
||||||
|
"\u0070",
|
||||||
|
"\u0071",
|
||||||
|
"\u0072",
|
||||||
|
"\u0073",
|
||||||
|
"\u0074",
|
||||||
|
"\u0075",
|
||||||
|
"\u0076",
|
||||||
|
"\u0077", # 112 - 119
|
||||||
|
"\u0078",
|
||||||
|
"\u0079",
|
||||||
|
"\u007a",
|
||||||
|
"\u007b",
|
||||||
|
"\u007c",
|
||||||
|
"\u007d",
|
||||||
|
"\u007e",
|
||||||
|
"\u0000", # 120 - 127
|
||||||
|
"\u2022",
|
||||||
|
"\u2020",
|
||||||
|
"\u2021",
|
||||||
|
"\u2026",
|
||||||
|
"\u2014",
|
||||||
|
"\u2013",
|
||||||
|
"\u0192",
|
||||||
|
"\u2044", # 128 - 135
|
||||||
|
"\u2039",
|
||||||
|
"\u203a",
|
||||||
|
"\u2212",
|
||||||
|
"\u2030",
|
||||||
|
"\u201e",
|
||||||
|
"\u201c",
|
||||||
|
"\u201d",
|
||||||
|
"\u2018", # 136 - 143
|
||||||
|
"\u2019",
|
||||||
|
"\u201a",
|
||||||
|
"\u2122",
|
||||||
|
"\ufb01",
|
||||||
|
"\ufb02",
|
||||||
|
"\u0141",
|
||||||
|
"\u0152",
|
||||||
|
"\u0160", # 144 - 151
|
||||||
|
"\u0178",
|
||||||
|
"\u017d",
|
||||||
|
"\u0131",
|
||||||
|
"\u0142",
|
||||||
|
"\u0153",
|
||||||
|
"\u0161",
|
||||||
|
"\u017e",
|
||||||
|
"\u0000", # 152 - 159
|
||||||
|
"\u20ac",
|
||||||
|
"\u00a1",
|
||||||
|
"\u00a2",
|
||||||
|
"\u00a3",
|
||||||
|
"\u00a4",
|
||||||
|
"\u00a5",
|
||||||
|
"\u00a6",
|
||||||
|
"\u00a7", # 160 - 167
|
||||||
|
"\u00a8",
|
||||||
|
"\u00a9",
|
||||||
|
"\u00aa",
|
||||||
|
"\u00ab",
|
||||||
|
"\u00ac",
|
||||||
|
"\u0000",
|
||||||
|
"\u00ae",
|
||||||
|
"\u00af", # 168 - 175
|
||||||
|
"\u00b0",
|
||||||
|
"\u00b1",
|
||||||
|
"\u00b2",
|
||||||
|
"\u00b3",
|
||||||
|
"\u00b4",
|
||||||
|
"\u00b5",
|
||||||
|
"\u00b6",
|
||||||
|
"\u00b7", # 176 - 183
|
||||||
|
"\u00b8",
|
||||||
|
"\u00b9",
|
||||||
|
"\u00ba",
|
||||||
|
"\u00bb",
|
||||||
|
"\u00bc",
|
||||||
|
"\u00bd",
|
||||||
|
"\u00be",
|
||||||
|
"\u00bf", # 184 - 191
|
||||||
|
"\u00c0",
|
||||||
|
"\u00c1",
|
||||||
|
"\u00c2",
|
||||||
|
"\u00c3",
|
||||||
|
"\u00c4",
|
||||||
|
"\u00c5",
|
||||||
|
"\u00c6",
|
||||||
|
"\u00c7", # 192 - 199
|
||||||
|
"\u00c8",
|
||||||
|
"\u00c9",
|
||||||
|
"\u00ca",
|
||||||
|
"\u00cb",
|
||||||
|
"\u00cc",
|
||||||
|
"\u00cd",
|
||||||
|
"\u00ce",
|
||||||
|
"\u00cf", # 200 - 207
|
||||||
|
"\u00d0",
|
||||||
|
"\u00d1",
|
||||||
|
"\u00d2",
|
||||||
|
"\u00d3",
|
||||||
|
"\u00d4",
|
||||||
|
"\u00d5",
|
||||||
|
"\u00d6",
|
||||||
|
"\u00d7", # 208 - 215
|
||||||
|
"\u00d8",
|
||||||
|
"\u00d9",
|
||||||
|
"\u00da",
|
||||||
|
"\u00db",
|
||||||
|
"\u00dc",
|
||||||
|
"\u00dd",
|
||||||
|
"\u00de",
|
||||||
|
"\u00df", # 216 - 223
|
||||||
|
"\u00e0",
|
||||||
|
"\u00e1",
|
||||||
|
"\u00e2",
|
||||||
|
"\u00e3",
|
||||||
|
"\u00e4",
|
||||||
|
"\u00e5",
|
||||||
|
"\u00e6",
|
||||||
|
"\u00e7", # 224 - 231
|
||||||
|
"\u00e8",
|
||||||
|
"\u00e9",
|
||||||
|
"\u00ea",
|
||||||
|
"\u00eb",
|
||||||
|
"\u00ec",
|
||||||
|
"\u00ed",
|
||||||
|
"\u00ee",
|
||||||
|
"\u00ef", # 232 - 239
|
||||||
|
"\u00f0",
|
||||||
|
"\u00f1",
|
||||||
|
"\u00f2",
|
||||||
|
"\u00f3",
|
||||||
|
"\u00f4",
|
||||||
|
"\u00f5",
|
||||||
|
"\u00f6",
|
||||||
|
"\u00f7", # 240 - 247
|
||||||
|
"\u00f8",
|
||||||
|
"\u00f9",
|
||||||
|
"\u00fa",
|
||||||
|
"\u00fb",
|
||||||
|
"\u00fc",
|
||||||
|
"\u00fd",
|
||||||
|
"\u00fe",
|
||||||
|
"\u00ff", # 248 - 255
|
||||||
|
]
|
||||||
|
|
||||||
|
assert len(_pdfdoc_encoding) == 256
|
||||||
|
|
@ -0,0 +1,258 @@
|
||||||
|
_std_encoding = [
|
||||||
|
"\x00",
|
||||||
|
"\x01",
|
||||||
|
"\x02",
|
||||||
|
"\x03",
|
||||||
|
"\x04",
|
||||||
|
"\x05",
|
||||||
|
"\x06",
|
||||||
|
"\x07",
|
||||||
|
"\x08",
|
||||||
|
"\t",
|
||||||
|
"\n",
|
||||||
|
"\x0b",
|
||||||
|
"\x0c",
|
||||||
|
"\r",
|
||||||
|
"\x0e",
|
||||||
|
"\x0f",
|
||||||
|
"\x10",
|
||||||
|
"\x11",
|
||||||
|
"\x12",
|
||||||
|
"\x13",
|
||||||
|
"\x14",
|
||||||
|
"\x15",
|
||||||
|
"\x16",
|
||||||
|
"\x17",
|
||||||
|
"\x18",
|
||||||
|
"\x19",
|
||||||
|
"\x1a",
|
||||||
|
"\x1b",
|
||||||
|
"\x1c",
|
||||||
|
"\x1d",
|
||||||
|
"\x1e",
|
||||||
|
"\x1f",
|
||||||
|
" ",
|
||||||
|
"!",
|
||||||
|
'"',
|
||||||
|
"#",
|
||||||
|
"$",
|
||||||
|
"%",
|
||||||
|
"&",
|
||||||
|
"’",
|
||||||
|
"(",
|
||||||
|
")",
|
||||||
|
"*",
|
||||||
|
"+",
|
||||||
|
",",
|
||||||
|
"-",
|
||||||
|
".",
|
||||||
|
"/",
|
||||||
|
"0",
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
"4",
|
||||||
|
"5",
|
||||||
|
"6",
|
||||||
|
"7",
|
||||||
|
"8",
|
||||||
|
"9",
|
||||||
|
":",
|
||||||
|
";",
|
||||||
|
"<",
|
||||||
|
"=",
|
||||||
|
">",
|
||||||
|
"?",
|
||||||
|
"@",
|
||||||
|
"A",
|
||||||
|
"B",
|
||||||
|
"C",
|
||||||
|
"D",
|
||||||
|
"E",
|
||||||
|
"F",
|
||||||
|
"G",
|
||||||
|
"H",
|
||||||
|
"I",
|
||||||
|
"J",
|
||||||
|
"K",
|
||||||
|
"L",
|
||||||
|
"M",
|
||||||
|
"N",
|
||||||
|
"O",
|
||||||
|
"P",
|
||||||
|
"Q",
|
||||||
|
"R",
|
||||||
|
"S",
|
||||||
|
"T",
|
||||||
|
"U",
|
||||||
|
"V",
|
||||||
|
"W",
|
||||||
|
"X",
|
||||||
|
"Y",
|
||||||
|
"Z",
|
||||||
|
"[",
|
||||||
|
"\\",
|
||||||
|
"]",
|
||||||
|
"^",
|
||||||
|
"_",
|
||||||
|
"‘",
|
||||||
|
"a",
|
||||||
|
"b",
|
||||||
|
"c",
|
||||||
|
"d",
|
||||||
|
"e",
|
||||||
|
"f",
|
||||||
|
"g",
|
||||||
|
"h",
|
||||||
|
"i",
|
||||||
|
"j",
|
||||||
|
"k",
|
||||||
|
"l",
|
||||||
|
"m",
|
||||||
|
"n",
|
||||||
|
"o",
|
||||||
|
"p",
|
||||||
|
"q",
|
||||||
|
"r",
|
||||||
|
"s",
|
||||||
|
"t",
|
||||||
|
"u",
|
||||||
|
"v",
|
||||||
|
"w",
|
||||||
|
"x",
|
||||||
|
"y",
|
||||||
|
"z",
|
||||||
|
"{",
|
||||||
|
"|",
|
||||||
|
"}",
|
||||||
|
"~",
|
||||||
|
"\x7f",
|
||||||
|
"\x80",
|
||||||
|
"\x81",
|
||||||
|
"\x82",
|
||||||
|
"\x83",
|
||||||
|
"\x84",
|
||||||
|
"\x85",
|
||||||
|
"\x86",
|
||||||
|
"\x87",
|
||||||
|
"\x88",
|
||||||
|
"\x89",
|
||||||
|
"\x8a",
|
||||||
|
"\x8b",
|
||||||
|
"\x8c",
|
||||||
|
"\x8d",
|
||||||
|
"\x8e",
|
||||||
|
"\x8f",
|
||||||
|
"\x90",
|
||||||
|
"\x91",
|
||||||
|
"\x92",
|
||||||
|
"\x93",
|
||||||
|
"\x94",
|
||||||
|
"\x95",
|
||||||
|
"\x96",
|
||||||
|
"\x97",
|
||||||
|
"\x98",
|
||||||
|
"\x99",
|
||||||
|
"\x9a",
|
||||||
|
"\x9b",
|
||||||
|
"\x9c",
|
||||||
|
"\x9d",
|
||||||
|
"\x9e",
|
||||||
|
"\x9f",
|
||||||
|
"\xa0",
|
||||||
|
"¡",
|
||||||
|
"¢",
|
||||||
|
"£",
|
||||||
|
"⁄",
|
||||||
|
"¥",
|
||||||
|
"ƒ",
|
||||||
|
"§",
|
||||||
|
"¤",
|
||||||
|
"'",
|
||||||
|
"“",
|
||||||
|
"«",
|
||||||
|
"‹",
|
||||||
|
"›",
|
||||||
|
"fi",
|
||||||
|
"fl",
|
||||||
|
"°",
|
||||||
|
"–",
|
||||||
|
"†",
|
||||||
|
"‡",
|
||||||
|
"·",
|
||||||
|
"µ",
|
||||||
|
"¶",
|
||||||
|
"•",
|
||||||
|
"‚",
|
||||||
|
"„",
|
||||||
|
"”",
|
||||||
|
"»",
|
||||||
|
"…",
|
||||||
|
"‰",
|
||||||
|
"¾",
|
||||||
|
"¿",
|
||||||
|
"À",
|
||||||
|
"`",
|
||||||
|
"´",
|
||||||
|
"ˆ",
|
||||||
|
"˜",
|
||||||
|
"¯",
|
||||||
|
"˘",
|
||||||
|
"˙",
|
||||||
|
"¨",
|
||||||
|
"É",
|
||||||
|
"˚",
|
||||||
|
"¸",
|
||||||
|
"Ì",
|
||||||
|
"˝",
|
||||||
|
"˛",
|
||||||
|
"ˇ",
|
||||||
|
"—",
|
||||||
|
"Ñ",
|
||||||
|
"Ò",
|
||||||
|
"Ó",
|
||||||
|
"Ô",
|
||||||
|
"Õ",
|
||||||
|
"Ö",
|
||||||
|
"×",
|
||||||
|
"Ø",
|
||||||
|
"Ù",
|
||||||
|
"Ú",
|
||||||
|
"Û",
|
||||||
|
"Ü",
|
||||||
|
"Ý",
|
||||||
|
"Þ",
|
||||||
|
"ß",
|
||||||
|
"à",
|
||||||
|
"Æ",
|
||||||
|
"â",
|
||||||
|
"ª",
|
||||||
|
"ä",
|
||||||
|
"å",
|
||||||
|
"æ",
|
||||||
|
"ç",
|
||||||
|
"Ł",
|
||||||
|
"Ø",
|
||||||
|
"Œ",
|
||||||
|
"º",
|
||||||
|
"ì",
|
||||||
|
"í",
|
||||||
|
"î",
|
||||||
|
"ï",
|
||||||
|
"ð",
|
||||||
|
"æ",
|
||||||
|
"ò",
|
||||||
|
"ó",
|
||||||
|
"ô",
|
||||||
|
"ı",
|
||||||
|
"ö",
|
||||||
|
"÷",
|
||||||
|
"ł",
|
||||||
|
"ø",
|
||||||
|
"œ",
|
||||||
|
"ß",
|
||||||
|
"ü",
|
||||||
|
"ý",
|
||||||
|
"þ",
|
||||||
|
"ÿ",
|
||||||
|
]
|
||||||
|
|
@ -0,0 +1,260 @@
|
||||||
|
# manually generated from https://www.unicode.org/Public/MAPPINGS/VENDORS/ADOBE/symbol.txt
|
||||||
|
_symbol_encoding = [
|
||||||
|
"\u0000",
|
||||||
|
"\u0001",
|
||||||
|
"\u0002",
|
||||||
|
"\u0003",
|
||||||
|
"\u0004",
|
||||||
|
"\u0005",
|
||||||
|
"\u0006",
|
||||||
|
"\u0007",
|
||||||
|
"\u0008",
|
||||||
|
"\u0009",
|
||||||
|
"\u000A",
|
||||||
|
"\u000B",
|
||||||
|
"\u000C",
|
||||||
|
"\u000D",
|
||||||
|
"\u000E",
|
||||||
|
"\u000F",
|
||||||
|
"\u0010",
|
||||||
|
"\u0011",
|
||||||
|
"\u0012",
|
||||||
|
"\u0013",
|
||||||
|
"\u0014",
|
||||||
|
"\u0015",
|
||||||
|
"\u0016",
|
||||||
|
"\u0017",
|
||||||
|
"\u0018",
|
||||||
|
"\u0019",
|
||||||
|
"\u001A",
|
||||||
|
"\u001B",
|
||||||
|
"\u001C",
|
||||||
|
"\u001D",
|
||||||
|
"\u001E",
|
||||||
|
"\u001F",
|
||||||
|
"\u0020",
|
||||||
|
"\u0021",
|
||||||
|
"\u2200",
|
||||||
|
"\u0023",
|
||||||
|
"\u2203",
|
||||||
|
"\u0025",
|
||||||
|
"\u0026",
|
||||||
|
"\u220B",
|
||||||
|
"\u0028",
|
||||||
|
"\u0029",
|
||||||
|
"\u2217",
|
||||||
|
"\u002B",
|
||||||
|
"\u002C",
|
||||||
|
"\u2212",
|
||||||
|
"\u002E",
|
||||||
|
"\u002F",
|
||||||
|
"\u0030",
|
||||||
|
"\u0031",
|
||||||
|
"\u0032",
|
||||||
|
"\u0033",
|
||||||
|
"\u0034",
|
||||||
|
"\u0035",
|
||||||
|
"\u0036",
|
||||||
|
"\u0037",
|
||||||
|
"\u0038",
|
||||||
|
"\u0039",
|
||||||
|
"\u003A",
|
||||||
|
"\u003B",
|
||||||
|
"\u003C",
|
||||||
|
"\u003D",
|
||||||
|
"\u003E",
|
||||||
|
"\u003F",
|
||||||
|
"\u2245",
|
||||||
|
"\u0391",
|
||||||
|
"\u0392",
|
||||||
|
"\u03A7",
|
||||||
|
"\u0394",
|
||||||
|
"\u0395",
|
||||||
|
"\u03A6",
|
||||||
|
"\u0393",
|
||||||
|
"\u0397",
|
||||||
|
"\u0399",
|
||||||
|
"\u03D1",
|
||||||
|
"\u039A",
|
||||||
|
"\u039B",
|
||||||
|
"\u039C",
|
||||||
|
"\u039D",
|
||||||
|
"\u039F",
|
||||||
|
"\u03A0",
|
||||||
|
"\u0398",
|
||||||
|
"\u03A1",
|
||||||
|
"\u03A3",
|
||||||
|
"\u03A4",
|
||||||
|
"\u03A5",
|
||||||
|
"\u03C2",
|
||||||
|
"\u03A9",
|
||||||
|
"\u039E",
|
||||||
|
"\u03A8",
|
||||||
|
"\u0396",
|
||||||
|
"\u005B",
|
||||||
|
"\u2234",
|
||||||
|
"\u005D",
|
||||||
|
"\u22A5",
|
||||||
|
"\u005F",
|
||||||
|
"\uF8E5",
|
||||||
|
"\u03B1",
|
||||||
|
"\u03B2",
|
||||||
|
"\u03C7",
|
||||||
|
"\u03B4",
|
||||||
|
"\u03B5",
|
||||||
|
"\u03C6",
|
||||||
|
"\u03B3",
|
||||||
|
"\u03B7",
|
||||||
|
"\u03B9",
|
||||||
|
"\u03D5",
|
||||||
|
"\u03BA",
|
||||||
|
"\u03BB",
|
||||||
|
"\u00B5",
|
||||||
|
"\u03BD",
|
||||||
|
"\u03BF",
|
||||||
|
"\u03C0",
|
||||||
|
"\u03B8",
|
||||||
|
"\u03C1",
|
||||||
|
"\u03C3",
|
||||||
|
"\u03C4",
|
||||||
|
"\u03C5",
|
||||||
|
"\u03D6",
|
||||||
|
"\u03C9",
|
||||||
|
"\u03BE",
|
||||||
|
"\u03C8",
|
||||||
|
"\u03B6",
|
||||||
|
"\u007B",
|
||||||
|
"\u007C",
|
||||||
|
"\u007D",
|
||||||
|
"\u223C",
|
||||||
|
"\u007F",
|
||||||
|
"\u0080",
|
||||||
|
"\u0081",
|
||||||
|
"\u0082",
|
||||||
|
"\u0083",
|
||||||
|
"\u0084",
|
||||||
|
"\u0085",
|
||||||
|
"\u0086",
|
||||||
|
"\u0087",
|
||||||
|
"\u0088",
|
||||||
|
"\u0089",
|
||||||
|
"\u008A",
|
||||||
|
"\u008B",
|
||||||
|
"\u008C",
|
||||||
|
"\u008D",
|
||||||
|
"\u008E",
|
||||||
|
"\u008F",
|
||||||
|
"\u0090",
|
||||||
|
"\u0091",
|
||||||
|
"\u0092",
|
||||||
|
"\u0093",
|
||||||
|
"\u0094",
|
||||||
|
"\u0095",
|
||||||
|
"\u0096",
|
||||||
|
"\u0097",
|
||||||
|
"\u0098",
|
||||||
|
"\u0099",
|
||||||
|
"\u009A",
|
||||||
|
"\u009B",
|
||||||
|
"\u009C",
|
||||||
|
"\u009D",
|
||||||
|
"\u009E",
|
||||||
|
"\u009F",
|
||||||
|
"\u20AC",
|
||||||
|
"\u03D2",
|
||||||
|
"\u2032",
|
||||||
|
"\u2264",
|
||||||
|
"\u2044",
|
||||||
|
"\u221E",
|
||||||
|
"\u0192",
|
||||||
|
"\u2663",
|
||||||
|
"\u2666",
|
||||||
|
"\u2665",
|
||||||
|
"\u2660",
|
||||||
|
"\u2194",
|
||||||
|
"\u2190",
|
||||||
|
"\u2191",
|
||||||
|
"\u2192",
|
||||||
|
"\u2193",
|
||||||
|
"\u00B0",
|
||||||
|
"\u00B1",
|
||||||
|
"\u2033",
|
||||||
|
"\u2265",
|
||||||
|
"\u00D7",
|
||||||
|
"\u221D",
|
||||||
|
"\u2202",
|
||||||
|
"\u2022",
|
||||||
|
"\u00F7",
|
||||||
|
"\u2260",
|
||||||
|
"\u2261",
|
||||||
|
"\u2248",
|
||||||
|
"\u2026",
|
||||||
|
"\uF8E6",
|
||||||
|
"\uF8E7",
|
||||||
|
"\u21B5",
|
||||||
|
"\u2135",
|
||||||
|
"\u2111",
|
||||||
|
"\u211C",
|
||||||
|
"\u2118",
|
||||||
|
"\u2297",
|
||||||
|
"\u2295",
|
||||||
|
"\u2205",
|
||||||
|
"\u2229",
|
||||||
|
"\u222A",
|
||||||
|
"\u2283",
|
||||||
|
"\u2287",
|
||||||
|
"\u2284",
|
||||||
|
"\u2282",
|
||||||
|
"\u2286",
|
||||||
|
"\u2208",
|
||||||
|
"\u2209",
|
||||||
|
"\u2220",
|
||||||
|
"\u2207",
|
||||||
|
"\uF6DA",
|
||||||
|
"\uF6D9",
|
||||||
|
"\uF6DB",
|
||||||
|
"\u220F",
|
||||||
|
"\u221A",
|
||||||
|
"\u22C5",
|
||||||
|
"\u00AC",
|
||||||
|
"\u2227",
|
||||||
|
"\u2228",
|
||||||
|
"\u21D4",
|
||||||
|
"\u21D0",
|
||||||
|
"\u21D1",
|
||||||
|
"\u21D2",
|
||||||
|
"\u21D3",
|
||||||
|
"\u25CA",
|
||||||
|
"\u2329",
|
||||||
|
"\uF8E8",
|
||||||
|
"\uF8E9",
|
||||||
|
"\uF8EA",
|
||||||
|
"\u2211",
|
||||||
|
"\uF8EB",
|
||||||
|
"\uF8EC",
|
||||||
|
"\uF8ED",
|
||||||
|
"\uF8EE",
|
||||||
|
"\uF8EF",
|
||||||
|
"\uF8F0",
|
||||||
|
"\uF8F1",
|
||||||
|
"\uF8F2",
|
||||||
|
"\uF8F3",
|
||||||
|
"\uF8F4",
|
||||||
|
"\u00F0",
|
||||||
|
"\u232A",
|
||||||
|
"\u222B",
|
||||||
|
"\u2320",
|
||||||
|
"\uF8F5",
|
||||||
|
"\u2321",
|
||||||
|
"\uF8F6",
|
||||||
|
"\uF8F7",
|
||||||
|
"\uF8F8",
|
||||||
|
"\uF8F9",
|
||||||
|
"\uF8FA",
|
||||||
|
"\uF8FB",
|
||||||
|
"\uF8FC",
|
||||||
|
"\uF8FD",
|
||||||
|
"\uF8FE",
|
||||||
|
"\u00FF",
|
||||||
|
]
|
||||||
|
assert len(_symbol_encoding) == 256
|
||||||
|
|
@ -0,0 +1,261 @@
|
||||||
|
# manually generated from https://www.unicode.org/Public/MAPPINGS/VENDORS/ADOBE/zdingbat.txt
|
||||||
|
|
||||||
|
_zapfding_encoding = [
|
||||||
|
"\u0000",
|
||||||
|
"\u0001",
|
||||||
|
"\u0002",
|
||||||
|
"\u0003",
|
||||||
|
"\u0004",
|
||||||
|
"\u0005",
|
||||||
|
"\u0006",
|
||||||
|
"\u0007",
|
||||||
|
"\u0008",
|
||||||
|
"\u0009",
|
||||||
|
"\u000A",
|
||||||
|
"\u000B",
|
||||||
|
"\u000C",
|
||||||
|
"\u000D",
|
||||||
|
"\u000E",
|
||||||
|
"\u000F",
|
||||||
|
"\u0010",
|
||||||
|
"\u0011",
|
||||||
|
"\u0012",
|
||||||
|
"\u0013",
|
||||||
|
"\u0014",
|
||||||
|
"\u0015",
|
||||||
|
"\u0016",
|
||||||
|
"\u0017",
|
||||||
|
"\u0018",
|
||||||
|
"\u0019",
|
||||||
|
"\u001A",
|
||||||
|
"\u001B",
|
||||||
|
"\u001C",
|
||||||
|
"\u001D",
|
||||||
|
"\u001E",
|
||||||
|
"\u001F",
|
||||||
|
"\u0020",
|
||||||
|
"\u2701",
|
||||||
|
"\u2702",
|
||||||
|
"\u2703",
|
||||||
|
"\u2704",
|
||||||
|
"\u260E",
|
||||||
|
"\u2706",
|
||||||
|
"\u2707",
|
||||||
|
"\u2708",
|
||||||
|
"\u2709",
|
||||||
|
"\u261B",
|
||||||
|
"\u261E",
|
||||||
|
"\u270C",
|
||||||
|
"\u270D",
|
||||||
|
"\u270E",
|
||||||
|
"\u270F",
|
||||||
|
"\u2710",
|
||||||
|
"\u2711",
|
||||||
|
"\u2712",
|
||||||
|
"\u2713",
|
||||||
|
"\u2714",
|
||||||
|
"\u2715",
|
||||||
|
"\u2716",
|
||||||
|
"\u2717",
|
||||||
|
"\u2718",
|
||||||
|
"\u2719",
|
||||||
|
"\u271A",
|
||||||
|
"\u271B",
|
||||||
|
"\u271C",
|
||||||
|
"\u271D",
|
||||||
|
"\u271E",
|
||||||
|
"\u271F",
|
||||||
|
"\u2720",
|
||||||
|
"\u2721",
|
||||||
|
"\u2722",
|
||||||
|
"\u2723",
|
||||||
|
"\u2724",
|
||||||
|
"\u2725",
|
||||||
|
"\u2726",
|
||||||
|
"\u2727",
|
||||||
|
"\u2605",
|
||||||
|
"\u2729",
|
||||||
|
"\u272A",
|
||||||
|
"\u272B",
|
||||||
|
"\u272C",
|
||||||
|
"\u272D",
|
||||||
|
"\u272E",
|
||||||
|
"\u272F",
|
||||||
|
"\u2730",
|
||||||
|
"\u2731",
|
||||||
|
"\u2732",
|
||||||
|
"\u2733",
|
||||||
|
"\u2734",
|
||||||
|
"\u2735",
|
||||||
|
"\u2736",
|
||||||
|
"\u2737",
|
||||||
|
"\u2738",
|
||||||
|
"\u2739",
|
||||||
|
"\u273A",
|
||||||
|
"\u273B",
|
||||||
|
"\u273C",
|
||||||
|
"\u273D",
|
||||||
|
"\u273E",
|
||||||
|
"\u273F",
|
||||||
|
"\u2740",
|
||||||
|
"\u2741",
|
||||||
|
"\u2742",
|
||||||
|
"\u2743",
|
||||||
|
"\u2744",
|
||||||
|
"\u2745",
|
||||||
|
"\u2746",
|
||||||
|
"\u2747",
|
||||||
|
"\u2748",
|
||||||
|
"\u2749",
|
||||||
|
"\u274A",
|
||||||
|
"\u274B",
|
||||||
|
"\u25CF",
|
||||||
|
"\u274D",
|
||||||
|
"\u25A0",
|
||||||
|
"\u274F",
|
||||||
|
"\u2750",
|
||||||
|
"\u2751",
|
||||||
|
"\u2752",
|
||||||
|
"\u25B2",
|
||||||
|
"\u25BC",
|
||||||
|
"\u25C6",
|
||||||
|
"\u2756",
|
||||||
|
"\u25D7",
|
||||||
|
"\u2758",
|
||||||
|
"\u2759",
|
||||||
|
"\u275A",
|
||||||
|
"\u275B",
|
||||||
|
"\u275C",
|
||||||
|
"\u275D",
|
||||||
|
"\u275E",
|
||||||
|
"\u007F",
|
||||||
|
"\uF8D7",
|
||||||
|
"\uF8D8",
|
||||||
|
"\uF8D9",
|
||||||
|
"\uF8DA",
|
||||||
|
"\uF8DB",
|
||||||
|
"\uF8DC",
|
||||||
|
"\uF8DD",
|
||||||
|
"\uF8DE",
|
||||||
|
"\uF8DF",
|
||||||
|
"\uF8E0",
|
||||||
|
"\uF8E1",
|
||||||
|
"\uF8E2",
|
||||||
|
"\uF8E3",
|
||||||
|
"\uF8E4",
|
||||||
|
"\u008E",
|
||||||
|
"\u008F",
|
||||||
|
"\u0090",
|
||||||
|
"\u0091",
|
||||||
|
"\u0092",
|
||||||
|
"\u0093",
|
||||||
|
"\u0094",
|
||||||
|
"\u0095",
|
||||||
|
"\u0096",
|
||||||
|
"\u0097",
|
||||||
|
"\u0098",
|
||||||
|
"\u0099",
|
||||||
|
"\u009A",
|
||||||
|
"\u009B",
|
||||||
|
"\u009C",
|
||||||
|
"\u009D",
|
||||||
|
"\u009E",
|
||||||
|
"\u009F",
|
||||||
|
"\u00A0",
|
||||||
|
"\u2761",
|
||||||
|
"\u2762",
|
||||||
|
"\u2763",
|
||||||
|
"\u2764",
|
||||||
|
"\u2765",
|
||||||
|
"\u2766",
|
||||||
|
"\u2767",
|
||||||
|
"\u2663",
|
||||||
|
"\u2666",
|
||||||
|
"\u2665",
|
||||||
|
"\u2660",
|
||||||
|
"\u2460",
|
||||||
|
"\u2461",
|
||||||
|
"\u2462",
|
||||||
|
"\u2463",
|
||||||
|
"\u2464",
|
||||||
|
"\u2465",
|
||||||
|
"\u2466",
|
||||||
|
"\u2467",
|
||||||
|
"\u2468",
|
||||||
|
"\u2469",
|
||||||
|
"\u2776",
|
||||||
|
"\u2777",
|
||||||
|
"\u2778",
|
||||||
|
"\u2779",
|
||||||
|
"\u277A",
|
||||||
|
"\u277B",
|
||||||
|
"\u277C",
|
||||||
|
"\u277D",
|
||||||
|
"\u277E",
|
||||||
|
"\u277F",
|
||||||
|
"\u2780",
|
||||||
|
"\u2781",
|
||||||
|
"\u2782",
|
||||||
|
"\u2783",
|
||||||
|
"\u2784",
|
||||||
|
"\u2785",
|
||||||
|
"\u2786",
|
||||||
|
"\u2787",
|
||||||
|
"\u2788",
|
||||||
|
"\u2789",
|
||||||
|
"\u278A",
|
||||||
|
"\u278B",
|
||||||
|
"\u278C",
|
||||||
|
"\u278D",
|
||||||
|
"\u278E",
|
||||||
|
"\u278F",
|
||||||
|
"\u2790",
|
||||||
|
"\u2791",
|
||||||
|
"\u2792",
|
||||||
|
"\u2793",
|
||||||
|
"\u2794",
|
||||||
|
"\u2192",
|
||||||
|
"\u2194",
|
||||||
|
"\u2195",
|
||||||
|
"\u2798",
|
||||||
|
"\u2799",
|
||||||
|
"\u279A",
|
||||||
|
"\u279B",
|
||||||
|
"\u279C",
|
||||||
|
"\u279D",
|
||||||
|
"\u279E",
|
||||||
|
"\u279F",
|
||||||
|
"\u27A0",
|
||||||
|
"\u27A1",
|
||||||
|
"\u27A2",
|
||||||
|
"\u27A3",
|
||||||
|
"\u27A4",
|
||||||
|
"\u27A5",
|
||||||
|
"\u27A6",
|
||||||
|
"\u27A7",
|
||||||
|
"\u27A8",
|
||||||
|
"\u27A9",
|
||||||
|
"\u27AA",
|
||||||
|
"\u27AB",
|
||||||
|
"\u27AC",
|
||||||
|
"\u27AD",
|
||||||
|
"\u27AE",
|
||||||
|
"\u27AF",
|
||||||
|
"\u00F0",
|
||||||
|
"\u27B1",
|
||||||
|
"\u27B2",
|
||||||
|
"\u27B3",
|
||||||
|
"\u27B4",
|
||||||
|
"\u27B5",
|
||||||
|
"\u27B6",
|
||||||
|
"\u27B7",
|
||||||
|
"\u27B8",
|
||||||
|
"\u27B9",
|
||||||
|
"\u27BA",
|
||||||
|
"\u27BB",
|
||||||
|
"\u27BC",
|
||||||
|
"\u27BD",
|
||||||
|
"\u27BE",
|
||||||
|
"\u00FF",
|
||||||
|
]
|
||||||
|
assert len(_zapfding_encoding) == 256
|
||||||
|
|
@ -0,0 +1,895 @@
|
||||||
|
# Copyright (c) 2022, exiledkingcc
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
import hashlib
import random
import secrets
import struct
from enum import IntEnum
from typing import Any, Dict, Optional, Tuple, Union, cast

from ._utils import logger_warning
from .errors import DependencyError
from .generic import (
    ArrayObject,
    ByteStringObject,
    DictionaryObject,
    PdfObject,
    StreamObject,
    TextStringObject,
    create_string_object,
)
|
||||||
|
|
||||||
|
|
||||||
|
class CryptBase:
    """Base class for crypt filters.

    The default implementation is the identity transform: data passes
    through unchanged.  Concrete ciphers override both methods.
    """

    def encrypt(self, data: bytes) -> bytes:  # pragma: no cover
        return data

    def decrypt(self, data: bytes) -> bytes:  # pragma: no cover
        return data


class CryptIdentity(CryptBase):
    """The /Identity crypt filter: data is not transformed at all."""


try:
    # Prefer the fast C implementations from PyCryptodome when available.
    from Crypto.Cipher import AES, ARC4  # type: ignore[import]
    from Crypto.Util.Padding import pad  # type: ignore[import]

    class CryptRC4(CryptBase):
        """RC4 stream cipher (PyCryptodome backend)."""

        def __init__(self, key: bytes) -> None:
            self.key = key

        def encrypt(self, data: bytes) -> bytes:
            return ARC4.ARC4Cipher(self.key).encrypt(data)

        def decrypt(self, data: bytes) -> bytes:
            return ARC4.ARC4Cipher(self.key).decrypt(data)

    class CryptAES(CryptBase):
        """AES in CBC mode (PyCryptodome backend)."""

        def __init__(self, key: bytes) -> None:
            self.key = key

        def encrypt(self, data: bytes) -> bytes:
            # CBC requires an unpredictable IV.  Use the OS CSPRNG via
            # ``secrets`` -- the ``random`` module is a predictable PRNG
            # and must never be used for cryptographic material.
            iv = secrets.token_bytes(16)
            # Pad to a whole number of 16-byte blocks (PKCS#7 style:
            # every pad byte holds the pad length).
            p = 16 - len(data) % 16
            data += bytes(bytearray(p for _ in range(p)))
            aes = AES.new(self.key, AES.MODE_CBC, iv)
            # The IV is prepended so decrypt() can recover it.
            return iv + aes.encrypt(data)

        def decrypt(self, data: bytes) -> bytes:
            # The first 16 bytes of the ciphertext are the IV.
            iv = data[:16]
            data = data[16:]
            aes = AES.new(self.key, AES.MODE_CBC, iv)
            if len(data) % 16:
                # Tolerate truncated ciphertext by padding to block size.
                data = pad(data, 16)
            d = aes.decrypt(data)
            if len(d) == 0:
                return d
            else:
                # Strip PKCS#7-style padding: last byte is the pad length.
                return d[: -d[-1]]

    def RC4_encrypt(key: bytes, data: bytes) -> bytes:
        """Encrypt *data* with RC4 under *key*."""
        return ARC4.ARC4Cipher(key).encrypt(data)

    def RC4_decrypt(key: bytes, data: bytes) -> bytes:
        """Decrypt *data* with RC4 under *key*."""
        return ARC4.ARC4Cipher(key).decrypt(data)

    def AES_ECB_encrypt(key: bytes, data: bytes) -> bytes:
        """Encrypt *data* with AES in ECB mode under *key*."""
        return AES.new(key, AES.MODE_ECB).encrypt(data)

    def AES_ECB_decrypt(key: bytes, data: bytes) -> bytes:
        """Decrypt *data* with AES in ECB mode under *key*."""
        return AES.new(key, AES.MODE_ECB).decrypt(data)

    def AES_CBC_encrypt(key: bytes, iv: bytes, data: bytes) -> bytes:
        """Encrypt *data* with AES in CBC mode under *key* and *iv*."""
        return AES.new(key, AES.MODE_CBC, iv).encrypt(data)

    def AES_CBC_decrypt(key: bytes, iv: bytes, data: bytes) -> bytes:
        """Decrypt *data* with AES in CBC mode under *key* and *iv*."""
        return AES.new(key, AES.MODE_CBC, iv).decrypt(data)

except ImportError:

    class CryptRC4(CryptBase):  # type: ignore
        """Pure-Python RC4 fallback used when PyCryptodome is absent."""

        def __init__(self, key: bytes) -> None:
            # Key-scheduling algorithm (KSA): permute S under the key.
            self.S = list(range(256))
            j = 0
            for i in range(256):
                j = (j + self.S[i] + key[i % len(key)]) % 256
                self.S[i], self.S[j] = self.S[j], self.S[i]

        def encrypt(self, data: bytes) -> bytes:
            # Pseudo-random generation algorithm (PRGA): XOR the data
            # with the keystream.  Work on a copy of S so the instance
            # can be reused for several messages.
            S = list(self.S)
            out = list(0 for _ in range(len(data)))
            i, j = 0, 0
            for k in range(len(data)):
                i = (i + 1) % 256
                j = (j + S[i]) % 256
                S[i], S[j] = S[j], S[i]
                x = S[(S[i] + S[j]) % 256]
                out[k] = data[k] ^ x
            return bytes(bytearray(out))

        def decrypt(self, data: bytes) -> bytes:
            # RC4 is symmetric: decryption is the same keystream XOR.
            return self.encrypt(data)

    class CryptAES(CryptBase):  # type: ignore
        """AES placeholder that fails loudly: no pure-Python AES here."""

        def __init__(self, key: bytes) -> None:
            pass

        def encrypt(self, data: bytes) -> bytes:
            raise DependencyError("PyCryptodome is required for AES algorithm")

        def decrypt(self, data: bytes) -> bytes:
            raise DependencyError("PyCryptodome is required for AES algorithm")

    def RC4_encrypt(key: bytes, data: bytes) -> bytes:
        """Encrypt *data* with RC4 under *key* (pure Python)."""
        return CryptRC4(key).encrypt(data)

    def RC4_decrypt(key: bytes, data: bytes) -> bytes:
        """Decrypt *data* with RC4 under *key* (pure Python)."""
        return CryptRC4(key).decrypt(data)

    def AES_ECB_encrypt(key: bytes, data: bytes) -> bytes:
        raise DependencyError("PyCryptodome is required for AES algorithm")

    def AES_ECB_decrypt(key: bytes, data: bytes) -> bytes:
        raise DependencyError("PyCryptodome is required for AES algorithm")

    def AES_CBC_encrypt(key: bytes, iv: bytes, data: bytes) -> bytes:
        raise DependencyError("PyCryptodome is required for AES algorithm")

    def AES_CBC_decrypt(key: bytes, iv: bytes, data: bytes) -> bytes:
        raise DependencyError("PyCryptodome is required for AES algorithm")
|
||||||
|
|
||||||
|
|
||||||
|
class CryptFilter:
    """Bundle of ciphers for one PDF crypt filter.

    Holds separate ciphers for stream data, string data and embedded
    files, and applies the appropriate one while walking a PDF object
    tree.
    """

    def __init__(
        self, stmCrypt: CryptBase, strCrypt: CryptBase, efCrypt: CryptBase
    ) -> None:
        self.stmCrypt = stmCrypt
        self.strCrypt = strCrypt
        self.efCrypt = efCrypt

    def encrypt_object(self, obj: PdfObject) -> PdfObject:
        # TODO
        return NotImplemented

    def decrypt_object(self, obj: PdfObject) -> PdfObject:
        """Recursively decrypt *obj* (in place for containers/streams)
        and return the decrypted object."""
        if isinstance(obj, (ByteStringObject, TextStringObject)):
            # Strings are immutable: build a fresh decrypted object.
            plain = self.strCrypt.decrypt(obj.original_bytes)
            return create_string_object(plain)
        if isinstance(obj, StreamObject):
            obj._data = self.stmCrypt.decrypt(obj._data)
        elif isinstance(obj, DictionaryObject):
            # Snapshot the keys so reassignment during iteration is safe.
            for name in list(obj):
                obj[name] = self.decrypt_object(obj[name])
        elif isinstance(obj, ArrayObject):
            for idx, item in enumerate(obj):
                obj[idx] = self.decrypt_object(item)
        return obj
|
||||||
|
|
||||||
|
|
||||||
|
_PADDING = bytes(
|
||||||
|
[
|
||||||
|
0x28,
|
||||||
|
0xBF,
|
||||||
|
0x4E,
|
||||||
|
0x5E,
|
||||||
|
0x4E,
|
||||||
|
0x75,
|
||||||
|
0x8A,
|
||||||
|
0x41,
|
||||||
|
0x64,
|
||||||
|
0x00,
|
||||||
|
0x4E,
|
||||||
|
0x56,
|
||||||
|
0xFF,
|
||||||
|
0xFA,
|
||||||
|
0x01,
|
||||||
|
0x08,
|
||||||
|
0x2E,
|
||||||
|
0x2E,
|
||||||
|
0x00,
|
||||||
|
0xB6,
|
||||||
|
0xD0,
|
||||||
|
0x68,
|
||||||
|
0x3E,
|
||||||
|
0x80,
|
||||||
|
0x2F,
|
||||||
|
0x0C,
|
||||||
|
0xA9,
|
||||||
|
0xFE,
|
||||||
|
0x64,
|
||||||
|
0x53,
|
||||||
|
0x69,
|
||||||
|
0x7A,
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _padding(data: bytes) -> bytes:
|
||||||
|
return (data + _PADDING)[:32]
|
||||||
|
|
||||||
|
|
||||||
|
class AlgV4:
    """Standard security handler algorithms for encryption versions 1-4
    (RC4 / AES-128 based) of the PDF specification.

    All methods are static, pure functions over ``bytes`` inputs;
    passwords are expected to already be encoded as bytes.
    """

    @staticmethod
    def compute_key(
        password: bytes,
        rev: int,
        key_size: int,
        o_entry: bytes,
        P: int,
        id1_entry: bytes,
        metadata_encrypted: bool,
    ) -> bytes:
        """
        Algorithm 2: Computing an encryption key.

        a) Pad or truncate the password to exactly 32 bytes with the
           standard padding string (``_PADDING``).
        b) Initialize MD5 with the padded password.
        c) Feed the encryption dictionary's O entry to the hash.
        d) Feed the P entry as a 32-bit unsigned integer, low-order
           byte first.
        e) Feed the first element of the file identifier array (/ID in
           the trailer).
        f) (Revision 4+) If metadata is not encrypted, feed 4 bytes of
           0xFF.
        g) Finish the hash.
        h) (Revision 3+) 50 times: re-hash the first n bytes of the
           previous digest, where n is the key length in bytes.
        i) The key is the first n bytes of the final digest (n is 5 for
           revision 2, otherwise derived from the /Length entry).
        """
        a = _padding(password)
        u_hash = hashlib.md5(a)
        u_hash.update(o_entry)
        u_hash.update(struct.pack("<I", P))
        u_hash.update(id1_entry)
        if rev >= 4 and metadata_encrypted is False:
            u_hash.update(b"\xff\xff\xff\xff")
        u_hash_digest = u_hash.digest()
        # /Length is expressed in bits; the digest is consumed in bytes.
        length = key_size // 8
        if rev >= 3:
            for _ in range(50):
                u_hash_digest = hashlib.md5(u_hash_digest[:length]).digest()
        return u_hash_digest[:length]

    @staticmethod
    def compute_O_value_key(owner_password: bytes, rev: int, key_size: int) -> bytes:
        """
        Algorithm 3, steps (a)-(d): derive the RC4 key used to produce
        the encryption dictionary's O (owner password) value.

        The padded owner password is hashed with MD5 (plus 50 extra MD5
        rounds for revision 3+); the RC4 key is the first
        ``key_size // 8`` bytes of the final digest.
        """
        a = _padding(owner_password)
        o_hash_digest = hashlib.md5(a).digest()

        if rev >= 3:
            for _ in range(50):
                o_hash_digest = hashlib.md5(o_hash_digest).digest()

        rc4_key = o_hash_digest[: key_size // 8]
        return rc4_key

    @staticmethod
    def compute_O_value(rc4_key: bytes, user_password: bytes, rev: int) -> bytes:
        """
        Algorithm 3, steps (e)-(h): encrypt the padded user password
        with RC4 under *rc4_key* (from :func:`compute_O_value_key`);
        for revision 3+, apply 19 further RC4 passes, XOR-ing every
        byte of the key with the iteration counter (1..19).  The result
        is the O entry of the encryption dictionary.
        """
        a = _padding(user_password)
        rc4_enc = RC4_encrypt(rc4_key, a)
        if rev >= 3:
            for i in range(1, 20):
                key = bytes(bytearray(x ^ i for x in rc4_key))
                rc4_enc = RC4_encrypt(key, rc4_enc)
        return rc4_enc

    @staticmethod
    def compute_U_value(key: bytes, rev: int, id1_entry: bytes) -> bytes:
        """
        Algorithm 4 (security handlers of revision 2): the U value is
        the 32-byte padding string encrypted with RC4 under the file
        encryption *key*.
        """
        if rev <= 2:
            value = RC4_encrypt(key, _PADDING)
            return value

        """
        Algorithm 5 (security handlers of revision 3 or greater): hash
        the padding string plus the first /ID element with MD5,
        RC4-encrypt the 16-byte digest with *key*, then apply 19
        further RC4 passes with the key bytes XOR-ed with the iteration
        counter (1..19); finally pad the result back to 32 bytes.
        """
        u_hash = hashlib.md5(_PADDING)
        u_hash.update(id1_entry)
        rc4_enc = RC4_encrypt(key, u_hash.digest())
        for i in range(1, 20):
            rc4_key = bytes(bytearray(x ^ i for x in key))
            rc4_enc = RC4_encrypt(rc4_key, rc4_enc)
        return _padding(rc4_enc)

    @staticmethod
    def verify_user_password(
        user_password: bytes,
        rev: int,
        key_size: int,
        o_entry: bytes,
        u_entry: bytes,
        P: int,
        id1_entry: bytes,
        metadata_encrypted: bool,
    ) -> bytes:
        """
        Algorithm 6: Authenticating the user password.

        Recompute the U value from the supplied password (Algorithm 4
        or 5) and compare it against the document's U entry -- only the
        first 16 bytes for revision 3 or greater.  Returns the file
        encryption key on success, or ``b""`` if the password is wrong.
        """
        key = AlgV4.compute_key(
            user_password, rev, key_size, o_entry, P, id1_entry, metadata_encrypted
        )
        u_value = AlgV4.compute_U_value(key, rev, id1_entry)
        if rev >= 3:
            u_value = u_value[:16]
            u_entry = u_entry[:16]
        if u_value != u_entry:
            key = b""
        return key

    @staticmethod
    def verify_owner_password(
        owner_password: bytes,
        rev: int,
        key_size: int,
        o_entry: bytes,
        u_entry: bytes,
        P: int,
        id1_entry: bytes,
        metadata_encrypted: bool,
    ) -> bytes:
        """
        Algorithm 7: Authenticating the owner password.

        a) Derive the RC4 key from the supplied owner password, as in
           steps (a)-(d) of Algorithm 3.
        b) Decrypt the O entry: one RC4 pass for revision 2, or 20
           passes for revision 3+ with the key bytes XOR-ed with the
           iteration counter (19 down to 0).
        c) The result purports to be the user password; authenticate it
           with Algorithm 6.  Returns the file encryption key, or
           ``b""`` if the owner password is wrong.
        """
        rc4_key = AlgV4.compute_O_value_key(owner_password, rev, key_size)

        if rev <= 2:
            user_password = RC4_decrypt(rc4_key, o_entry)
        else:
            user_password = o_entry
            for i in range(19, -1, -1):
                key = bytes(bytearray(x ^ i for x in rc4_key))
                user_password = RC4_decrypt(key, user_password)
        return AlgV4.verify_user_password(
            user_password,
            rev,
            key_size,
            o_entry,
            u_entry,
            P,
            id1_entry,
            metadata_encrypted,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class AlgV5:
    """Standard security handler algorithms for AES-256 encryption
    (encryption version 5, revisions 5 and 6)."""

    @staticmethod
    def verify_owner_password(
        R: int, password: bytes, o_value: bytes, oe_value: bytes, u_value: bytes
    ) -> bytes:
        """
        Algorithm 3.2a, owner-password part.

        The O string is laid out as: 32-byte hash || 8-byte Validation
        Salt || 8-byte Key Salt.  The candidate password (truncated to
        127 bytes) is hashed together with the owner Validation Salt
        and the 48-byte U string; a match against the first 32 bytes of
        O means this is the owner password.  The file encryption key is
        then recovered by hashing with the owner Key Salt and using
        that hash to AES-256-CBC-decrypt the OE string (zero IV, no
        padding).

        Returns the 32-byte file encryption key, or ``b""`` when the
        password does not match.
        """
        # UTF-8 passwords are limited to 127 bytes by the spec.
        password = password[:127]
        if (
            AlgV5.calculate_hash(R, password, o_value[32:40], u_value[:48])
            != o_value[:32]
        ):
            return b""
        iv = bytes(16)
        tmp_key = AlgV5.calculate_hash(R, password, o_value[40:48], u_value[:48])
        key = AES_CBC_decrypt(tmp_key, iv, oe_value)
        return key

    @staticmethod
    def verify_user_password(
        R: int, password: bytes, u_value: bytes, ue_value: bytes
    ) -> bytes:
        """
        Algorithm 3.2a, user-password part; see
        :func:`verify_owner_password`.  The user hashes omit the U
        string.  Returns the file encryption key or ``b""`` on
        mismatch.
        """
        password = password[:127]
        if AlgV5.calculate_hash(R, password, u_value[32:40], b"") != u_value[:32]:
            return b""
        iv = bytes(16)
        tmp_key = AlgV5.calculate_hash(R, password, u_value[40:48], b"")
        return AES_CBC_decrypt(tmp_key, iv, ue_value)

    @staticmethod
    def calculate_hash(R: int, password: bytes, salt: bytes, udata: bytes) -> bytes:
        """
        Password hash used for verification and key derivation.

        Revision 5 is a single SHA-256 of ``password + salt + udata``.
        Revision 6 additionally runs the hardened iterative scheme:
        AES-256-CBC-encrypt 64 copies of ``password + K + udata`` keyed
        by parts of K, choose SHA-256/384/512 by ``sum(E[:16]) % 3``,
        and stop once at least 64 rounds have run and
        ``E[-1] <= count - 32``.

        Ported following qpdf's QPDF_encryption.cc implementation.
        """
        K = hashlib.sha256(password + salt + udata).digest()
        if R < 6:
            return K
        count = 0
        while True:
            count += 1
            K1 = password + K + udata
            E = AES_CBC_encrypt(K[:16], K[16:32], K1 * 64)
            hash_fn = (
                hashlib.sha256,
                hashlib.sha384,
                hashlib.sha512,
            )[sum(E[:16]) % 3]
            K = hash_fn(E).digest()
            if count >= 64 and E[-1] <= count - 32:
                break
        return K[:32]

    @staticmethod
    def verify_perms(
        key: bytes, perms: bytes, p: int, metadata_encrypted: bool
    ) -> bool:
        """
        Check the /Perms entry: decrypt it with the file key
        (AES-256-ECB) and compare the permissions, the EncryptMetadata
        flag byte and the literal ``adb`` marker against the expected
        plaintext.  See :func:`compute_Perms_value`.
        """
        b8 = b"T" if metadata_encrypted else b"F"
        p1 = struct.pack("<I", p) + b"\xff\xff\xff\xff" + b8 + b"adb"
        p2 = AES_ECB_decrypt(key, perms)
        # Bytes 12-15 of the decrypted block are random filler and are
        # deliberately ignored.
        return p1 == p2[:12]

    @staticmethod
    def generate_values(
        user_password: bytes,
        owner_password: bytes,
        key: bytes,
        p: int,
        metadata_encrypted: bool,
    ) -> Dict[Any, Any]:
        """
        Produce the /U, /UE, /O, /OE and /Perms entries for a new
        AES-256 encryption dictionary, given the chosen passwords and a
        freshly generated 32-byte file encryption *key*.
        """
        u_value, ue_value = AlgV5.compute_U_value(user_password, key)
        o_value, oe_value = AlgV5.compute_O_value(owner_password, key, u_value)
        perms = AlgV5.compute_Perms_value(key, p, metadata_encrypted)
        return {
            "/U": u_value,
            "/UE": ue_value,
            "/O": o_value,
            "/OE": oe_value,
            "/Perms": perms,
        }

    @staticmethod
    def compute_U_value(password: bytes, key: bytes) -> Tuple[bytes, bytes]:
        """
        Algorithm 3.8: compute the U (user password) and UE (user
        encryption key) values.

        1. Generate 16 random bytes with a strong random number
           generator: 8 bytes of User Validation Salt followed by 8
           bytes of User Key Salt.  Then
           U = SHA-256(password || validation salt) || validation salt
           || key salt (48 bytes total).
        2. UE = AES-256-CBC (key = SHA-256(password || key salt),
           zero IV, no padding) of the file encryption key.
        """
        # The spec mandates a *strong* random number generator for the
        # salts, so use the OS CSPRNG via ``secrets`` rather than the
        # predictable ``random`` module.
        random_bytes = secrets.token_bytes(16)
        val_salt = random_bytes[:8]
        key_salt = random_bytes[8:]
        u_value = hashlib.sha256(password + val_salt).digest() + val_salt + key_salt

        tmp_key = hashlib.sha256(password + key_salt).digest()
        iv = bytes(16)
        ue_value = AES_CBC_encrypt(tmp_key, iv, key)
        return u_value, ue_value

    @staticmethod
    def compute_O_value(
        password: bytes, key: bytes, u_value: bytes
    ) -> Tuple[bytes, bytes]:
        """
        Algorithm 3.9: compute the O (owner password) and OE (owner
        encryption key) values.

        Identical to Algorithm 3.8 except that the 48-byte U string
        (from :func:`compute_U_value`) is appended to both hash inputs:
        O = SHA-256(password || validation salt || U) || validation
        salt || key salt, and OE = AES-256-CBC (key =
        SHA-256(password || key salt || U), zero IV, no padding) of the
        file encryption key.
        """
        # Strong RNG required by the spec for the salts -- see
        # compute_U_value.
        random_bytes = secrets.token_bytes(16)
        val_salt = random_bytes[:8]
        key_salt = random_bytes[8:]
        o_value = (
            hashlib.sha256(password + val_salt + u_value).digest() + val_salt + key_salt
        )

        tmp_key = hashlib.sha256(password + key_salt + u_value).digest()
        iv = bytes(16)
        oe_value = AES_CBC_encrypt(tmp_key, iv, key)
        return o_value, oe_value

    @staticmethod
    def compute_Perms_value(key: bytes, p: int, metadata_encrypted: bool) -> bytes:
        """
        Algorithm 3.10: compute the /Perms value.

        Build a 16-byte block: P as a little-endian 32-bit value, then
        0xFFFFFFFF (extending the permissions to 64 bits), 'T' or 'F'
        for EncryptMetadata, the ASCII marker 'adb', and 4 bytes of
        random filler; encrypt the block with AES-256-ECB under the
        file encryption key.
        """
        b8 = b"T" if metadata_encrypted else b"F"
        # The filler bytes are ignored on verification but still come
        # from the CSPRNG, matching the spec's "random data" wording.
        rr = secrets.token_bytes(4)
        data = struct.pack("<I", p) + b"\xff\xff\xff\xff" + b8 + b"adb" + rr
        perms = AES_ECB_encrypt(key, data)
        return perms
|
||||||
|
|
||||||
|
|
||||||
|
class PasswordType(IntEnum):
    # Result of Encryption.verify(): which password (if any) unlocked the file.
    NOT_DECRYPTED = 0  # no password matched; content remains encrypted
    USER_PASSWORD = 1  # the user (open) password matched
    OWNER_PASSWORD = 2  # the owner (permissions) password matched
|
||||||
|
|
||||||
|
|
||||||
|
class Encryption:
    """
    The PDF "Standard" security handler (encryption dictionary versions
    V=1..5).

    Typical lifecycle: build an instance from the document's /Encrypt
    dictionary via :meth:`read`, unlock it with :meth:`verify`, then decrypt
    individual indirect objects with :meth:`decrypt_object`.

    BUG FIX vs. previous revision: the "StmF Method ... NOT supported!"
    error message in :meth:`read` was missing its ``f`` prefix, so the
    offending method name was never interpolated.
    """

    def __init__(
        self,
        algV: int,
        algR: int,
        entry: DictionaryObject,
        first_id_entry: bytes,
        StmF: str,
        StrF: str,
        EFF: str,
    ) -> None:
        # See TABLE 3.18 Entries common to all encryption dictionaries
        self.algV = algV  # /V: encryption algorithm version
        self.algR = algR  # /R: standard security handler revision
        self.entry = entry  # the /Encrypt dictionary itself
        # /Length is the key length in bits; 40 is the spec default.
        self.key_size = entry.get("/Length", 40)
        self.id1_entry = first_id_entry  # first element of the document /ID
        # Crypt filter method names for streams, strings and embedded files.
        self.StmF = StmF
        self.StrF = StrF
        self.EFF = EFF

        # 1 => owner password
        # 2 => user password
        self._password_type = PasswordType.NOT_DECRYPTED
        self._key: Optional[bytes] = None  # file encryption key, set by verify()

    def is_decrypted(self) -> bool:
        """Return True once verify() has accepted a password."""
        return self._password_type != PasswordType.NOT_DECRYPTED

    def decrypt_object(self, obj: PdfObject, idnum: int, generation: int) -> PdfObject:
        """
        Decrypt one indirect object.

        Implements "Algorithm 1: Encryption of data using the RC4 or AES
        algorithms": the n-byte file key is extended with the low-order
        3 bytes of the object number and 2 bytes of the generation number
        (little-endian), hashed with MD5, and the first min(n + 5, 16)
        bytes of the digest become the per-object key. For AES-128 the
        constant b"sAlT" is additionally mixed into the hash (backward
        compatibility, not extra security). For AES-256 (V=5) the 32-byte
        file key is used directly, with no per-object derivation.
        """
        pack1 = struct.pack("<i", idnum)[:3]
        pack2 = struct.pack("<i", generation)[:2]

        assert self._key
        key = self._key
        # n is 5 unless V > 1, in which case n is /Length divided by 8.
        n = 5 if self.algV == 1 else self.key_size // 8
        key_data = key[:n] + pack1 + pack2
        key_hash = hashlib.md5(key_data)
        rc4_key = key_hash.digest()[: min(n + 5, 16)]
        # for AES-128
        key_hash.update(b"sAlT")
        aes128_key = key_hash.digest()[: min(n + 5, 16)]

        # for AES-256
        aes256_key = key

        # Streams, strings and embedded files may each use a different
        # crypt filter method.
        stmCrypt = self._get_crypt(self.StmF, rc4_key, aes128_key, aes256_key)
        StrCrypt = self._get_crypt(self.StrF, rc4_key, aes128_key, aes256_key)
        efCrypt = self._get_crypt(self.EFF, rc4_key, aes128_key, aes256_key)

        cf = CryptFilter(stmCrypt, StrCrypt, efCrypt)
        return cf.decrypt_object(obj)

    @staticmethod
    def _get_crypt(
        method: str, rc4_key: bytes, aes128_key: bytes, aes256_key: bytes
    ) -> CryptBase:
        """Map a crypt filter method name to a concrete cipher instance."""
        if method == "/AESV3":
            return CryptAES(aes256_key)
        if method == "/AESV2":
            return CryptAES(aes128_key)
        if method == "/Identity":
            return CryptIdentity()
        # Default (/V2 and anything else): RC4 with the derived key.
        return CryptRC4(rc4_key)

    def verify(self, password: Union[bytes, str]) -> PasswordType:
        """
        Try *password* against the document.

        Strings are encoded as Latin-1 when possible, falling back to
        UTF-8. On success, the file key and password type are cached on
        the instance. Returns the matched PasswordType (or NOT_DECRYPTED).
        """
        if isinstance(password, str):
            try:
                pwd = password.encode("latin-1")
            except Exception:  # noqa
                pwd = password.encode("utf-8")
        else:
            pwd = password

        key, rc = self.verify_v4(pwd) if self.algV <= 4 else self.verify_v5(pwd)
        if rc != PasswordType.NOT_DECRYPTED:
            self._password_type = rc
            self._key = key
        return rc

    def verify_v4(self, password: bytes) -> Tuple[bytes, PasswordType]:
        """
        Verify a password against a V<=4 dictionary (RC4 / AES-128).

        Checks the owner password first, then the user password. Returns
        (file_key, PasswordType); (b"", NOT_DECRYPTED) when neither matches.
        """
        R = cast(int, self.entry["/R"])
        P = cast(int, self.entry["/P"])
        P = (P + 0x100000000) % 0x100000000  # maybe < 0
        # make type(metadata_encrypted) == bool
        em = self.entry.get("/EncryptMetadata")
        metadata_encrypted = em.value if em else True
        o_entry = cast(ByteStringObject, self.entry["/O"].get_object()).original_bytes
        u_entry = cast(ByteStringObject, self.entry["/U"].get_object()).original_bytes

        # verify owner password first
        key = AlgV4.verify_owner_password(
            password,
            R,
            self.key_size,
            o_entry,
            u_entry,
            P,
            self.id1_entry,
            metadata_encrypted,
        )
        if key:
            return key, PasswordType.OWNER_PASSWORD
        key = AlgV4.verify_user_password(
            password,
            R,
            self.key_size,
            o_entry,
            u_entry,
            P,
            self.id1_entry,
            metadata_encrypted,
        )
        if key:
            return key, PasswordType.USER_PASSWORD
        return b"", PasswordType.NOT_DECRYPTED

    def verify_v5(self, password: bytes) -> Tuple[bytes, PasswordType]:
        """
        Verify a password against a V=5 dictionary (AES-256).

        Owner password is tried first, then user. A failed /Perms
        consistency check is logged but deliberately not fatal.
        """
        # TODO: use SASLprep process
        o_entry = cast(ByteStringObject, self.entry["/O"].get_object()).original_bytes
        u_entry = cast(ByteStringObject, self.entry["/U"].get_object()).original_bytes
        oe_entry = cast(ByteStringObject, self.entry["/OE"].get_object()).original_bytes
        ue_entry = cast(ByteStringObject, self.entry["/UE"].get_object()).original_bytes

        # verify owner password first
        key = AlgV5.verify_owner_password(
            self.algR, password, o_entry, oe_entry, u_entry
        )
        rc = PasswordType.OWNER_PASSWORD
        if not key:
            key = AlgV5.verify_user_password(self.algR, password, u_entry, ue_entry)
            rc = PasswordType.USER_PASSWORD
        if not key:
            return b"", PasswordType.NOT_DECRYPTED

        # verify Perms
        perms = cast(ByteStringObject, self.entry["/Perms"].get_object()).original_bytes
        P = cast(int, self.entry["/P"])
        P = (P + 0x100000000) % 0x100000000  # maybe < 0
        metadata_encrypted = self.entry.get("/EncryptMetadata", True)
        if not AlgV5.verify_perms(key, perms, P, metadata_encrypted):
            logger_warning("ignore '/Perms' verify failed", __name__)
        return key, rc

    @staticmethod
    def read(encryption_entry: DictionaryObject, first_id_entry: bytes) -> "Encryption":
        """
        Build an Encryption object from a document's /Encrypt dictionary.

        Only the /Standard security handler (without /SubFilter) and
        V in 1..5 are supported; for V>=4 the stream/string/embedded-file
        crypt filter methods are resolved through the /CF dictionary.

        :raises NotImplementedError: for unsupported handlers, versions,
            or crypt filter methods.
        """
        filter = encryption_entry.get("/Filter")
        if filter != "/Standard":
            raise NotImplementedError(
                "only Standard PDF encryption handler is available"
            )
        if "/SubFilter" in encryption_entry:
            raise NotImplementedError("/SubFilter NOT supported")

        # Defaults for V<4, where no crypt filters exist: plain RC4 (/V2).
        StmF = "/V2"
        StrF = "/V2"
        EFF = "/V2"

        V = encryption_entry.get("/V", 0)
        if V not in (1, 2, 3, 4, 5):
            raise NotImplementedError(f"Encryption V={V} NOT supported")
        if V >= 4:
            filters = encryption_entry["/CF"]

            StmF = encryption_entry.get("/StmF", "/Identity")
            StrF = encryption_entry.get("/StrF", "/Identity")
            EFF = encryption_entry.get("/EFF", StmF)

            # Resolve crypt filter names to their /CFM methods.
            if StmF != "/Identity":
                StmF = filters[StmF]["/CFM"]  # type: ignore
            if StrF != "/Identity":
                StrF = filters[StrF]["/CFM"]  # type: ignore
            if EFF != "/Identity":
                EFF = filters[EFF]["/CFM"]  # type: ignore

            allowed_methods = ("/Identity", "/V2", "/AESV2", "/AESV3")
            if StmF not in allowed_methods:
                # Fixed: this message was missing its f-string prefix.
                raise NotImplementedError(f"StmF Method {StmF} NOT supported!")
            if StrF not in allowed_methods:
                raise NotImplementedError(f"StrF Method {StrF} NOT supported!")
            if EFF not in allowed_methods:
                raise NotImplementedError(f"EFF Method {EFF} NOT supported!")

        R = cast(int, encryption_entry["/R"])
        return Encryption(V, R, encryption_entry, first_id_entry, StmF, StrF, EFF)
|
||||||
|
|
@ -0,0 +1,821 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
from io import BytesIO, FileIO, IOBase
|
||||||
|
from pathlib import Path
|
||||||
|
from types import TracebackType
|
||||||
|
from typing import (
|
||||||
|
Any,
|
||||||
|
Dict,
|
||||||
|
Iterable,
|
||||||
|
List,
|
||||||
|
Optional,
|
||||||
|
Tuple,
|
||||||
|
Type,
|
||||||
|
Union,
|
||||||
|
cast,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ._encryption import Encryption
|
||||||
|
from ._page import PageObject
|
||||||
|
from ._reader import PdfReader
|
||||||
|
from ._utils import (
|
||||||
|
StrByteType,
|
||||||
|
deprecation_bookmark,
|
||||||
|
deprecation_with_replacement,
|
||||||
|
str_,
|
||||||
|
)
|
||||||
|
from ._writer import PdfWriter
|
||||||
|
from .constants import GoToActionArguments
|
||||||
|
from .constants import PagesAttributes as PA
|
||||||
|
from .constants import TypArguments, TypFitArguments
|
||||||
|
from .generic import (
|
||||||
|
PAGE_FIT,
|
||||||
|
ArrayObject,
|
||||||
|
Destination,
|
||||||
|
DictionaryObject,
|
||||||
|
Fit,
|
||||||
|
FloatObject,
|
||||||
|
IndirectObject,
|
||||||
|
NameObject,
|
||||||
|
NullObject,
|
||||||
|
NumberObject,
|
||||||
|
OutlineItem,
|
||||||
|
TextStringObject,
|
||||||
|
TreeObject,
|
||||||
|
)
|
||||||
|
from .pagerange import PageRange, PageRangeSpec
|
||||||
|
from .types import FitType, LayoutType, OutlineType, PagemodeType, ZoomArgType
|
||||||
|
|
||||||
|
# Error message raised by PdfMerger methods invoked after close().
ERR_CLOSED_WRITER = "close() was called and thus the writer cannot be used anymore"
|
||||||
|
|
||||||
|
|
||||||
|
class _MergedPage:
    """Collect necessary information on each page that is being merged."""

    def __init__(self, pagedata: PageObject, src: PdfReader, id: int) -> None:
        # Unique id assigned by PdfMerger; used to match dests/outlines
        # back to their page after merging.
        self.id = id
        # Keep the source reader alive so its stream stays readable.
        self.src = src
        self.pagedata = pagedata
        # Reference of this page in the output writer; set during write().
        self.out_pagedata = None
|
||||||
|
|
||||||
|
|
||||||
|
class PdfMerger:
|
||||||
|
"""
|
||||||
|
Initialize a ``PdfMerger`` object.
|
||||||
|
|
||||||
|
``PdfMerger`` merges multiple PDFs into a single PDF.
|
||||||
|
It can concatenate, slice, insert, or any combination of the above.
|
||||||
|
|
||||||
|
See the functions :meth:`merge()<merge>` (or :meth:`append()<append>`)
|
||||||
|
and :meth:`write()<write>` for usage information.
|
||||||
|
|
||||||
|
:param bool strict: Determines whether user should be warned of all
|
||||||
|
problems and also causes some correctable problems to be fatal.
|
||||||
|
Defaults to ``False``.
|
||||||
|
:param fileobj: Output file. Can be a filename or any kind of
|
||||||
|
file-like object.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmarks="outline")
def __init__(
    self, strict: bool = False, fileobj: Union[Path, StrByteType] = ""
) -> None:
    """Create an empty merger; see the class docstring for parameters."""
    self.strict = strict
    # Optional output target written automatically on context-manager exit.
    self.fileobj = fileobj
    # (stream, reader) pairs for every document merged in so far.
    self.inputs: List[Tuple[Any, PdfReader]] = []
    # _MergedPage entries in output order.
    self.pages: List[Any] = []
    # Writer collecting the merged result; None once close()d.
    self.output: Optional[PdfWriter] = PdfWriter()
    self.outline: OutlineType = []
    self.named_dests: List[Any] = []
    # Monotonic id handed to each merged page.
    self.id_count = 0
|
||||||
|
|
||||||
|
def __enter__(self) -> "PdfMerger":
|
||||||
|
# There is nothing to do.
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(
|
||||||
|
self,
|
||||||
|
exc_type: Optional[Type[BaseException]],
|
||||||
|
exc: Optional[BaseException],
|
||||||
|
traceback: Optional[TracebackType],
|
||||||
|
) -> None:
|
||||||
|
"""Write to the fileobj and close the merger."""
|
||||||
|
if self.fileobj:
|
||||||
|
self.write(self.fileobj)
|
||||||
|
self.close()
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmark="outline_item", import_bookmarks="import_outline")
def merge(
    self,
    page_number: Optional[int] = None,
    # FIX: default is None, so the annotation must be Optional.
    fileobj: Optional[Union[Path, StrByteType, PdfReader]] = None,
    outline_item: Optional[str] = None,
    pages: Optional[PageRangeSpec] = None,
    import_outline: bool = True,
    position: Optional[int] = None,  # deprecated
) -> None:
    """
    Merge the pages from the given file into the output file at the
    specified page number.

    :param int page_number: The *page number* to insert this file. File will
        be inserted after the given number.

    :param fileobj: A File Object or an object that supports the standard
        read and seek methods similar to a File Object. Could also be a
        string representing a path to a PDF file.

    :param str outline_item: Optionally, you may specify an outline item
        (previously referred to as a 'bookmark') to be applied at the
        beginning of the included file by supplying the text of the outline item.

    :param pages: can be a :class:`PageRange<PyPDF2.pagerange.PageRange>`
        or a ``(start, stop[, step])`` tuple
        to merge only the specified range of pages from the source
        document into the output document.
        Can also be a list of pages to merge.

    :param bool import_outline: You may prevent the source document's
        outline (collection of outline items, previously referred to as
        'bookmarks') from being imported by specifying this as ``False``.

    :raises ValueError: if ``page_number``/``fileobj`` is missing, or the
        deprecated ``position`` conflicts with ``page_number``.
    :raises TypeError: if ``pages`` has an unsupported type.
    """
    if position is not None:  # deprecated
        if page_number is None:
            page_number = position
            old_term = "position"
            new_term = "page_number"
            warnings.warn(
                (
                    f"{old_term} is deprecated as an argument and will be "
                    f"removed in PyPDF2=4.0.0. Use {new_term} instead"
                ),
                DeprecationWarning,
            )
        else:
            raise ValueError(
                "The argument position of merge is deprecated. Use page_number only."
            )

    if page_number is None:  # deprecated
        # The parameter is only marked as Optional as long as
        # position is not fully deprecated
        raise ValueError("page_number may not be None")
    if fileobj is None:  # deprecated
        # The argument is only Optional due to the deprecated position
        # argument
        raise ValueError("fileobj may not be None")

    stream, encryption_obj = self._create_stream(fileobj)

    # Create a new PdfReader instance using the stream
    # (either file or BytesIO or StringIO) created above
    reader = PdfReader(stream, strict=self.strict)  # type: ignore[arg-type]
    self.inputs.append((stream, reader))
    if encryption_obj is not None:
        reader._encryption = encryption_obj

    # Find the range of pages to merge.
    if pages is None:
        pages = (0, len(reader.pages))
    elif isinstance(pages, PageRange):
        pages = pages.indices(len(reader.pages))
    elif isinstance(pages, list):
        pass  # keep the explicit page list as-is
    elif not isinstance(pages, tuple):
        raise TypeError('"pages" must be a tuple of (start, stop[, step])')

    srcpages = []

    outline = []
    if import_outline:
        outline = reader.outline
        # Drop outline entries that point outside the selected pages.
        outline = self._trim_outline(reader, outline, pages)

    if outline_item:
        # Wrap the imported outline under a new parent item.
        outline_item_typ = OutlineItem(
            TextStringObject(outline_item),
            NumberObject(self.id_count),
            Fit.fit(),
        )
        self.outline += [outline_item_typ, outline]  # type: ignore
    else:
        self.outline += outline

    dests = reader.named_destinations
    trimmed_dests = self._trim_dests(reader, dests, pages)
    self.named_dests += trimmed_dests

    # Gather all the pages that are going to be merged
    for i in range(*pages):
        page = reader.pages[i]

        id = self.id_count
        self.id_count += 1

        mp = _MergedPage(page, reader, id)

        srcpages.append(mp)

    self._associate_dests_to_pages(srcpages)
    self._associate_outline_items_to_pages(srcpages)

    # Slice to insert the pages at the specified page_number
    self.pages[page_number:page_number] = srcpages
|
||||||
|
|
||||||
|
def _create_stream(
    self, fileobj: Union[Path, StrByteType, PdfReader]
) -> Tuple[IOBase, Optional[Encryption]]:
    """
    Normalize *fileobj* into a seekable stream for PdfReader.

    A path (str/Path) is opened read-only; a PdfReader has its underlying
    stream copied into a BytesIO (carrying over its Encryption object and
    restoring the reader's stream position); any object with seek/read is
    copied into a BytesIO; anything else raises NotImplementedError.
    """
    encryption_obj = None
    stream: IOBase

    if isinstance(fileobj, (str, Path)):
        stream = FileIO(fileobj, "rb")
    elif isinstance(fileobj, PdfReader):
        if fileobj._encryption:
            encryption_obj = fileobj._encryption
        saved_pos = fileobj.stream.tell()
        fileobj.stream.seek(0)
        stream = BytesIO(fileobj.stream.read())
        # reset the stream to its original location
        fileobj.stream.seek(saved_pos)
    elif hasattr(fileobj, "seek") and hasattr(fileobj, "read"):
        fileobj.seek(0)
        stream = BytesIO(fileobj.read())
    else:
        raise NotImplementedError(
            "PdfMerger.merge requires an object that PdfReader can parse. "
            "Typically, that is a Path or a string representing a Path, "
            "a file object, or an object implementing .seek and .read. "
            "Passing a PdfReader directly works as well."
        )
    return stream, encryption_obj
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmark="outline_item", import_bookmarks="import_outline")
def append(
    self,
    fileobj: Union[StrByteType, PdfReader, Path],
    outline_item: Optional[str] = None,
    pages: Union[
        None, PageRange, Tuple[int, int], Tuple[int, int, int], List[int]
    ] = None,
    import_outline: bool = True,
) -> None:
    """
    Concatenate pages from *fileobj* onto the end of the output.

    Identical to :meth:`merge()<merge>` except that the insertion point is
    always the current end of the merged document.

    :param fileobj: A File Object, an object supporting the standard read
        and seek methods, or a string/Path naming a PDF file.
    :param str outline_item: Optional outline item ('bookmark') text to
        apply at the beginning of the included file.
    :param pages: A :class:`PageRange<PyPDF2.pagerange.PageRange>`, a
        ``(start, stop[, step])`` tuple, or a list of page indices
        restricting which source pages are appended.
    :param bool import_outline: Set to ``False`` to skip importing the
        source document's outline.
    """
    # Delegate to merge() with the insertion point at the current end.
    self.merge(len(self.pages), fileobj, outline_item, pages, import_outline)
|
||||||
|
|
||||||
|
def write(self, fileobj: Union[Path, StrByteType]) -> None:
    """
    Write all data that has been merged to the given output file.

    :param fileobj: Output file. Can be a filename or any kind of
        file-like object.
    :raises RuntimeError: if the merger was already closed.
    """
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)

    # Transfer the merged pages into the writer, recording each page's
    # reference in the output so dests/outline items can point at it.
    for merged in self.pages:
        self.output.add_page(merged.pagedata)
        pages_obj = cast(Dict[str, Any], self.output._pages.get_object())
        merged.out_pagedata = self.output.get_reference(
            pages_obj[PA.KIDS][-1].get_object()
        )

    # Once all pages are added, create outline items to point at those pages
    self._write_dests()
    self._write_outline()

    # Write the output to the file
    my_file, ret_fileobj = self.output.write(fileobj)

    # The writer only opened a file itself when given a path; close it then.
    if my_file:
        ret_fileobj.close()
|
||||||
|
|
||||||
|
def close(self) -> None:
    """Shut all file descriptors (input and output) and clear all memory usage."""
    self.pages = []
    for source_stream, _reader in self.inputs:
        source_stream.close()
    self.inputs = []
    # Dropping the writer marks the merger as unusable (see ERR_CLOSED_WRITER).
    self.output = None
|
||||||
|
|
||||||
|
def add_metadata(self, infos: Dict[str, Any]) -> None:
    """
    Add custom metadata to the output.

    :param dict infos: a Python dictionary where each key is a field
        and each value is your new metadata.
        Example: ``{u'/Title': u'My title'}``
    :raises RuntimeError: if the merger was already closed.
    """
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    # Delegate to the underlying writer.
    self.output.add_metadata(infos)
|
||||||
|
|
||||||
|
def addMetadata(self, infos: Dict[str, Any]) -> None:  # pragma: no cover
    """
    Deprecated alias for :meth:`add_metadata`.

    .. deprecated:: 1.28.0
    """
    deprecation_with_replacement("addMetadata", "add_metadata")
    self.add_metadata(infos)
|
||||||
|
|
||||||
|
def setPageLayout(self, layout: LayoutType) -> None:  # pragma: no cover
    """
    Deprecated alias for :meth:`set_page_layout`.

    .. deprecated:: 1.28.0
    """
    deprecation_with_replacement("setPageLayout", "set_page_layout")
    self.set_page_layout(layout)
|
||||||
|
|
||||||
|
def set_page_layout(self, layout: LayoutType) -> None:
    """
    Set the page layout of the output document.

    :param str layout: One of ``/NoLayout`` (layout explicitly not
        specified), ``/SinglePage`` (one page at a time), ``/OneColumn``
        (one column at a time), ``/TwoColumnLeft`` / ``/TwoColumnRight``
        (two columns, odd pages left/right), ``/TwoPageLeft`` /
        ``/TwoPageRight`` (two pages at a time, odd pages left/right).
    :raises RuntimeError: if the merger was already closed.
    """
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    self.output._set_page_layout(layout)
|
||||||
|
|
||||||
|
def setPageMode(self, mode: PagemodeType) -> None:  # pragma: no cover
    """
    Deprecated alias for :meth:`set_page_mode`.

    .. deprecated:: 1.28.0
    """
    deprecation_with_replacement("setPageMode", "set_page_mode", "3.0.0")
    self.set_page_mode(mode)
|
||||||
|
|
||||||
|
def set_page_mode(self, mode: PagemodeType) -> None:
    """
    Set the page mode of the output document.

    :param str mode: One of ``/UseNone`` (no outline/thumbnail panels),
        ``/UseOutlines`` (show outline panel), ``/UseThumbs`` (show page
        thumbnails), ``/FullScreen`` (fullscreen view), ``/UseOC`` (show
        Optional Content Group panel), ``/UseAttachments`` (show
        attachments panel).
    :raises RuntimeError: if the merger was already closed.
    """
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    self.output.set_page_mode(mode)
|
||||||
|
|
||||||
|
def _trim_dests(
    self,
    pdf: PdfReader,
    dests: Dict[str, Dict[str, Any]],
    pages: Union[Tuple[int, int], Tuple[int, int, int], List[int]],
) -> List[Dict[str, Any]]:
    """Remove named destinations that are not a part of the specified page set."""
    page_indices = pages if isinstance(pages, list) else list(range(*pages))
    kept: List[Dict[str, Any]] = []
    for title, dest in dests.items():
        for idx in page_indices:
            if pdf.pages[idx].get_object() == dest["/Page"].get_object():
                # Resolve the indirect page reference in place.
                dest[NameObject("/Page")] = dest["/Page"].get_object()
                assert str_(title) == str_(dest["/Title"])
                kept.append(dest)
                break
    return kept
|
||||||
|
|
||||||
|
def _trim_outline(
    self,
    pdf: PdfReader,
    outline: OutlineType,
    pages: Union[Tuple[int, int], Tuple[int, int, int], List[int]],
) -> OutlineType:
    """Remove outline item entries that are not a part of the specified page set."""
    new_outline = []
    # Tracks whether the most recent plain item was kept, so a surviving
    # sub-outline can re-add its (otherwise dropped) preceding header.
    prev_header_added = True
    lst = pages if isinstance(pages, list) else list(range(*pages))
    for i, outline_item in enumerate(outline):
        if isinstance(outline_item, list):
            # Nested sub-outline: recurse; keep it only if anything survives.
            sub = self._trim_outline(pdf, outline_item, lst)  # type: ignore
            if sub:
                if not prev_header_added:
                    # Re-add the parent item that precedes this sub-list.
                    new_outline.append(outline[i - 1])
                new_outline.append(sub)  # type: ignore
        else:
            prev_header_added = False
            for j in lst:
                if outline_item["/Page"] is None:
                    continue
                if pdf.pages[j].get_object() == outline_item["/Page"].get_object():
                    # Resolve the indirect page reference in place.
                    outline_item[NameObject("/Page")] = outline_item[
                        "/Page"
                    ].get_object()
                    new_outline.append(outline_item)
                    prev_header_added = True
                    break
    return new_outline
|
||||||
|
|
||||||
|
def _write_dests(self) -> None:
    """Copy the collected named destinations into the output writer."""
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    for named_dest in self.named_dests:
        page_index = None
        if "/Page" in named_dest:
            for page_index, merged in enumerate(self.pages):  # noqa: B007
                if merged.id == named_dest["/Page"]:
                    # Re-point the destination at the merged output page.
                    named_dest[NameObject("/Page")] = merged.out_pagedata
                    break

        if page_index is not None:
            self.output.add_named_destination_object(named_dest)
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmarks="outline")
def _write_outline(
    self,
    outline: Optional[Iterable[OutlineItem]] = None,
    parent: Optional[TreeObject] = None,
) -> None:
    # Recursively add the collected outline items to the output writer;
    # a nested list becomes the children of the item added just before it.
    if self.output is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    if outline is None:
        outline = self.outline  # type: ignore
    assert outline is not None, "hint for mypy"  # TODO: is that true?

    last_added = None
    for outline_item in outline:
        if isinstance(outline_item, list):
            # Children of the most recently added item.
            self._write_outline(outline_item, last_added)
            continue

        page_no = None
        if "/Page" in outline_item:
            for page_no, page in enumerate(self.pages):  # noqa: B007
                if page.id == outline_item["/Page"]:
                    # Attach a /GoTo action pointing at the merged page.
                    self._write_outline_item_on_page(outline_item, page)
                    break
        if page_no is not None:
            # /Page and /Type were consumed above; the writer must not see them.
            del outline_item["/Page"], outline_item["/Type"]
            last_added = self.output.add_outline_item_dict(outline_item, parent)
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmark="outline_item")
def _write_outline_item_on_page(
    self, outline_item: Union[OutlineItem, Destination], page: _MergedPage
) -> None:
    """
    Attach a /GoTo action pointing at *page* to *outline_item*.

    The destination array is built from the item's fit type ("/Type")
    plus the fit-specific argument keys (left/top/zoom, ...), which are
    consumed from the item in the process.
    """
    oi_type = cast(str, outline_item["/Type"])
    args = [NumberObject(page.id), NameObject(oi_type)]
    # Maps each fit type to the argument keys it expects, in order.
    fit2arg_keys: Dict[str, Tuple[str, ...]] = {
        TypFitArguments.FIT_H: (TypArguments.TOP,),
        TypFitArguments.FIT_BH: (TypArguments.TOP,),
        TypFitArguments.FIT_V: (TypArguments.LEFT,),
        TypFitArguments.FIT_BV: (TypArguments.LEFT,),
        TypFitArguments.XYZ: (TypArguments.LEFT, TypArguments.TOP, "/Zoom"),
        TypFitArguments.FIT_R: (
            TypArguments.LEFT,
            TypArguments.BOTTOM,
            TypArguments.RIGHT,
            TypArguments.TOP,
        ),
    }
    for arg_key in fit2arg_keys.get(oi_type, tuple()):
        if arg_key in outline_item and not isinstance(
            outline_item[arg_key], NullObject
        ):
            args.append(FloatObject(outline_item[arg_key]))
        else:
            # Missing or null fit arguments default to 0.
            args.append(FloatObject(0))
        # NOTE(review): raises KeyError if arg_key is absent — presumably
        # Destination always populates these keys; confirm.
        del outline_item[arg_key]

    outline_item[NameObject("/A")] = DictionaryObject(
        {
            NameObject(GoToActionArguments.S): NameObject("/GoTo"),
            NameObject(GoToActionArguments.D): ArrayObject(args),
        }
    )
|
||||||
|
|
||||||
|
def _associate_dests_to_pages(self, pages: List[_MergedPage]) -> None:
    """
    Resolve each named destination's "/Page" reference to a page number.

    Destinations whose "/Page" is already a NumberObject are skipped.

    :param pages: Merged pages to search for each destination's target.
    :raises ValueError: if a destination's page is not among *pages*.
    """
    for named_dest in self.named_dests:
        pageno = None
        np = named_dest["/Page"]

        if isinstance(np, NumberObject):
            # Already resolved to a page number; nothing to do.
            continue

        for page in pages:
            if np.get_object() == page.pagedata.get_object():
                pageno = page.id

        if pageno is None:
            raise ValueError(
                f"Unresolved named destination '{named_dest['/Title']}'"
            )
        named_dest[NameObject("/Page")] = NumberObject(pageno)
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmarks="outline")
def _associate_outline_items_to_pages(
    self, pages: List[_MergedPage], outline: Optional[Iterable[OutlineItem]] = None
) -> None:
    """
    Resolve each outline item's "/Page" reference to a merged page id.

    Recurses into nested lists (children). Items whose target page is
    not found among *pages*, or already hold a NumberObject, are left
    untouched.
    """
    if outline is None:
        outline = self.outline  # type: ignore  # TODO: self.bookmarks can be None!
    assert outline is not None, "hint for mypy"
    for outline_item in outline:
        if isinstance(outline_item, list):
            self._associate_outline_items_to_pages(pages, outline_item)
            continue

        pageno = None
        outline_item_page = outline_item["/Page"]

        if isinstance(outline_item_page, NumberObject):
            # Already resolved.
            continue

        for p in pages:
            if outline_item_page.get_object() == p.pagedata.get_object():
                pageno = p.id

        if pageno is not None:
            outline_item[NameObject("/Page")] = NumberObject(pageno)
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmark="outline_item")
def find_outline_item(
    self,
    outline_item: Dict[str, Any],
    root: Optional[OutlineType] = None,
) -> Optional[List[int]]:
    """
    Locate *outline_item* in the outline tree.

    An item matches when it equals *outline_item* itself or when its
    "/Title" equals *outline_item*.

    :param outline_item: Item (or its title) to search for.
    :param root: Subtree to search; defaults to the full outline.
    :return: List of child indices giving the path from the root to the
        item, or None if not found.
    """
    if root is None:
        root = self.outline

    for i, oi_enum in enumerate(root):
        if isinstance(oi_enum, list):
            # oi_enum is still an inner node
            # (OutlineType, if recursive types were supported by mypy)
            res = self.find_outline_item(outline_item, oi_enum)  # type: ignore
            if res:
                return [i] + res
        elif (
            oi_enum == outline_item
            or cast(Dict[Any, Any], oi_enum["/Title"]) == outline_item
        ):
            # we found a leaf node
            return [i]

    return None
|
||||||
|
|
||||||
|
@deprecation_bookmark(bookmark="outline_item")
def find_bookmark(
    self,
    outline_item: Dict[str, Any],
    root: Optional[OutlineType] = None,
) -> Optional[List[int]]:  # pragma: no cover
    """
    Deprecated alias for :meth:`find_outline_item`.

    .. deprecated:: 2.9.0
        Use :meth:`find_outline_item` instead.
    """
    return self.find_outline_item(outline_item, root)
|
||||||
|
|
||||||
|
def add_outline_item(
    self,
    title: str,
    page_number: Optional[int] = None,
    parent: Union[None, TreeObject, IndirectObject] = None,
    color: Optional[Tuple[float, float, float]] = None,
    bold: bool = False,
    italic: bool = False,
    fit: Fit = PAGE_FIT,
    pagenum: Optional[int] = None,  # deprecated
) -> IndirectObject:
    """
    Add an outline item (commonly referred to as a "Bookmark") to this PDF file.

    :param str title: Title to use for this outline item.
    :param int page_number: Page number this outline item will point to.
    :param parent: A reference to a parent outline item to create nested
        outline items.
    :param tuple color: Color of the outline item's font as a red, green, blue tuple
        from 0.0 to 1.0
    :param bool bold: Outline item font is bold
    :param bool italic: Outline item font is italic
    :param Fit fit: The fit of the destination page.
    :return: Writer-side reference to the added outline item.
    :raises ValueError: if both ``page_number`` and the deprecated
        ``pagenum`` are given, or if neither is given.
    :raises RuntimeError: if the writer has already been closed.
    """
    if page_number is not None and pagenum is not None:
        raise ValueError(
            "The argument pagenum of add_outline_item is deprecated. Use page_number only."
        )
    if pagenum is not None:
        # Legacy alias: warn once, then fall through using its value.
        old_term = "pagenum"
        new_term = "page_number"
        warnings.warn(
            (
                f"{old_term} is deprecated as an argument and will be "
                f"removed in PyPDF2==4.0.0. Use {new_term} instead"
            ),
            DeprecationWarning,
        )
        page_number = pagenum
    if page_number is None:
        raise ValueError("page_number may not be None")
    writer = self.output
    if writer is None:
        raise RuntimeError(ERR_CLOSED_WRITER)
    return writer.add_outline_item(
        title,
        page_number,
        parent,
        None,
        color,
        bold,
        italic,
        fit,
    )
|
||||||
|
|
||||||
|
def addBookmark(
    self,
    title: str,
    pagenum: int,  # deprecated, but the whole method is deprecated
    parent: Union[None, TreeObject, IndirectObject] = None,
    color: Optional[Tuple[float, float, float]] = None,
    bold: bool = False,
    italic: bool = False,
    fit: FitType = "/Fit",
    *args: ZoomArgType,
) -> IndirectObject:  # pragma: no cover
    """
    Deprecated alias for :meth:`add_outline_item`.

    .. deprecated:: 1.28.0
        Use :meth:`add_outline_item` instead.
    """
    deprecation_with_replacement("addBookmark", "add_outline_item", "3.0.0")
    return self.add_outline_item(
        title,
        pagenum,
        parent,
        color,
        bold,
        italic,
        Fit(fit_type=fit, fit_args=args),
    )
|
||||||
|
|
||||||
|
def add_bookmark(
    self,
    title: str,
    pagenum: int,  # deprecated, but the whole method is deprecated already
    parent: Union[None, TreeObject, IndirectObject] = None,
    color: Optional[Tuple[float, float, float]] = None,
    bold: bool = False,
    italic: bool = False,
    fit: FitType = "/Fit",
    *args: ZoomArgType,
) -> IndirectObject:  # pragma: no cover
    """
    Deprecated alias for :meth:`add_outline_item`.

    .. deprecated:: 2.9.0
        Use :meth:`add_outline_item` instead.
    """
    # Fix: report this method's own name ("add_bookmark"), not
    # "addBookmark", in the deprecation message.
    deprecation_with_replacement("add_bookmark", "add_outline_item", "3.0.0")
    return self.add_outline_item(
        title,
        pagenum,
        parent,
        color,
        bold,
        italic,
        Fit(fit_type=fit, fit_args=args),
    )
|
||||||
|
|
||||||
|
def addNamedDestination(self, title: str, pagenum: int) -> None:  # pragma: no cover
    """
    Deprecated alias for :meth:`add_named_destination`.

    .. deprecated:: 1.28.0
        Use :meth:`add_named_destination` instead.
    """
    deprecation_with_replacement(
        "addNamedDestination", "add_named_destination", "3.0.0"
    )
    return self.add_named_destination(title, pagenum)
|
||||||
|
|
||||||
|
def add_named_destination(
    self,
    title: str,
    page_number: Optional[int] = None,
    pagenum: Optional[int] = None,
) -> None:
    """
    Add a destination to the output.

    :param str title: Title to use
    :param int page_number: Page number this destination points at.
    :raises ValueError: if both ``page_number`` and the deprecated
        ``pagenum`` are given, or if neither is given.
    """
    if page_number is not None and pagenum is not None:
        raise ValueError(
            "The argument pagenum of add_named_destination is deprecated. Use page_number only."
        )
    if pagenum is not None:
        # Legacy alias: warn once, then fall through using its value.
        old_term = "pagenum"
        new_term = "page_number"
        warnings.warn(
            (
                f"{old_term} is deprecated as an argument and will be "
                f"removed in PyPDF2==4.0.0. Use {new_term} instead"
            ),
            DeprecationWarning,
        )
        page_number = pagenum
    if page_number is None:
        raise ValueError("page_number may not be None")
    dest = Destination(
        TextStringObject(title),
        NumberObject(page_number),
        # NOTE(review): top=826 is hard-coded — presumably meant to be
        # near the top of a typical page; confirm the intended height.
        Fit.fit_horizontally(top=826),
    )
    self.named_dests.append(dest)
|
||||||
|
|
||||||
|
|
||||||
|
class PdfFileMerger(PdfMerger):  # pragma: no cover
    """
    Deprecated alias of :class:`PdfMerger`.

    .. deprecated:: 3.0.0
        Use :class:`PdfMerger` instead.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        deprecation_with_replacement("PdfFileMerger", "PdfMerger", "3.0.0")

        if "strict" not in kwargs and len(args) < 1:
            kwargs["strict"] = True  # maintain the default
        super().__init__(*args, **kwargs)
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,62 @@
|
||||||
|
"""Helpers for working with PDF types."""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import IO, Any, Dict, List, Optional, Tuple, Union
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Python 3.8+: https://peps.python.org/pep-0586
|
||||||
|
from typing import Protocol # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
from typing_extensions import Protocol # type: ignore[misc]
|
||||||
|
|
||||||
|
from ._utils import StrByteType
|
||||||
|
|
||||||
|
|
||||||
|
class PdfObjectProtocol(Protocol):
    """Structural type for PDF objects: cloning and dereferencing."""

    # Back-reference to the indirect object wrapping this object, if any.
    indirect_reference: Any

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> Any:
        """Clone this object into *pdf_dest* (see implementations)."""
        ...

    def _reference_clone(self, clone: Any, pdf_dest: Any) -> Any:
        ...

    def get_object(self) -> Optional["PdfObjectProtocol"]:
        """Resolve to the underlying (direct) object."""
        ...
|
||||||
|
|
||||||
|
|
||||||
|
class PdfReaderProtocol(Protocol):  # pragma: no cover
    """Structural type describing the reader interface used elsewhere."""

    @property
    def pdf_header(self) -> str:
        ...

    @property
    def strict(self) -> bool:
        ...

    @property
    def xref(self) -> Dict[int, Dict[int, Any]]:
        ...

    @property
    def pages(self) -> List[Any]:
        ...

    def get_object(self, indirect_reference: Any) -> Optional[PdfObjectProtocol]:
        ...
|
||||||
|
|
||||||
|
|
||||||
|
class PdfWriterProtocol(Protocol):  # pragma: no cover
    """Structural type describing the writer interface used elsewhere."""

    # All objects collected for output.
    _objects: List[Any]
    # Maps source-document ids to translated object numbers.
    _id_translated: Dict[int, Dict[int, int]]

    def get_object(self, indirect_reference: Any) -> Optional[PdfObjectProtocol]:
        ...

    def write(self, stream: Union[Path, StrByteType]) -> Tuple[bool, IO]:
        ...
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,252 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# Copyright (c) 2007, Ashish Kulkarni <kulkarni.ashish@gmail.com>
|
||||||
|
#
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
"""Anything related to encryption / decryption."""
|
||||||
|
|
||||||
|
import struct
|
||||||
|
from hashlib import md5
|
||||||
|
from typing import Tuple, Union
|
||||||
|
|
||||||
|
from ._utils import b_, ord_, str_
|
||||||
|
from .generic import ByteStringObject
|
||||||
|
|
||||||
|
try:
|
||||||
|
from typing import Literal # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
# PEP 586 introduced typing.Literal with Python 3.8
|
||||||
|
# For older Python versions, the backport typing_extensions is necessary:
|
||||||
|
from typing_extensions import Literal # type: ignore[misc]
|
||||||
|
|
||||||
|
# ref: pdf1.8 spec section 3.5.2 algorithm 3.2
# 32-byte padding string used to pad/truncate passwords (Algorithm 3.2,
# step 1); the exact byte values are mandated by the PDF specification.
_encryption_padding = (
    b"\x28\xbf\x4e\x5e\x4e\x75\x8a\x41\x64\x00\x4e\x56"
    b"\xff\xfa\x01\x08\x2e\x2e\x00\xb6\xd0\x68\x3e\x80\x2f\x0c"
    b"\xa9\xfe\x64\x53\x69\x7a"
)
|
||||||
|
|
||||||
|
|
||||||
|
def _alg32(
    password: str,
    rev: Literal[2, 3, 4],
    keylen: int,
    owner_entry: ByteStringObject,
    p_entry: int,
    id1_entry: ByteStringObject,
    metadata_encrypt: bool = True,
) -> bytes:
    """
    Implementation of algorithm 3.2 of the PDF standard security handler.

    See section 3.5.2 of the PDF 1.6 reference.

    :param password: Password string used to derive the key.
    :param rev: Security handler revision (2, 3 or 4).
    :param keylen: Encryption key length in bytes.
    :param owner_entry: Value of the encryption dictionary's /O entry.
    :param p_entry: Value of the /P permissions entry.
    :param id1_entry: First element of the file's /ID array.
    :param metadata_encrypt: Whether document metadata is encrypted
        (only consulted for revision >= 3).
    :return: The first *keylen* bytes of the final MD5 digest.
    """
    # 1. Pad or truncate the password string to exactly 32 bytes. If the
    # password string is more than 32 bytes long, use only its first 32 bytes;
    # if it is less than 32 bytes long, pad it by appending the required number
    # of additional bytes from the beginning of the padding string
    # (_encryption_padding).
    password_bytes = b_((str_(password) + str_(_encryption_padding))[:32])
    # 2. Initialize the MD5 hash function and pass the result of step 1 as
    # input to this function.
    m = md5(password_bytes)
    # 3. Pass the value of the encryption dictionary's /O entry to the MD5 hash
    # function.
    m.update(owner_entry.original_bytes)
    # 4. Treat the value of the /P entry as an unsigned 4-byte integer and pass
    # these bytes to the MD5 hash function, low-order byte first.
    p_entry_bytes = struct.pack("<i", p_entry)
    m.update(p_entry_bytes)
    # 5. Pass the first element of the file's file identifier array to the MD5
    # hash function.
    m.update(id1_entry.original_bytes)
    # 6. (Revision 3 or greater) If document metadata is not being encrypted,
    # pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
    if rev >= 3 and not metadata_encrypt:
        m.update(b"\xff\xff\xff\xff")
    # 7. Finish the hash.
    md5_hash = m.digest()
    # 8. (Revision 3 or greater) Do the following 50 times: Take the output
    # from the previous MD5 hash and pass the first n bytes of the output as
    # input into a new MD5 hash, where n is the number of bytes of the
    # encryption key as defined by the value of the encryption dictionary's
    # /Length entry.
    if rev >= 3:
        for _ in range(50):
            md5_hash = md5(md5_hash[:keylen]).digest()
    # 9. Set the encryption key to the first n bytes of the output from the
    # final MD5 hash, where n is always 5 for revision 2 but, for revision 3 or
    # greater, depends on the value of the encryption dictionary's /Length
    # entry.
    return md5_hash[:keylen]
|
||||||
|
|
||||||
|
|
||||||
|
def _alg33(
    owner_password: str, user_password: str, rev: Literal[2, 3, 4], keylen: int
) -> bytes:
    """
    Implementation of algorithm 3.3 of the PDF standard security handler,
    section 3.5.2 of the PDF 1.6 reference.

    :param owner_password: Owner password used to derive the RC4 key.
    :param user_password: User password that gets encrypted.
    :param rev: Security handler revision (2, 3 or 4).
    :param keylen: RC4 key length in bytes.
    :return: The value for the encryption dictionary's /O entry.
    """
    # steps 1 - 4
    key = _alg33_1(owner_password, rev, keylen)
    # 5. Pad or truncate the user password string as described in step 1 of
    # algorithm 3.2.
    user_password_bytes = b_((user_password + str_(_encryption_padding))[:32])
    # 6. Encrypt the result of step 5, using an RC4 encryption function with
    # the encryption key obtained in step 4.
    val = RC4_encrypt(key, user_password_bytes)
    # 7. (Revision 3 or greater) Do the following 19 times: Take the output
    # from the previous invocation of the RC4 function and pass it as input to
    # a new invocation of the function; use an encryption key generated by
    # taking each byte of the encryption key obtained in step 4 and performing
    # an XOR operation between that byte and the single-byte value of the
    # iteration counter (from 1 to 19).
    if rev >= 3:
        for i in range(1, 20):
            new_key = ""
            for key_char in key:
                new_key += chr(ord_(key_char) ^ i)
            val = RC4_encrypt(new_key, val)
    # 8. Store the output from the final invocation of the RC4 as the value of
    # the /O entry in the encryption dictionary.
    return val
|
||||||
|
|
||||||
|
|
||||||
|
def _alg33_1(password: str, rev: Literal[2, 3, 4], keylen: int) -> bytes:
    """
    Derive the RC4 key used by Algorithm 3.3 (steps 1-4).

    Pads/truncates the owner password to 32 bytes with the standard
    padding string, hashes it with MD5 (re-hashing 50 times for
    revision 3+), and returns the first *keylen* bytes of the digest.
    """
    # Step 1: pad or truncate the password to exactly 32 bytes; the
    # padding bytes come from _encryption_padding. If there is no owner
    # password, the caller passes the user password instead.
    padded = b_((password + str_(_encryption_padding))[:32])
    # Steps 2-3: MD5 the padded password; for revision 3 or greater,
    # feed the digest back through MD5 fifty more times.
    digest = md5(padded).digest()
    if rev >= 3:
        for _ in range(50):
            digest = md5(digest).digest()
    # Step 4: the RC4 key is the first keylen bytes of the final digest
    # (keylen is always 5 for revision 2).
    return digest[:keylen]
|
||||||
|
|
||||||
|
|
||||||
|
def _alg34(
    password: str,
    owner_entry: ByteStringObject,
    p_entry: int,
    id1_entry: ByteStringObject,
) -> Tuple[bytes, bytes]:
    """
    Implementation of algorithm 3.4 of the PDF standard security handler.

    See section 3.5.2 of the PDF 1.6 reference.

    :param password: User password.
    :param owner_entry: Value of the encryption dictionary's /O entry.
    :param p_entry: Value of the /P permissions entry.
    :param id1_entry: First element of the file's /ID array.
    :return: Tuple of (/U entry value, encryption key).
    """
    # 1. Create an encryption key based on the user password string, as
    # described in algorithm 3.2.
    rev: Literal[2] = 2
    keylen = 5
    key = _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry)
    # 2. Encrypt the 32-byte padding string shown in step 1 of algorithm 3.2,
    # using an RC4 encryption function with the encryption key from the
    # preceding step.
    U = RC4_encrypt(key, _encryption_padding)
    # 3. Store the result of step 2 as the value of the /U entry in the
    # encryption dictionary.
    return U, key
|
||||||
|
|
||||||
|
|
||||||
|
def _alg35(
    password: str,
    rev: Literal[2, 3, 4],
    keylen: int,
    owner_entry: ByteStringObject,
    p_entry: int,
    id1_entry: ByteStringObject,
    metadata_encrypt: bool,
) -> Tuple[bytes, bytes]:
    """
    Implementation of algorithm 3.5 of the PDF standard security handler
    (the revision 3+ variant of algorithm 3.4).

    See section 3.5.2 of the PDF 1.6 reference.

    :param password: User password.
    :param rev: Security handler revision (2, 3 or 4).
    :param keylen: Encryption key length in bytes.
    :param owner_entry: Value of the encryption dictionary's /O entry.
    :param p_entry: Value of the /P permissions entry.
    :param id1_entry: First element of the file's /ID array.
    :param metadata_encrypt: Whether document metadata is encrypted.
    :return: Tuple of (/U entry value, encryption key).
    """
    # 1. Create an encryption key based on the user password string, as
    # described in Algorithm 3.2.
    key = _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry)
    # 2. Initialize the MD5 hash function and pass the 32-byte padding string
    # shown in step 1 of Algorithm 3.2 as input to this function.
    m = md5()
    m.update(_encryption_padding)
    # 3. Pass the first element of the file's file identifier array (the value
    # of the ID entry in the document's trailer dictionary; see Table 3.13 on
    # page 73) to the hash function and finish the hash. (See implementation
    # note 25 in Appendix H.)
    m.update(id1_entry.original_bytes)
    md5_hash = m.digest()
    # 4. Encrypt the 16-byte result of the hash, using an RC4 encryption
    # function with the encryption key from step 1.
    val = RC4_encrypt(key, md5_hash)
    # 5. Do the following 19 times: Take the output from the previous
    # invocation of the RC4 function and pass it as input to a new invocation
    # of the function; use an encryption key generated by taking each byte of
    # the original encryption key (obtained in step 2) and performing an XOR
    # operation between that byte and the single-byte value of the iteration
    # counter (from 1 to 19).
    for i in range(1, 20):
        new_key = b""
        for k in key:
            new_key += b_(chr(ord_(k) ^ i))
        val = RC4_encrypt(new_key, val)
    # 6. Append 16 bytes of arbitrary padding to the output from the final
    # invocation of the RC4 function and store the 32-byte result as the value
    # of the U entry in the encryption dictionary.
    # (implementer note: I don't know what "arbitrary padding" is supposed to
    # mean, so I have used null bytes. This seems to match a few other
    # people's implementations)
    return val + (b"\x00" * 16), key
|
||||||
|
|
||||||
|
|
||||||
|
def RC4_encrypt(key: Union[str, bytes], plaintext: bytes) -> bytes:
    """
    Encrypt/decrypt *plaintext* with the RC4 stream cipher under *key*.

    RC4 is symmetric: applying this function twice with the same key
    returns the original data.

    :param key: RC4 key; ``str`` keys are encoded as latin-1.
    :param plaintext: Data to transform (``str`` accepted for backward
        compatibility and encoded as latin-1).
    :return: The transformed bytes, same length as *plaintext*.
    """
    # Improvement over the original: operate on raw byte values instead
    # of round-tripping every byte through chr()/ord_() and joining a
    # list of 1-byte bytes objects — same output, less overhead, and no
    # dependency on the project's b_/ord_ helpers.
    key_bytes = key.encode("latin-1") if isinstance(key, str) else bytes(key)
    data = (
        plaintext.encode("latin-1") if isinstance(plaintext, str) else plaintext
    )
    # Key-scheduling algorithm (KSA): permute the 256-entry state array.
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key_bytes[i % len(key_bytes)]) % 256
        S[i], S[j] = S[j], S[i]
    # Pseudo-random generation algorithm (PRGA): XOR keystream into data.
    out = bytearray()
    i = j = 0
    for byte in data:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(byte ^ S[(S[i] + S[j]) % 256])
    return bytes(out)
|
||||||
|
|
@ -0,0 +1,471 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
"""Utility functions for PDF library."""
|
||||||
|
__author__ = "Mathieu Fenniak"
|
||||||
|
__author_email__ = "biziqe@mathieu.fenniak.net"
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import logging
|
||||||
|
import warnings
|
||||||
|
from codecs import getencoder
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from io import DEFAULT_BUFFER_SIZE
|
||||||
|
from os import SEEK_CUR
|
||||||
|
from typing import (
|
||||||
|
IO,
|
||||||
|
Any,
|
||||||
|
Callable,
|
||||||
|
Dict,
|
||||||
|
Optional,
|
||||||
|
Pattern,
|
||||||
|
Tuple,
|
||||||
|
Union,
|
||||||
|
overload,
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Python 3.10+: https://www.python.org/dev/peps/pep-0484/
|
||||||
|
from typing import TypeAlias # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
from typing_extensions import TypeAlias
|
||||||
|
|
||||||
|
from .errors import (
|
||||||
|
STREAM_TRUNCATED_PREMATURELY,
|
||||||
|
DeprecationError,
|
||||||
|
PdfStreamError,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 3x3 transformation matrix in nested-tuple (row) form.
TransformationMatrixType: TypeAlias = Tuple[
    Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]
]
# The same transformation flattened to six values (PDF "a b c d e f" form).
CompressedTransformationMatrix: TypeAlias = Tuple[
    float, float, float, float, float, float
]

# An open file-like object.
StreamType = IO
# Either a string (e.g. a path) or an open stream.
StrByteType = Union[str, StreamType]

# Message templates for deprecation warnings/errors; the "{}" slots are
# filled with the old name, the version, and (where present) the
# replacement name.
DEPR_MSG_NO_REPLACEMENT = "{} is deprecated and will be removed in PyPDF2 {}."
DEPR_MSG_NO_REPLACEMENT_HAPPENED = "{} is deprecated and was removed in PyPDF2 {}."
DEPR_MSG = "{} is deprecated and will be removed in PyPDF2 3.0.0. Use {} instead."
DEPR_MSG_HAPPENED = "{} is deprecated and was removed in PyPDF2 {}. Use {} instead."
|
||||||
|
|
||||||
|
|
||||||
|
def _get_max_pdf_version_header(header1: bytes, header2: bytes) -> bytes:
|
||||||
|
versions = (
|
||||||
|
b"%PDF-1.3",
|
||||||
|
b"%PDF-1.4",
|
||||||
|
b"%PDF-1.5",
|
||||||
|
b"%PDF-1.6",
|
||||||
|
b"%PDF-1.7",
|
||||||
|
b"%PDF-2.0",
|
||||||
|
)
|
||||||
|
pdf_header_indices = []
|
||||||
|
if header1 in versions:
|
||||||
|
pdf_header_indices.append(versions.index(header1))
|
||||||
|
if header2 in versions:
|
||||||
|
pdf_header_indices.append(versions.index(header2))
|
||||||
|
if len(pdf_header_indices) == 0:
|
||||||
|
raise ValueError(f"neither {header1!r} nor {header2!r} are proper headers")
|
||||||
|
return versions[max(pdf_header_indices)]
|
||||||
|
|
||||||
|
|
||||||
|
def read_until_whitespace(stream: StreamType, maxchars: Optional[int] = None) -> bytes:
    """
    Read non-whitespace characters and return them.

    Stops upon encountering whitespace (which is consumed from the
    stream but not returned), at end of stream, or once *maxchars*
    bytes have been collected.
    """
    collected = bytearray()
    tok = stream.read(1)
    while tok and not tok.isspace():
        collected += tok
        if len(collected) == maxchars:
            break
        tok = stream.read(1)
    return bytes(collected)
|
||||||
|
|
||||||
|
|
||||||
|
def read_non_whitespace(stream: StreamType) -> bytes:
    """
    Find and read the next non-whitespace character (ignores whitespace).

    NOTE(review): at end of stream ``read`` returns ``b""``, which is
    presumably not in WHITESPACES, so EOF yields ``b""`` — confirm.
    """
    tok = stream.read(1)
    while tok in WHITESPACES:
        tok = stream.read(1)
    return tok
|
||||||
|
|
||||||
|
|
||||||
|
def skip_over_whitespace(stream: StreamType) -> bool:
    """
    Similar to read_non_whitespace, but return a Boolean if more than
    one whitespace character was read.

    NOTE(review): ``cnt`` also counts the final, non-whitespace read, so
    this actually returns True as soon as at least ONE whitespace byte
    was skipped — the docstring and the code disagree; confirm which
    behavior callers rely on before changing either.
    """
    tok = WHITESPACES[0]  # seed value so the loop body runs at least once
    cnt = 0
    while tok in WHITESPACES:
        tok = stream.read(1)
        cnt += 1
    return cnt > 1
|
||||||
|
|
||||||
|
|
||||||
|
def skip_over_comment(stream: StreamType) -> None:
    """
    If the stream is positioned at a PDF comment ("%"), consume it
    through the terminating CR/LF; otherwise leave the position intact.
    """
    tok = stream.read(1)
    stream.seek(-1, 1)  # peek: restore position before deciding
    if tok == b"%":
        while tok not in (b"\n", b"\r"):
            tok = stream.read(1)
            # Fix: a comment at the very end of the stream has no
            # terminating newline; read(1) then returns b"" forever and
            # the original loop never exited. Break on EOF.
            if not tok:
                break
|
||||||
|
|
||||||
|
|
||||||
|
def read_until_regex(
    stream: StreamType, regex: Pattern[bytes], ignore_eof: bool = False
) -> bytes:
    """
    Read until the regular expression pattern matched (ignore the match).

    The stream is left positioned at the start of the match.

    :raises PdfStreamError: on premature end-of-file
    :param bool ignore_eof: If true, ignore end-of-line and return immediately
    :param regex: re.Pattern
    """
    collected = bytearray()
    while True:
        chunk = stream.read(16)
        if not chunk:
            if not ignore_eof:
                raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
            return bytes(collected)
        match = regex.search(chunk)
        if match is None:
            collected += chunk
            continue
        # Keep only the bytes before the match and rewind the stream to
        # the match's start.
        collected += chunk[: match.start()]
        stream.seek(match.start() - len(chunk), 1)
        return bytes(collected)
|
||||||
|
|
||||||
|
|
||||||
|
def read_block_backwards(stream: StreamType, to_read: int) -> bytes:
    """
    Read the *to_read* bytes that end at the stream's current position.

    On return the stream is positioned at the start of the block just
    read, so successive calls walk backwards through the stream.

    :raises PdfStreamError: if fewer than *to_read* bytes precede the
        current position (malformed PDF).
    """
    if to_read > stream.tell():
        raise PdfStreamError("Could not read malformed PDF file")
    # Jump back, read the block, then jump back again so the caller is
    # left at the block's first byte.
    stream.seek(-to_read, SEEK_CUR)
    block = stream.read(to_read)
    stream.seek(-to_read, SEEK_CUR)
    return block
|
||||||
|
|
||||||
|
|
||||||
|
def read_previous_line(stream: StreamType) -> bytes:
    """
    Given a byte stream with current position X, return the previous line.

    "Previous line" means all bytes between the first CR/LF byte found
    before X (or the start of the stream, if none is found) and position X.

    After this call, the stream is positioned one byte after the first
    non-CRLF byte found beyond the first CR/LF byte before X, or, if no
    such byte is found, at the beginning of the stream — i.e. just past
    the end of the line *preceding* the returned one, skipping any run of
    CR/LF bytes between the two lines.

    :raises PdfStreamError: if called with the stream at position 0
        (there is no previous line to read).
    """
    # Pieces of the line, collected back-to-front as we scan backwards.
    line_content = []
    found_crlf = False
    if stream.tell() == 0:
        raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
    while True:
        # Never read past the start of the stream.
        to_read = min(DEFAULT_BUFFER_SIZE, stream.tell())
        if to_read == 0:
            break
        # Read the block. After this, our stream will be one
        # beyond the initial position.
        block = read_block_backwards(stream, to_read)
        # Scan the block from its last byte towards its first.
        idx = len(block) - 1
        if not found_crlf:
            # We haven't found our first CR/LF yet.
            # Read off characters until we hit one.
            while idx >= 0 and block[idx] not in b"\r\n":
                idx -= 1
            if idx >= 0:
                found_crlf = True
        if found_crlf:
            # We found our first CR/LF already (on this block or
            # a previous one).
            # Our combined line is the remainder of the block
            # plus any previously read blocks.
            line_content.append(block[idx + 1 :])
            # Continue to read off any more CRLF characters.
            while idx >= 0 and block[idx] in b"\r\n":
                idx -= 1
        else:
            # Didn't find CR/LF yet - add this block to our
            # previously read blocks and continue.
            line_content.append(block)
        if idx >= 0:
            # We found the next non-CRLF character.
            # Set the stream position correctly, then break
            stream.seek(idx + 1, SEEK_CUR)
            break
    # Join all the blocks in the line (which are in reverse order)
    return b"".join(line_content[::-1])
|
||||||
|
|
||||||
|
|
||||||
|
def matrix_multiply(
    a: TransformationMatrixType, b: TransformationMatrixType
) -> TransformationMatrixType:
    """
    Multiply two transformation matrices: row-by-column product ``a @ b``.

    Every element is coerced to float; the result has one row per row of
    *a* and one column per column of *b*, as a tuple of tuples.
    """
    columns = list(zip(*b))
    product = []
    for row in a:
        product.append(
            tuple(
                sum(float(x) * float(y) for x, y in zip(row, col))
                for col in columns
            )
        )
    return tuple(product)  # type: ignore[return-value]
|
||||||
|
|
||||||
|
|
||||||
|
def mark_location(stream: StreamType) -> None:
    """
    Debugging helper: dump the stream contents around the current position.

    Writes ``radius`` bytes before and after the current location to the
    file ``PyPDF2_pdfLocation.txt`` in the working directory, separated by
    the marker ``HERE``, then restores the original stream position.

    NOTE(review): ``seek(-radius, 1)`` assumes at least ``radius`` bytes
    precede the current position — confirm for streams near their start.
    """
    radius = 5000
    stream.seek(-radius, 1)
    with open("PyPDF2_pdfLocation.txt", "wb") as out:
        out.write(stream.read(radius))
        out.write(b"HERE")
        out.write(stream.read(radius))
    stream.seek(-radius, 1)
|
||||||
|
|
||||||
|
|
||||||
|
# Memo of previously converted values.  Only inputs shorter than two
# characters are stored, which keeps the cache bounded.
B_CACHE: Dict[Union[str, bytes], bytes] = {}


def b_(s: Union[str, bytes]) -> bytes:
    """
    Coerce *s* to ``bytes``.

    ``bytes`` input is returned unchanged.  ``str`` input is encoded as
    Latin-1 when possible, falling back to UTF-8.  Results for inputs of
    length 0 or 1 are memoized in ``B_CACHE``.
    """
    cached = B_CACHE.get(s)
    if cached is not None:
        return cached
    if isinstance(s, bytes):
        return s
    try:
        encoded = s.encode("latin-1")
    except Exception:
        encoded = s.encode("utf-8")
    if len(s) < 2:
        B_CACHE[s] = encoded
    return encoded
|
||||||
|
|
||||||
|
|
||||||
|
@overload
def str_(b: str) -> str:
    ...


@overload
def str_(b: bytes) -> str:
    ...


def str_(b: Union[str, bytes]) -> str:
    """
    Coerce *b* to ``str``.

    ``bytes`` are decoded as Latin-1, which cannot fail since every byte
    value maps to a code point; ``str`` input is returned unchanged.
    """
    return b.decode("latin-1") if isinstance(b, bytes) else b
|
||||||
|
|
||||||
|
|
||||||
|
@overload
def ord_(b: str) -> int:
    ...


@overload
def ord_(b: bytes) -> bytes:
    ...


@overload
def ord_(b: int) -> int:
    ...


def ord_(b: Union[int, str, bytes]) -> Union[int, bytes]:
    """
    Python 2/3 compatibility shim: ordinal of a one-character ``str``.

    ``int`` and ``bytes`` values are passed through unchanged.
    """
    return ord(b) if isinstance(b, str) else b
|
||||||
|
|
||||||
|
|
||||||
|
def hexencode(b: bytes) -> bytes:
    """
    Hex-encode *b* via the ``hex_codec`` codec (lowercase ASCII digits).

    The codec's encoder returns ``(encoded, length_consumed)``; only the
    encoded bytes are returned.
    """
    encode = getencoder("hex_codec")
    encoded, _consumed = encode(b)  # type: ignore
    return encoded
|
||||||
|
|
||||||
|
|
||||||
|
def hex_str(num: int) -> str:
    """
    Format *num* as a ``0x``-prefixed hexadecimal string, e.g. ``0xff``.

    The historical ``.replace("L", "")`` only stripped Python 2's
    long-integer suffix; ``format`` never emits one on Python 3, so the
    output is identical.
    """
    return format(num, "#x")
|
||||||
|
|
||||||
|
|
||||||
|
# Bytes treated as whitespace by the tokenizer: space, LF, CR, tab, NUL.
# NOTE(review): the PDF specification also classes form feed (b"\x0c") as
# whitespace — confirm whether its omission here is intentional.
WHITESPACES = (b" ", b"\n", b"\r", b"\t", b"\x00")
|
||||||
|
|
||||||
|
|
||||||
|
def paeth_predictor(left: int, up: int, up_left: int) -> int:
    """
    PNG Paeth predictor (RFC 2083, section 6.6).

    Returns whichever of *left*, *up*, *up_left* is closest to the
    initial estimate ``left + up - up_left``, breaking ties in that order.
    """
    estimate = left + up - up_left
    candidates = (
        (abs(estimate - left), left),
        (abs(estimate - up), up),
        (abs(estimate - up_left), up_left),
    )
    # min() returns the first minimal element, which preserves the
    # required left -> up -> up_left tie-break priority.
    return min(candidates, key=lambda pair: pair[0])[1]
|
||||||
|
|
||||||
|
|
||||||
|
def deprecate(msg: str, stacklevel: int = 3) -> None:
    """Emit a DeprecationWarning for *msg*, attributed *stacklevel* frames up."""
    warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecation(msg: str) -> None:
    """Raise DeprecationError: the feature named in *msg* has been removed."""
    raise DeprecationError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecate_with_replacement(
    old_name: str, new_name: str, removed_in: str = "3.0.0"
) -> None:
    """
    Warn that *old_name* will be removed in *removed_in*; use *new_name*.

    Emits a DeprecationWarning (via ``deprecate``); it does not raise.
    """
    message = DEPR_MSG.format(old_name, new_name, removed_in)
    deprecate(message, 4)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecation_with_replacement(
    old_name: str, new_name: str, removed_in: str = "3.0.0"
) -> None:
    """
    Raise an exception: *old_name* was removed in *removed_in*; use *new_name*.
    """
    message = DEPR_MSG_HAPPENED.format(old_name, removed_in, new_name)
    deprecation(message)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecate_no_replacement(name: str, removed_in: str = "3.0.0") -> None:
    """
    Warn that *name* will be removed in *removed_in* with no replacement.

    Emits a DeprecationWarning (via ``deprecate``); it does not raise.
    """
    message = DEPR_MSG_NO_REPLACEMENT.format(name, removed_in)
    deprecate(message, 4)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecation_no_replacement(name: str, removed_in: str = "3.0.0") -> None:
    """
    Raise an exception: *name* was removed in *removed_in*, no replacement.
    """
    message = DEPR_MSG_NO_REPLACEMENT_HAPPENED.format(name, removed_in)
    deprecation(message)
|
||||||
|
|
||||||
|
|
||||||
|
def logger_warning(msg: str, src: str) -> None:
    """
    Log *msg* as a warning on the logger named *src*.

    Use this instead of ``logger.warning`` directly, so users can
    overwrite warning handling in one place.

    ## Exception, warnings.warn, logger_warning
    - Exceptions should be used if the user should write code that deals
      with an error case, e.g. the PDF being completely broken.
    - warnings.warn should be used if the user needs to fix their code,
      e.g. DeprecationWarnings.
    - logger_warning should be used if the user needs to know that an
      issue was handled by PyPDF2, e.g. a non-compliant PDF being read in
      a way that PyPDF2 could apply a robustness fix to still read it.
      This applies mainly to strict=False mode.
    """
    logger = logging.getLogger(src)
    logger.warning(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def deprecation_bookmark(**aliases: str) -> Callable:
    """
    Decorator translating the deprecated "bookmark" terminology.

    *aliases* maps deprecated keyword names to their replacements; a call
    using a deprecated keyword raises via ``rename_kwargs(fail=True)``.

    To be used for methods and function arguments:
        outline_item = a bookmark
        outline = a collection of outline items
    """

    def decorator(func: Callable):  # type: ignore
        @functools.wraps(func)
        def wrapped(*args, **kwargs):  # type: ignore
            rename_kwargs(func.__name__, kwargs, aliases, fail=True)
            return func(*args, **kwargs)

        return wrapped

    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
def rename_kwargs(  # type: ignore
    func_name: str, kwargs: Dict[str, Any], aliases: Dict[str, str], fail: bool = False
):
    """
    Rename deprecated keyword arguments in *kwargs*, in place.

    For every ``old -> new`` pair in *aliases* where ``old`` is present:
    raise DeprecationError when *fail* is True; raise TypeError if both
    the old and the new name were supplied; otherwise move the value to
    the new name and emit a DeprecationWarning.
    """
    for old_kw, new_kw in aliases.items():
        if old_kw not in kwargs:
            continue
        if fail:
            raise DeprecationError(
                f"{old_kw} is deprecated as an argument. Use {new_kw} instead"
            )
        if new_kw in kwargs:
            raise TypeError(
                f"{func_name} received both {old_kw} and {new_kw} as an argument. "
                f"{old_kw} is deprecated. Use {new_kw} instead."
            )
        kwargs[new_kw] = kwargs.pop(old_kw)
        warnings.warn(
            message=(f"{old_kw} is deprecated as an argument. Use {new_kw} instead"),
            category=DeprecationWarning,
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _human_readable_bytes(bytes: int) -> str:
    """
    Render a byte count with decimal (SI) units: Byte, kB, MB or GB.

    Values below 1000 are shown exactly; larger values with one decimal.
    """
    # NOTE: the parameter shadows the builtin ``bytes``; the name is kept
    # so keyword callers are not broken.
    count = bytes
    if count < 10**3:
        return f"{count} Byte"
    if count < 10**6:
        return f"{count / 10**3:.1f} kB"
    if count < 10**9:
        return f"{count / 10**6:.1f} MB"
    return f"{count / 10**9:.1f} GB"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class File:
    """An in-memory file: a name together with its raw byte content."""

    name: str  # file name as stored / displayed
    data: bytes  # raw file content

    def __str__(self) -> str:
        # Human-friendly form: size only, content omitted.
        return f"File(name={self.name}, data: {_human_readable_bytes(len(self.data))})"

    def __repr__(self) -> str:
        # Adds a content hash so equally sized files can be told apart.
        return f"File(name={self.name}, data: {_human_readable_bytes(len(self.data))}, hash: {hash(self.data)})"
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
__version__ = "3.0.1"
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,461 @@
|
||||||
|
"""
|
||||||
|
See Portable Document Format Reference Manual, 1993. ISBN 0-201-62628-4.
|
||||||
|
|
||||||
|
See https://ia802202.us.archive.org/8/items/pdfy-0vt8s-egqFwDl7L2/PDF%20Reference%201.0.pdf
|
||||||
|
|
||||||
|
PDF Reference, third edition, Version 1.4, 2001. ISBN 0-201-75839-3.
|
||||||
|
|
||||||
|
PDF Reference, sixth edition, Version 1.7, 2006.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from enum import IntFlag
|
||||||
|
from typing import Dict, Tuple
|
||||||
|
|
||||||
|
|
||||||
|
class Core:
|
||||||
|
"""Keywords that don't quite belong anywhere else."""
|
||||||
|
|
||||||
|
OUTLINES = "/Outlines"
|
||||||
|
THREADS = "/Threads"
|
||||||
|
PAGE = "/Page"
|
||||||
|
PAGES = "/Pages"
|
||||||
|
CATALOG = "/Catalog"
|
||||||
|
|
||||||
|
|
||||||
|
class TrailerKeys:
    """Keys of the PDF file trailer dictionary."""

    ROOT = "/Root"  # reference to the catalog dictionary
    ENCRYPT = "/Encrypt"  # encryption dictionary, present when encrypted
    ID = "/ID"  # file identifier array
    INFO = "/Info"  # document information dictionary
    SIZE = "/Size"  # number of entries in the cross-reference table
|
||||||
|
|
||||||
|
|
||||||
|
class CatalogAttributes:
|
||||||
|
NAMES = "/Names"
|
||||||
|
DESTS = "/Dests"
|
||||||
|
|
||||||
|
|
||||||
|
class EncryptionDictAttributes:
|
||||||
|
"""
|
||||||
|
Additional encryption dictionary entries for the standard security handler.
|
||||||
|
|
||||||
|
TABLE 3.19, Page 122
|
||||||
|
"""
|
||||||
|
|
||||||
|
R = "/R" # number, required; revision of the standard security handler
|
||||||
|
O = "/O" # 32-byte string, required
|
||||||
|
U = "/U" # 32-byte string, required
|
||||||
|
P = "/P" # integer flag, required; permitted operations
|
||||||
|
ENCRYPT_METADATA = "/EncryptMetadata" # boolean flag, optional
|
||||||
|
|
||||||
|
|
||||||
|
class UserAccessPermissions(IntFlag):
|
||||||
|
"""TABLE 3.20 User access permissions"""
|
||||||
|
|
||||||
|
R1 = 1
|
||||||
|
R2 = 2
|
||||||
|
PRINT = 4
|
||||||
|
MODIFY = 8
|
||||||
|
EXTRACT = 16
|
||||||
|
ADD_OR_MODIFY = 32
|
||||||
|
R7 = 64
|
||||||
|
R8 = 128
|
||||||
|
FILL_FORM_FIELDS = 256
|
||||||
|
EXTRACT_TEXT_AND_GRAPHICS = 512
|
||||||
|
ASSEMBLE_DOC = 1024
|
||||||
|
PRINT_TO_REPRESENTATION = 2048
|
||||||
|
R13 = 2**12
|
||||||
|
R14 = 2**13
|
||||||
|
R15 = 2**14
|
||||||
|
R16 = 2**15
|
||||||
|
R17 = 2**16
|
||||||
|
R18 = 2**17
|
||||||
|
R19 = 2**18
|
||||||
|
R20 = 2**19
|
||||||
|
R21 = 2**20
|
||||||
|
R22 = 2**21
|
||||||
|
R23 = 2**22
|
||||||
|
R24 = 2**23
|
||||||
|
R25 = 2**24
|
||||||
|
R26 = 2**25
|
||||||
|
R27 = 2**26
|
||||||
|
R28 = 2**27
|
||||||
|
R29 = 2**28
|
||||||
|
R30 = 2**29
|
||||||
|
R31 = 2**30
|
||||||
|
R32 = 2**31
|
||||||
|
|
||||||
|
|
||||||
|
class Ressources:
|
||||||
|
"""TABLE 3.30 Entries in a resource dictionary."""
|
||||||
|
|
||||||
|
EXT_G_STATE = "/ExtGState" # dictionary, optional
|
||||||
|
COLOR_SPACE = "/ColorSpace" # dictionary, optional
|
||||||
|
PATTERN = "/Pattern" # dictionary, optional
|
||||||
|
SHADING = "/Shading" # dictionary, optional
|
||||||
|
XOBJECT = "/XObject" # dictionary, optional
|
||||||
|
FONT = "/Font" # dictionary, optional
|
||||||
|
PROC_SET = "/ProcSet" # array, optional
|
||||||
|
PROPERTIES = "/Properties" # dictionary, optional
|
||||||
|
|
||||||
|
|
||||||
|
class PagesAttributes:
|
||||||
|
"""Page Attributes, Table 6.2, Page 52."""
|
||||||
|
|
||||||
|
TYPE = "/Type" # name, required; must be /Pages
|
||||||
|
KIDS = "/Kids" # array, required; List of indirect references
|
||||||
|
COUNT = "/Count" # integer, required; the number of all nodes und this node
|
||||||
|
PARENT = "/Parent" # dictionary, required; indirect reference to pages object
|
||||||
|
|
||||||
|
|
||||||
|
class PageAttributes:
|
||||||
|
"""TABLE 3.27 Entries in a page object."""
|
||||||
|
|
||||||
|
TYPE = "/Type" # name, required; must be /Page
|
||||||
|
PARENT = "/Parent" # dictionary, required; a pages object
|
||||||
|
LAST_MODIFIED = (
|
||||||
|
"/LastModified" # date, optional; date and time of last modification
|
||||||
|
)
|
||||||
|
RESOURCES = "/Resources" # dictionary, required if there are any
|
||||||
|
MEDIABOX = "/MediaBox" # rectangle, required; rectangle specifying page size
|
||||||
|
CROPBOX = "/CropBox" # rectangle, optional; rectangle
|
||||||
|
BLEEDBOX = "/BleedBox" # rectangle, optional; rectangle
|
||||||
|
TRIMBOX = "/TrimBox" # rectangle, optional; rectangle
|
||||||
|
ARTBOX = "/ArtBox" # rectangle, optional; rectangle
|
||||||
|
BOX_COLOR_INFO = "/BoxColorInfo" # dictionary, optional
|
||||||
|
CONTENTS = "/Contents" # stream or array, optional
|
||||||
|
ROTATE = "/Rotate" # integer, optional; page rotation in degrees
|
||||||
|
GROUP = "/Group" # dictionary, optional; page group
|
||||||
|
THUMB = "/Thumb" # stream, optional; indirect reference to image of the page
|
||||||
|
B = "/B" # array, optional
|
||||||
|
DUR = "/Dur" # number, optional
|
||||||
|
TRANS = "/Trans" # dictionary, optional
|
||||||
|
ANNOTS = "/Annots" # array, optional; an array of annotations
|
||||||
|
AA = "/AA" # dictionary, optional
|
||||||
|
METADATA = "/Metadata" # stream, optional
|
||||||
|
PIECE_INFO = "/PieceInfo" # dictionary, optional
|
||||||
|
STRUCT_PARENTS = "/StructParents" # integer, optional
|
||||||
|
ID = "/ID" # byte string, optional
|
||||||
|
PZ = "/PZ" # number, optional
|
||||||
|
TABS = "/Tabs" # name, optional
|
||||||
|
TEMPLATE_INSTANTIATED = "/TemplateInstantiated" # name, optional
|
||||||
|
PRES_STEPS = "/PresSteps" # dictionary, optional
|
||||||
|
USER_UNIT = "/UserUnit" # number, optional
|
||||||
|
VP = "/VP" # dictionary, optional
|
||||||
|
|
||||||
|
|
||||||
|
class FileSpecificationDictionaryEntries:
|
||||||
|
"""TABLE 3.41 Entries in a file specification dictionary"""
|
||||||
|
|
||||||
|
Type = "/Type"
|
||||||
|
FS = "/FS" # The name of the file system to be used to interpret this file specification
|
||||||
|
F = "/F" # A file specification string of the form described in Section 3.10.1
|
||||||
|
EF = "/EF" # dictionary, containing a subset of the keys F , UF , DOS , Mac , and Unix
|
||||||
|
|
||||||
|
|
||||||
|
class StreamAttributes:
|
||||||
|
"""Table 4.2."""
|
||||||
|
|
||||||
|
LENGTH = "/Length" # integer, required
|
||||||
|
FILTER = "/Filter" # name or array of names, optional
|
||||||
|
DECODE_PARMS = "/DecodeParms" # variable, optional -- 'decodeParams is wrong
|
||||||
|
|
||||||
|
|
||||||
|
class FilterTypes:
|
||||||
|
"""
|
||||||
|
Table 4.3 of the 1.4 Manual.
|
||||||
|
|
||||||
|
Page 354 of the 1.7 Manual
|
||||||
|
"""
|
||||||
|
|
||||||
|
ASCII_HEX_DECODE = "/ASCIIHexDecode" # abbreviation: AHx
|
||||||
|
ASCII_85_DECODE = "/ASCII85Decode" # abbreviation: A85
|
||||||
|
LZW_DECODE = "/LZWDecode" # abbreviation: LZW
|
||||||
|
FLATE_DECODE = "/FlateDecode" # abbreviation: Fl, PDF 1.2
|
||||||
|
RUN_LENGTH_DECODE = "/RunLengthDecode" # abbreviation: RL
|
||||||
|
CCITT_FAX_DECODE = "/CCITTFaxDecode" # abbreviation: CCF
|
||||||
|
DCT_DECODE = "/DCTDecode" # abbreviation: DCT
|
||||||
|
|
||||||
|
|
||||||
|
class FilterTypeAbbreviations:
|
||||||
|
"""Table 4.44 of the 1.7 Manual (page 353ff)."""
|
||||||
|
|
||||||
|
AHx = "/AHx"
|
||||||
|
A85 = "/A85"
|
||||||
|
LZW = "/LZW"
|
||||||
|
FL = "/Fl" # FlateDecode
|
||||||
|
RL = "/RL"
|
||||||
|
CCF = "/CCF"
|
||||||
|
DCT = "/DCT"
|
||||||
|
|
||||||
|
|
||||||
|
class LzwFilterParameters:
|
||||||
|
"""Table 4.4."""
|
||||||
|
|
||||||
|
PREDICTOR = "/Predictor" # integer
|
||||||
|
COLUMNS = "/Columns" # integer
|
||||||
|
COLORS = "/Colors" # integer
|
||||||
|
BITS_PER_COMPONENT = "/BitsPerComponent" # integer
|
||||||
|
EARLY_CHANGE = "/EarlyChange" # integer
|
||||||
|
|
||||||
|
|
||||||
|
class CcittFaxDecodeParameters:
|
||||||
|
"""Table 4.5."""
|
||||||
|
|
||||||
|
K = "/K" # integer
|
||||||
|
END_OF_LINE = "/EndOfLine" # boolean
|
||||||
|
ENCODED_BYTE_ALIGN = "/EncodedByteAlign" # boolean
|
||||||
|
COLUMNS = "/Columns" # integer
|
||||||
|
ROWS = "/Rows" # integer
|
||||||
|
END_OF_BLOCK = "/EndOfBlock" # boolean
|
||||||
|
BLACK_IS_1 = "/BlackIs1" # boolean
|
||||||
|
DAMAGED_ROWS_BEFORE_ERROR = "/DamagedRowsBeforeError" # integer
|
||||||
|
|
||||||
|
|
||||||
|
class ImageAttributes:
|
||||||
|
"""Table 6.20."""
|
||||||
|
|
||||||
|
TYPE = "/Type" # name, required; must be /XObject
|
||||||
|
SUBTYPE = "/Subtype" # name, required; must be /Image
|
||||||
|
NAME = "/Name" # name, required
|
||||||
|
WIDTH = "/Width" # integer, required
|
||||||
|
HEIGHT = "/Height" # integer, required
|
||||||
|
BITS_PER_COMPONENT = "/BitsPerComponent" # integer, required
|
||||||
|
COLOR_SPACE = "/ColorSpace" # name, required
|
||||||
|
DECODE = "/Decode" # array, optional
|
||||||
|
INTERPOLATE = "/Interpolate" # boolean, optional
|
||||||
|
IMAGE_MASK = "/ImageMask" # boolean, optional
|
||||||
|
|
||||||
|
|
||||||
|
class ColorSpaces:
    """Names of the PDF device color spaces."""

    DEVICE_RGB = "/DeviceRGB"
    DEVICE_CMYK = "/DeviceCMYK"
    DEVICE_GRAY = "/DeviceGray"
|
||||||
|
|
||||||
|
|
||||||
|
class TypArguments:
|
||||||
|
"""Table 8.2 of the PDF 1.7 reference."""
|
||||||
|
|
||||||
|
LEFT = "/Left"
|
||||||
|
RIGHT = "/Right"
|
||||||
|
BOTTOM = "/Bottom"
|
||||||
|
TOP = "/Top"
|
||||||
|
|
||||||
|
|
||||||
|
class TypFitArguments:
|
||||||
|
"""Table 8.2 of the PDF 1.7 reference."""
|
||||||
|
|
||||||
|
FIT = "/Fit"
|
||||||
|
FIT_V = "/FitV"
|
||||||
|
FIT_BV = "/FitBV"
|
||||||
|
FIT_B = "/FitB"
|
||||||
|
FIT_H = "/FitH"
|
||||||
|
FIT_BH = "/FitBH"
|
||||||
|
FIT_R = "/FitR"
|
||||||
|
XYZ = "/XYZ"
|
||||||
|
|
||||||
|
|
||||||
|
class GoToActionArguments:
    """Entries of a go-to action dictionary."""

    S = "/S"  # name, required: type of action
    D = "/D"  # name / byte string / array, required: Destination to jump to
|
||||||
|
|
||||||
|
|
||||||
|
class AnnotationDictionaryAttributes:
|
||||||
|
"""TABLE 8.15 Entries common to all annotation dictionaries"""
|
||||||
|
|
||||||
|
Type = "/Type"
|
||||||
|
Subtype = "/Subtype"
|
||||||
|
Rect = "/Rect"
|
||||||
|
Contents = "/Contents"
|
||||||
|
P = "/P"
|
||||||
|
NM = "/NM"
|
||||||
|
M = "/M"
|
||||||
|
F = "/F"
|
||||||
|
AP = "/AP"
|
||||||
|
AS = "/AS"
|
||||||
|
Border = "/Border"
|
||||||
|
C = "/C"
|
||||||
|
StructParent = "/StructParent"
|
||||||
|
OC = "/OC"
|
||||||
|
|
||||||
|
|
||||||
|
class InteractiveFormDictEntries:
|
||||||
|
Fields = "/Fields"
|
||||||
|
NeedAppearances = "/NeedAppearances"
|
||||||
|
SigFlags = "/SigFlags"
|
||||||
|
CO = "/CO"
|
||||||
|
DR = "/DR"
|
||||||
|
DA = "/DA"
|
||||||
|
Q = "/Q"
|
||||||
|
XFA = "/XFA"
|
||||||
|
|
||||||
|
|
||||||
|
class FieldDictionaryAttributes:
    """TABLE 8.69 Entries common to all field dictionaries (PDF 1.7 reference)."""

    FT = "/FT"  # name, required for terminal fields
    Parent = "/Parent"  # dictionary, required for children
    Kids = "/Kids"  # array, sometimes required
    T = "/T"  # text string, optional
    TU = "/TU"  # text string, optional
    TM = "/TM"  # text string, optional
    Ff = "/Ff"  # integer, optional
    V = "/V"  # text string, optional
    DV = "/DV"  # text string, optional
    AA = "/AA"  # dictionary, optional

    @classmethod
    def attributes(cls) -> Tuple[str, ...]:
        """All field keys, in the historically used traversal order."""
        return (
            cls.TM,
            cls.T,
            cls.FT,
            cls.Parent,
            cls.TU,
            cls.Ff,
            cls.V,
            cls.DV,
            cls.Kids,
            cls.AA,
        )

    @classmethod
    def attributes_dict(cls) -> Dict[str, str]:
        """Map each field key to a human-readable description."""
        return {
            cls.FT: "Field Type",
            cls.Parent: "Parent",
            cls.T: "Field Name",
            cls.TU: "Alternate Field Name",
            cls.TM: "Mapping Name",
            cls.Ff: "Field Flags",
            cls.V: "Value",
            cls.DV: "Default Value",
        }
|
||||||
|
|
||||||
|
|
||||||
|
class CheckboxRadioButtonAttributes:
    """TABLE 8.76 Field flags common to all field types"""

    Opt = "/Opt"  # Options, Optional

    @classmethod
    def attributes(cls) -> Tuple[str, ...]:
        """All keys defined for checkbox/radio-button fields."""
        return (cls.Opt,)

    @classmethod
    def attributes_dict(cls) -> Dict[str, str]:
        """Map each key to a human-readable description."""
        return {cls.Opt: "Options"}
|
||||||
|
|
||||||
|
|
||||||
|
class FieldFlag(IntFlag):
|
||||||
|
"""TABLE 8.70 Field flags common to all field types"""
|
||||||
|
|
||||||
|
READ_ONLY = 1
|
||||||
|
REQUIRED = 2
|
||||||
|
NO_EXPORT = 4
|
||||||
|
|
||||||
|
|
||||||
|
class DocumentInformationAttributes:
|
||||||
|
"""TABLE 10.2 Entries in the document information dictionary."""
|
||||||
|
|
||||||
|
TITLE = "/Title" # text string, optional
|
||||||
|
AUTHOR = "/Author" # text string, optional
|
||||||
|
SUBJECT = "/Subject" # text string, optional
|
||||||
|
KEYWORDS = "/Keywords" # text string, optional
|
||||||
|
CREATOR = "/Creator" # text string, optional
|
||||||
|
PRODUCER = "/Producer" # text string, optional
|
||||||
|
CREATION_DATE = "/CreationDate" # date, optional
|
||||||
|
MOD_DATE = "/ModDate" # date, optional
|
||||||
|
TRAPPED = "/Trapped" # name, optional
|
||||||
|
|
||||||
|
|
||||||
|
class PageLayouts:
|
||||||
|
"""Page 84, PDF 1.4 reference."""
|
||||||
|
|
||||||
|
SINGLE_PAGE = "/SinglePage"
|
||||||
|
ONE_COLUMN = "/OneColumn"
|
||||||
|
TWO_COLUMN_LEFT = "/TwoColumnLeft"
|
||||||
|
TWO_COLUMN_RIGHT = "/TwoColumnRight"
|
||||||
|
|
||||||
|
|
||||||
|
class GraphicsStateParameters:
|
||||||
|
"""Table 4.8 of the 1.7 reference."""
|
||||||
|
|
||||||
|
TYPE = "/Type" # name, optional
|
||||||
|
LW = "/LW" # number, optional
|
||||||
|
# TODO: Many more!
|
||||||
|
FONT = "/Font" # array, optional
|
||||||
|
S_MASK = "/SMask" # dictionary or name, optional
|
||||||
|
|
||||||
|
|
||||||
|
class CatalogDictionary:
|
||||||
|
"""Table 3.25 in the 1.7 reference."""
|
||||||
|
|
||||||
|
TYPE = "/Type" # name, required; must be /Catalog
|
||||||
|
VERSION = "/Version" # name
|
||||||
|
PAGES = "/Pages" # dictionary, required
|
||||||
|
PAGE_LABELS = "/PageLabels" # number tree, optional
|
||||||
|
NAMES = "/Names" # dictionary, optional
|
||||||
|
DESTS = "/Dests" # dictionary, optional
|
||||||
|
VIEWER_PREFERENCES = "/ViewerPreferences" # dictionary, optional
|
||||||
|
PAGE_LAYOUT = "/PageLayout" # name, optional
|
||||||
|
PAGE_MODE = "/PageMode" # name, optional
|
||||||
|
OUTLINES = "/Outlines" # dictionary, optional
|
||||||
|
THREADS = "/Threads" # array, optional
|
||||||
|
OPEN_ACTION = "/OpenAction" # array or dictionary or name, optional
|
||||||
|
AA = "/AA" # dictionary, optional
|
||||||
|
URI = "/URI" # dictionary, optional
|
||||||
|
ACRO_FORM = "/AcroForm" # dictionary, optional
|
||||||
|
METADATA = "/Metadata" # stream, optional
|
||||||
|
STRUCT_TREE_ROOT = "/StructTreeRoot" # dictionary, optional
|
||||||
|
MARK_INFO = "/MarkInfo" # dictionary, optional
|
||||||
|
LANG = "/Lang" # text string, optional
|
||||||
|
SPIDER_INFO = "/SpiderInfo" # dictionary, optional
|
||||||
|
OUTPUT_INTENTS = "/OutputIntents" # array, optional
|
||||||
|
PIECE_INFO = "/PieceInfo" # dictionary, optional
|
||||||
|
OC_PROPERTIES = "/OCProperties" # dictionary, optional
|
||||||
|
PERMS = "/Perms" # dictionary, optional
|
||||||
|
LEGAL = "/Legal" # dictionary, optional
|
||||||
|
REQUIREMENTS = "/Requirements" # array, optional
|
||||||
|
COLLECTION = "/Collection" # dictionary, optional
|
||||||
|
NEEDS_RENDERING = "/NeedsRendering" # boolean, optional
|
||||||
|
|
||||||
|
|
||||||
|
class OutlineFontFlag(IntFlag):
|
||||||
|
"""
|
||||||
|
A class used as an enumerable flag for formatting an outline font
|
||||||
|
"""
|
||||||
|
|
||||||
|
italic = 1
|
||||||
|
bold = 2
|
||||||
|
|
||||||
|
|
||||||
|
PDF_KEYS = (
|
||||||
|
AnnotationDictionaryAttributes,
|
||||||
|
CatalogAttributes,
|
||||||
|
CatalogDictionary,
|
||||||
|
CcittFaxDecodeParameters,
|
||||||
|
CheckboxRadioButtonAttributes,
|
||||||
|
ColorSpaces,
|
||||||
|
Core,
|
||||||
|
DocumentInformationAttributes,
|
||||||
|
EncryptionDictAttributes,
|
||||||
|
FieldDictionaryAttributes,
|
||||||
|
FilterTypeAbbreviations,
|
||||||
|
FilterTypes,
|
||||||
|
GoToActionArguments,
|
||||||
|
GraphicsStateParameters,
|
||||||
|
ImageAttributes,
|
||||||
|
FileSpecificationDictionaryEntries,
|
||||||
|
LzwFilterParameters,
|
||||||
|
PageAttributes,
|
||||||
|
PageLayouts,
|
||||||
|
PagesAttributes,
|
||||||
|
Ressources,
|
||||||
|
StreamAttributes,
|
||||||
|
TrailerKeys,
|
||||||
|
TypArguments,
|
||||||
|
TypFitArguments,
|
||||||
|
)
|
||||||
|
|
@ -0,0 +1,54 @@
|
||||||
|
"""
|
||||||
|
All errors/exceptions PyPDF2 raises and all of the warnings it uses.
|
||||||
|
|
||||||
|
Please note that broken PDF files might cause other Exceptions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class DeprecationError(Exception):
|
||||||
|
"""Raised when a deprecated feature is used."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class DependencyError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PyPdfError(Exception):
    """Base class for all PyPDF2-specific exceptions."""

    pass
|
||||||
|
|
||||||
|
|
||||||
|
class PdfReadError(PyPdfError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PageSizeNotDefinedError(PyPdfError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PdfReadWarning(UserWarning):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PdfStreamError(PdfReadError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ParseError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class FileNotDecryptedError(PdfReadError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class WrongPasswordError(FileNotDecryptedError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class EmptyFileError(PdfReadError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
STREAM_TRUNCATED_PREMATURELY = "Stream has ended unexpectedly"
|
||||||
|
|
@ -0,0 +1,645 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
Implementation of stream filters for PDF.
|
||||||
|
|
||||||
|
See TABLE H.1 Abbreviations for standard filter names
|
||||||
|
"""
|
||||||
|
__author__ = "Mathieu Fenniak"
|
||||||
|
__author_email__ = "biziqe@mathieu.fenniak.net"
|
||||||
|
|
||||||
|
import math
|
||||||
|
import struct
|
||||||
|
import zlib
|
||||||
|
from io import BytesIO
|
||||||
|
from typing import Any, Dict, Optional, Tuple, Union, cast
|
||||||
|
|
||||||
|
from .generic import ArrayObject, DictionaryObject, IndirectObject, NameObject
|
||||||
|
|
||||||
|
try:
|
||||||
|
from typing import Literal # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
# PEP 586 introduced typing.Literal with Python 3.8
|
||||||
|
# For older Python versions, the backport typing_extensions is necessary:
|
||||||
|
from typing_extensions import Literal # type: ignore[misc]
|
||||||
|
|
||||||
|
from ._utils import b_, deprecate_with_replacement, ord_, paeth_predictor
|
||||||
|
from .constants import CcittFaxDecodeParameters as CCITT
|
||||||
|
from .constants import ColorSpaces
|
||||||
|
from .constants import FilterTypeAbbreviations as FTA
|
||||||
|
from .constants import FilterTypes as FT
|
||||||
|
from .constants import GraphicsStateParameters as G
|
||||||
|
from .constants import ImageAttributes as IA
|
||||||
|
from .constants import LzwFilterParameters as LZW
|
||||||
|
from .constants import StreamAttributes as SA
|
||||||
|
from .errors import PdfReadError, PdfStreamError
|
||||||
|
|
||||||
|
|
||||||
|
def decompress(data: bytes) -> bytes:
    """
    Decompress zlib/deflate data, tolerating damaged streams.

    First attempts a straight ``zlib.decompress``; if that raises, retries
    by feeding the input one byte at a time to a decompressor object and
    ignoring errors, so a corrupt stream still yields its recoverable prefix.

    :param data: raw compressed bytes.
    :return: the decompressed bytes (possibly partial for corrupt input).
    """
    try:
        return zlib.decompress(data)
    except zlib.error:
        # Lenient fallback: 32 + MAX_WBITS enables automatic header detection.
        decomp = zlib.decompressobj(zlib.MAX_WBITS | 32)
        recovered = b""
        for idx in range(len(data)):
            try:
                recovered += decomp.decompress(data[idx : idx + 1])
            except zlib.error:
                pass
        return recovered
|
||||||
|
|
||||||
|
|
||||||
|
class FlateDecode:
    """Codec for the /FlateDecode (zlib/deflate) stream filter."""

    @staticmethod
    def decode(
        data: bytes,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        **kwargs: Any,
    ) -> bytes:
        """
        Decode data which is flate-encoded.

        :param data: flate-encoded data.
        :param decode_parms: a dictionary of values, understanding the
            "/Predictor":<int> key only
        :return: the flate-decoded data.

        :raises PdfReadError: if an unsupported predictor is specified.
        """
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]
        str_data = decompress(data)
        predictor = 1

        if decode_parms:
            try:
                if isinstance(decode_parms, ArrayObject):
                    for decode_parm in decode_parms:
                        if "/Predictor" in decode_parm:
                            predictor = decode_parm["/Predictor"]
                else:
                    predictor = decode_parms.get("/Predictor", 1)
            except (AttributeError, TypeError):  # TypeError is NullObject
                pass  # Usually an array with a null object was read
        # predictor 1 == no predictor
        if predictor != 1:
            # The /Columns param. has 1 as the default value; see ISO 32000,
            # §7.4.4.3 LZWDecode and FlateDecode Parameters, Table 8
            DEFAULT_BITS_PER_COMPONENT = 8
            if isinstance(decode_parms, ArrayObject):
                columns = 1
                bits_per_component = DEFAULT_BITS_PER_COMPONENT
                for decode_parm in decode_parms:
                    if "/Columns" in decode_parm:
                        columns = decode_parm["/Columns"]
                    if LZW.BITS_PER_COMPONENT in decode_parm:
                        bits_per_component = decode_parm[LZW.BITS_PER_COMPONENT]
            else:
                columns = (
                    1 if decode_parms is None else decode_parms.get(LZW.COLUMNS, 1)
                )
                bits_per_component = (
                    decode_parms.get(LZW.BITS_PER_COMPONENT, DEFAULT_BITS_PER_COMPONENT)
                    if decode_parms
                    else DEFAULT_BITS_PER_COMPONENT
                )

            # PNG predictor can vary by row and so is the lead byte on each row
            rowlength = (
                math.ceil(columns * bits_per_component / 8) + 1
            )  # number of bytes

            # PNG prediction:
            if 10 <= predictor <= 15:
                str_data = FlateDecode._decode_png_prediction(str_data, columns, rowlength)  # type: ignore
            else:
                # unsupported predictor
                raise PdfReadError(f"Unsupported flatedecode predictor {predictor!r}")
        return str_data

    @staticmethod
    def _decode_png_prediction(data: str, columns: int, rowlength: int) -> bytes:
        """Reverse the per-row PNG prediction filters (types 0-4).

        Each row starts with a filter-type byte; the remaining bytes are
        reconstructed from the left/up/up-left neighbours per the PNG spec.

        :raises PdfReadError: if the data length is not a whole number of
            rows, or a row uses an unknown filter type.
        """
        output = BytesIO()
        # PNG prediction can vary from row to row
        if len(data) % rowlength != 0:
            raise PdfReadError("Image data is not rectangular")
        prev_rowdata = (0,) * rowlength
        for row in range(len(data) // rowlength):
            rowdata = [
                ord_(x) for x in data[(row * rowlength) : ((row + 1) * rowlength)]
            ]
            filter_byte = rowdata[0]

            if filter_byte == 0:
                # "None" filter: bytes are stored verbatim.
                pass
            elif filter_byte == 1:
                # "Sub" filter: add the byte to its left neighbour.
                for i in range(2, rowlength):
                    rowdata[i] = (rowdata[i] + rowdata[i - 1]) % 256
            elif filter_byte == 2:
                # "Up" filter: add the byte directly above.
                for i in range(1, rowlength):
                    rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256
            elif filter_byte == 3:
                # "Average" filter: predictor is floor((left + up) / 2).
                # BUGFIX: the floor must apply to the quotient, not the sum.
                # The old `math.floor(left + up) / 2` only produced correct
                # results because both operands are always ints here.
                for i in range(1, rowlength):
                    left = rowdata[i - 1] if i > 1 else 0
                    rowdata[i] = (rowdata[i] + (left + prev_rowdata[i]) // 2) % 256
            elif filter_byte == 4:
                # "Paeth" filter: predictor picks the neighbour closest to
                # the linear estimate left + up - up_left.
                for i in range(1, rowlength):
                    left = rowdata[i - 1] if i > 1 else 0
                    up = prev_rowdata[i]
                    up_left = prev_rowdata[i - 1] if i > 1 else 0
                    paeth = paeth_predictor(left, up, up_left)
                    rowdata[i] = (rowdata[i] + paeth) % 256
            else:
                # unsupported PNG filter
                raise PdfReadError(f"Unsupported PNG filter {filter_byte!r}")
            prev_rowdata = tuple(rowdata)
            # Emit the reconstructed row without its filter-type byte.
            output.write(bytearray(rowdata[1:]))
        return output.getvalue()

    @staticmethod
    def encode(data: bytes) -> bytes:
        """Flate-encode (zlib-compress) the given bytes."""
        return zlib.compress(data)
|
||||||
|
|
||||||
|
|
||||||
|
class ASCIIHexDecode:
    """
    The ASCIIHexDecode filter decodes data that has been encoded in ASCII
    hexadecimal form into a base-7 ASCII format.
    """

    @staticmethod
    def decode(
        data: str,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,  # noqa: F841
        **kwargs: Any,
    ) -> str:
        """
        :param data: a str sequence of hexadecimal-encoded values to be
            converted into a base-7 ASCII string
        :param decode_parms:
        :return: a string conversion in base-7 ASCII, where each of its values
            v is such that 0 <= ord(v) <= 127.

        :raises PdfStreamError: if the input ends before the ">" marker.
        """
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]  # noqa: F841
        decoded_chars = []
        pair = ""
        for char in data:
            if char == ">":
                # End-of-data marker found; stop decoding.
                break
            if char.isspace():
                # Whitespace between hex digits is ignored.
                continue
            pair += char
            if len(pair) == 2:
                decoded_chars.append(chr(int(pair, base=16)))
                pair = ""
        else:
            # Consumed all input without ever seeing the ">" marker.
            raise PdfStreamError("Unexpected EOD in ASCIIHexDecode")
        assert pair == ""
        return "".join(decoded_chars)
|
||||||
|
|
||||||
|
|
||||||
|
class LZWDecode:
    """LZW stream decoder.

    Taken from:
    http://www.java2s.com/Open-Source/Java-Document/PDF/PDF-Renderer/com/sun/pdfview/decode/LZWDecode.java.htm
    """

    class Decoder:
        # Stateful LZW decoder over a byte string.  Codes are read MSB-first
        # with a width that grows from 9 up to 12 bits as the table fills.
        def __init__(self, data: bytes) -> None:
            self.STOP = 257  # code that terminates the stream
            self.CLEARDICT = 256  # code that resets the code table
            self.data = data
            self.bytepos = 0  # current byte offset into self.data
            self.bitpos = 0  # current bit offset within that byte
            self.dict = [""] * 4096  # code table (12-bit codes max)
            for i in range(256):
                self.dict[i] = chr(i)  # codes 0-255 map to single bytes
            self.reset_dict()

        def reset_dict(self) -> None:
            # 258 = first free slot after 256 literals + CLEARDICT + STOP.
            self.dictlen = 258
            self.bitspercode = 9  # code width restarts at 9 bits

        def next_code(self) -> int:
            """Read the next variable-width code from the bit stream.

            Returns -1 when the input is exhausted before a complete code
            could be assembled.
            """
            fillbits = self.bitspercode
            value = 0
            while fillbits > 0:
                if self.bytepos >= len(self.data):
                    return -1
                nextbits = ord_(self.data[self.bytepos])
                bitsfromhere = 8 - self.bitpos
                bitsfromhere = min(bitsfromhere, fillbits)
                # Extract `bitsfromhere` bits from the current byte and shift
                # them into position within the code being assembled.
                value |= (
                    (nextbits >> (8 - self.bitpos - bitsfromhere))
                    & (0xFF >> (8 - bitsfromhere))
                ) << (fillbits - bitsfromhere)
                fillbits -= bitsfromhere
                self.bitpos += bitsfromhere
                if self.bitpos >= 8:
                    # Current byte fully consumed; advance to the next one.
                    self.bitpos = 0
                    self.bytepos = self.bytepos + 1
            return value

        def decode(self) -> str:
            """
            TIFF 6.0 specification explains in sufficient details the steps to
            implement the LZW encode() and decode() algorithms.

            algorithm derived from:
            http://www.rasip.fer.hr/research/compress/algorithms/fund/lz/lzw.html
            and the PDFReference

            :raises PdfReadError: If the stop code is missing
            """
            cW = self.CLEARDICT  # current code; start as if a clear was seen
            baos = ""  # accumulated output
            while True:
                pW = cW  # previous code
                cW = self.next_code()
                if cW == -1:
                    raise PdfReadError("Missed the stop code in LZWDecode!")
                if cW == self.STOP:
                    break
                elif cW == self.CLEARDICT:
                    self.reset_dict()
                elif pW == self.CLEARDICT:
                    # First code after a clear: emit it directly.
                    baos += self.dict[cW]
                else:
                    if cW < self.dictlen:
                        # Known code: emit it, then register a new entry of
                        # previous string + first char of current string.
                        baos += self.dict[cW]
                        p = self.dict[pW] + self.dict[cW][0]
                        self.dict[self.dictlen] = p
                        self.dictlen += 1
                    else:
                        # Code not yet in the table (the KwKwK case): the new
                        # entry is previous string + its own first char.
                        p = self.dict[pW] + self.dict[pW][0]
                        baos += p
                        self.dict[self.dictlen] = p
                        self.dictlen += 1
                if (
                    self.dictlen >= (1 << self.bitspercode) - 1
                    and self.bitspercode < 12
                ):
                    # Table is about to outgrow the current code width:
                    # widen the codes (up to the 12-bit maximum).
                    self.bitspercode += 1
            return baos

    @staticmethod
    def decode(
        data: bytes,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        **kwargs: Any,
    ) -> str:
        """
        :param data: ``bytes`` or ``str`` text to decode.
        :param decode_parms: a dictionary of parameter values.
        :return: decoded data.
        """
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]  # noqa: F841
        return LZWDecode.Decoder(data).decode()
|
||||||
|
|
||||||
|
|
||||||
|
class ASCII85Decode:
    """Decodes string ASCII85-encoded data into a byte format."""

    @staticmethod
    def decode(
        data: Union[str, bytes],
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        **kwargs: Any,
    ) -> bytes:
        """Decode ASCII85 text (terminated by ``~``) into raw bytes."""
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]  # noqa: F841
        if isinstance(data, str):
            data = data.encode("ascii")
        count = 0  # number of base-85 digits in the current group
        group_value = 0  # running 32-bit value of the current group
        decoded = bytearray()
        for byte in data:
            if ord("!") <= byte <= ord("u"):
                # Regular base-85 digit: fold into the running group.
                count += 1
                group_value = group_value * 85 + (byte - 33)
                if count == 5:
                    # Five digits complete one 32-bit big-endian word.
                    decoded += struct.pack(b">L", group_value)
                    count = 0
                    group_value = 0
            elif byte == ord("z"):
                # 'z' abbreviates a full group of four zero bytes and is
                # only legal between groups.
                assert count == 0
                decoded += b"\0\0\0\0"
            elif byte == ord("~"):
                # End-of-data marker: flush any partial final group, padding
                # with the maximum digit ('u' == 84) and truncating.
                if count:
                    for _ in range(5 - count):
                        group_value = group_value * 85 + 84
                    decoded += struct.pack(b">L", group_value)[: count - 1]
                break
        return bytes(decoded)
|
||||||
|
|
||||||
|
|
||||||
|
class DCTDecode:
    """Pass-through handler for /DCTDecode (JPEG) streams.

    The stream payload already is complete JPEG data, so no transformation
    happens here; consumers hand the bytes to an image library directly.
    """

    @staticmethod
    def decode(
        data: bytes,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        **kwargs: Any,
    ) -> bytes:
        """Return the JPEG data unchanged."""
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]  # noqa: F841
        return data
|
||||||
|
|
||||||
|
|
||||||
|
class JPXDecode:
    """Pass-through handler for /JPXDecode (JPEG 2000) streams.

    The stream payload already is complete JPEG 2000 data, so no
    transformation happens here.
    """

    @staticmethod
    def decode(
        data: bytes,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        **kwargs: Any,
    ) -> bytes:
        """Return the JPEG 2000 data unchanged."""
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]  # noqa: F841
        return data
|
||||||
|
|
||||||
|
|
||||||
|
class CCITParameters:
    """TABLE 3.9 Optional parameters for the CCITTFaxDecode filter."""

    def __init__(self, K: int = 0, columns: int = 0, rows: int = 0) -> None:
        self.K = K  # encoding scheme selector (see `group`)
        self.EndOfBlock = None
        self.EndOfLine = None
        self.EncodedByteAlign = None
        self.columns = columns  # width
        self.rows = rows  # height
        self.DamagedRowsBeforeError = None

    @property
    def group(self) -> int:
        """CCITT group number derived from K.

        K < 0 selects Group 4 (pure two-dimensional encoding);
        K == 0 is Group 3 1-D, K > 0 is Group 3 mixed 1-D/2-D.
        """
        return 4 if self.K < 0 else 3
|
||||||
|
|
||||||
|
|
||||||
|
class CCITTFaxDecode:
    """
    See 3.3.5 CCITTFaxDecode Filter (PDF 1.7 Standard).

    Either Group 3 or Group 4 CCITT facsimile (fax) encoding.
    CCITT encoding is bit-oriented, not byte-oriented.

    See: TABLE 3.9 Optional parameters for the CCITTFaxDecode filter
    """

    @staticmethod
    def _get_parameters(
        parameters: Union[None, ArrayObject, DictionaryObject], rows: int
    ) -> CCITParameters:
        """Extract /K and /Columns from the filter parameters.

        ``parameters`` may be a single dictionary or an array of
        dictionaries; missing keys fall back to the Table 3.9 defaults
        (K = 0, Columns = 1728).
        """
        # TABLE 3.9 Optional parameters for the CCITTFaxDecode filter
        k = 0
        columns = 1728
        if parameters:
            if isinstance(parameters, ArrayObject):
                for decode_parm in parameters:
                    if CCITT.COLUMNS in decode_parm:
                        columns = decode_parm[CCITT.COLUMNS]
                    if CCITT.K in decode_parm:
                        k = decode_parm[CCITT.K]
            else:
                if CCITT.COLUMNS in parameters:
                    columns = parameters[CCITT.COLUMNS]  # type: ignore
                if CCITT.K in parameters:
                    k = parameters[CCITT.K]  # type: ignore

        return CCITParameters(k, columns, rows)

    @staticmethod
    def decode(
        data: bytes,
        decode_parms: Union[None, ArrayObject, DictionaryObject] = None,
        height: int = 0,
        **kwargs: Any,
    ) -> bytes:
        """Prepend a minimal little-endian TIFF header to raw CCITT data.

        The CCITT payload is not decompressed here; wrapping it in a TIFF
        header with the matching Compression tag lets image libraries
        decode the result as a TIFF file.
        """
        if "decodeParms" in kwargs:  # pragma: no cover
            deprecate_with_replacement("decodeParms", "parameters", "4.0.0")
            decode_parms = kwargs["decodeParms"]
        parms = CCITTFaxDecode._get_parameters(decode_parms, height)

        img_size = len(data)
        # Header layout: byte order (2s), magic (h), IFD offset (l),
        # tag count (h), 8 IFD entries of (tag:h, type:h, count:l, value:l),
        # and a final next-IFD offset (h) of 0.
        tiff_header_struct = "<2shlh" + "hhll" * 8 + "h"
        tiff_header = struct.pack(
            tiff_header_struct,
            b"II",  # Byte order indication: Little endian
            42,  # Version number (always 42)
            8,  # Offset to first IFD
            8,  # Number of tags in IFD
            256,
            4,
            1,
            parms.columns,  # ImageWidth, LONG, 1, width
            257,
            4,
            1,
            parms.rows,  # ImageLength, LONG, 1, length
            258,
            3,
            1,
            1,  # BitsPerSample, SHORT, 1, 1
            259,
            3,
            1,
            parms.group,  # Compression, SHORT, 1, 4 = CCITT Group 4 fax encoding
            262,
            3,
            1,
            0,  # Thresholding, SHORT, 1, 0 = WhiteIsZero
            273,
            4,
            1,
            struct.calcsize(
                tiff_header_struct
            ),  # StripOffsets, LONG, 1, length of header
            278,
            4,
            1,
            parms.rows,  # RowsPerStrip, LONG, 1, length
            279,
            4,
            1,
            img_size,  # StripByteCounts, LONG, 1, size of image
            0,  # last IFD
        )

        return tiff_header + data
|
||||||
|
|
||||||
|
|
||||||
|
def decode_stream_data(stream: Any) -> Union[str, bytes]:  # utils.StreamObject
    """Apply every filter in a stream's /Filter entry to its raw data.

    Filters are applied in listed order.  Unsupported filters, and /Crypt
    filters with a /Name or /Type entry, raise NotImplementedError.

    :param stream: a stream object exposing ``get`` and ``_data``.
    :return: the fully decoded payload.
    """
    filters = stream.get(SA.FILTER, ())
    if isinstance(filters, IndirectObject):
        filters = cast(ArrayObject, filters.get_object())
    if len(filters) and not isinstance(filters[0], NameObject):
        # we have a single filter instance
        filters = (filters,)
    data: bytes = stream._data
    # If there is not data to decode we should not try to decode the data.
    if data:
        for filter_type in filters:
            if filter_type in (FT.FLATE_DECODE, FTA.FL):
                data = FlateDecode.decode(data, stream.get(SA.DECODE_PARMS))
            elif filter_type in (FT.ASCII_HEX_DECODE, FTA.AHx):
                data = ASCIIHexDecode.decode(data)  # type: ignore
            elif filter_type in (FT.LZW_DECODE, FTA.LZW):
                data = LZWDecode.decode(data, stream.get(SA.DECODE_PARMS))  # type: ignore
            elif filter_type in (FT.ASCII_85_DECODE, FTA.A85):
                data = ASCII85Decode.decode(data)
            elif filter_type == FT.DCT_DECODE:
                data = DCTDecode.decode(data)
            elif filter_type == "/JPXDecode":
                data = JPXDecode.decode(data)
            elif filter_type == FT.CCITT_FAX_DECODE:
                # CCITT needs the image height to build its TIFF header.
                height = stream.get(IA.HEIGHT, ())
                data = CCITTFaxDecode.decode(data, stream.get(SA.DECODE_PARMS), height)
            elif filter_type == "/Crypt":
                decode_parms = stream.get(SA.DECODE_PARMS, {})
                if "/Name" not in decode_parms and "/Type" not in decode_parms:
                    # Identity crypt filter: data passes through unchanged.
                    pass
                else:
                    raise NotImplementedError(
                        "/Crypt filter with /Name or /Type not supported yet"
                    )
            else:
                # Unsupported filter
                raise NotImplementedError(f"unsupported filter {filter_type}")
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def decodeStreamData(stream: Any) -> Union[str, bytes]:  # pragma: no cover
    """Deprecated alias; use :func:`decode_stream_data` instead."""
    deprecate_with_replacement("decodeStreamData", "decode_stream_data", "4.0.0")
    return decode_stream_data(stream)
|
||||||
|
|
||||||
|
|
||||||
|
def _xobj_to_image(x_object_obj: Dict[str, Any]) -> Tuple[Optional[str], bytes]:
    """
    Users need to have the pillow package installed.

    It's unclear if PyPDF2 will keep this function here, hence it's private.
    It might get removed at any point.

    :return: Tuple[file extension, bytes]
    """
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(
            "pillow is required to do image extraction. "
            "It can be installed via 'pip install PyPDF2[image]'"
        )

    size = (x_object_obj[IA.WIDTH], x_object_obj[IA.HEIGHT])
    data = x_object_obj.get_data()  # type: ignore
    # Choose the Pillow mode from the declared color space; see
    # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    if (
        IA.COLOR_SPACE in x_object_obj
        and x_object_obj[IA.COLOR_SPACE] == ColorSpaces.DEVICE_RGB
    ):
        mode: Literal["RGB", "P"] = "RGB"
    else:
        mode = "P"  # palette mode
    extension = None
    if SA.FILTER in x_object_obj:
        if x_object_obj[SA.FILTER] == FT.FLATE_DECODE:
            extension = ".png"  # mime_type = "image/png"
            color_space = None
            if "/ColorSpace" in x_object_obj:
                color_space = x_object_obj["/ColorSpace"].get_object()
                if (
                    isinstance(color_space, ArrayObject)
                    and color_space[0] == "/Indexed"
                ):
                    # /Indexed color space array: [/Indexed base hival lookup]
                    color_space, base, hival, lookup = (
                        value.get_object() for value in color_space
                    )

            img = Image.frombytes(mode, size, data)
            if color_space == "/Indexed":
                # NOTE(review): base/hival/lookup are only bound when the
                # array branch above ran; a bare "/Indexed" color space name
                # would raise NameError here — confirm that cannot occur.
                from .generic import ByteStringObject

                if isinstance(lookup, ByteStringObject):
                    if base == ColorSpaces.DEVICE_GRAY and len(lookup) == hival + 1:
                        # Expand a grayscale lookup table into RGB triples.
                        lookup = b"".join(
                            [lookup[i : i + 1] * 3 for i in range(len(lookup))]
                        )
                    img.putpalette(lookup)
                else:
                    img.putpalette(lookup.get_data())
                img = img.convert("L" if base == ColorSpaces.DEVICE_GRAY else "RGB")
            if G.S_MASK in x_object_obj:  # add alpha channel
                alpha = Image.frombytes("L", size, x_object_obj[G.S_MASK].get_data())
                img.putalpha(alpha)
            img_byte_arr = BytesIO()
            img.save(img_byte_arr, format="PNG")
            data = img_byte_arr.getvalue()
        elif x_object_obj[SA.FILTER] in (
            [FT.LZW_DECODE],
            [FT.ASCII_85_DECODE],
            [FT.CCITT_FAX_DECODE],
        ):
            # I'm not sure if the following logic is correct.
            # There might not be any relationship between the filters and the
            # extension
            if x_object_obj[SA.FILTER] in [[FT.LZW_DECODE], [FT.CCITT_FAX_DECODE]]:
                extension = ".tiff"  # mime_type = "image/tiff"
            else:
                extension = ".png"  # mime_type = "image/png"
            data = b_(data)
        elif x_object_obj[SA.FILTER] == FT.DCT_DECODE:
            extension = ".jpg"  # mime_type = "image/jpeg"
        elif x_object_obj[SA.FILTER] == "/JPXDecode":
            extension = ".jp2"  # mime_type = "image/x-jp2"
        elif x_object_obj[SA.FILTER] == FT.CCITT_FAX_DECODE:
            extension = ".tiff"  # mime_type = "image/tiff"
    else:
        # No filter at all: data holds raw samples — re-encode them as PNG.
        extension = ".png"  # mime_type = "image/png"
        img = Image.frombytes(mode, size, data)
        img_byte_arr = BytesIO()
        img.save(img_byte_arr, format="PNG")
        data = img_byte_arr.getvalue()

    return extension, data
|
||||||
|
|
@ -0,0 +1,144 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
"""Implementation of generic PDF objects (dictionary, number, string, ...)."""
|
||||||
|
__author__ = "Mathieu Fenniak"
|
||||||
|
__author_email__ = "biziqe@mathieu.fenniak.net"
|
||||||
|
|
||||||
|
from typing import Dict, List, Union
|
||||||
|
|
||||||
|
from .._utils import StreamType, deprecate_with_replacement
|
||||||
|
from ..constants import OutlineFontFlag
|
||||||
|
from ._annotations import AnnotationBuilder
|
||||||
|
from ._base import (
|
||||||
|
BooleanObject,
|
||||||
|
ByteStringObject,
|
||||||
|
FloatObject,
|
||||||
|
IndirectObject,
|
||||||
|
NameObject,
|
||||||
|
NullObject,
|
||||||
|
NumberObject,
|
||||||
|
PdfObject,
|
||||||
|
TextStringObject,
|
||||||
|
encode_pdfdocencoding,
|
||||||
|
)
|
||||||
|
from ._data_structures import (
|
||||||
|
ArrayObject,
|
||||||
|
ContentStream,
|
||||||
|
DecodedStreamObject,
|
||||||
|
Destination,
|
||||||
|
DictionaryObject,
|
||||||
|
EncodedStreamObject,
|
||||||
|
Field,
|
||||||
|
StreamObject,
|
||||||
|
TreeObject,
|
||||||
|
read_object,
|
||||||
|
)
|
||||||
|
from ._fit import Fit
|
||||||
|
from ._outline import Bookmark, OutlineItem
|
||||||
|
from ._rectangle import RectangleObject
|
||||||
|
from ._utils import (
|
||||||
|
create_string_object,
|
||||||
|
decode_pdfdocencoding,
|
||||||
|
hex_to_rgb,
|
||||||
|
read_hex_string_from_stream,
|
||||||
|
read_string_from_stream,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def readHexStringFromStream(
    stream: StreamType,
) -> Union["TextStringObject", "ByteStringObject"]:  # pragma: no cover
    """Deprecated alias; use :func:`read_hex_string_from_stream` instead."""
    deprecate_with_replacement(
        "readHexStringFromStream", "read_hex_string_from_stream", "4.0.0"
    )
    return read_hex_string_from_stream(stream)
|
||||||
|
|
||||||
|
|
||||||
|
def readStringFromStream(
    stream: StreamType,
    forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union["TextStringObject", "ByteStringObject"]:  # pragma: no cover
    """Deprecated alias; use :func:`read_string_from_stream` instead."""
    deprecate_with_replacement(
        "readStringFromStream", "read_string_from_stream", "4.0.0"
    )
    return read_string_from_stream(stream, forced_encoding)
|
||||||
|
|
||||||
|
|
||||||
|
def createStringObject(
    string: Union[str, bytes],
    forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union[TextStringObject, ByteStringObject]:  # pragma: no cover
    """Deprecated alias; use :func:`create_string_object` instead."""
    deprecate_with_replacement("createStringObject", "create_string_object", "4.0.0")
    return create_string_object(string, forced_encoding)
|
||||||
|
|
||||||
|
|
||||||
|
# Default destination fit used when no explicit fit is specified.
PAGE_FIT = Fit.fit()


# Public API of the ``generic`` package.
__all__ = [
    # Base types
    "BooleanObject",
    "FloatObject",
    "NumberObject",
    "NameObject",
    "IndirectObject",
    "NullObject",
    "PdfObject",
    "TextStringObject",
    "ByteStringObject",
    # Annotations
    "AnnotationBuilder",
    # Fit
    "Fit",
    "PAGE_FIT",
    # Data structures
    "ArrayObject",
    "DictionaryObject",
    "TreeObject",
    "StreamObject",
    "DecodedStreamObject",
    "EncodedStreamObject",
    "ContentStream",
    "RectangleObject",
    "Field",
    "Destination",
    # --- More specific stuff
    # Outline
    "OutlineItem",
    "OutlineFontFlag",
    "Bookmark",
    # Data structures core functions
    "read_object",
    # Utility functions
    "create_string_object",
    "encode_pdfdocencoding",
    "decode_pdfdocencoding",
    "hex_to_rgb",
    "read_hex_string_from_stream",
    "read_string_from_stream",
]
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,275 @@
|
||||||
|
from typing import Optional, Tuple, Union
|
||||||
|
|
||||||
|
from ._base import (
|
||||||
|
BooleanObject,
|
||||||
|
FloatObject,
|
||||||
|
NameObject,
|
||||||
|
NumberObject,
|
||||||
|
TextStringObject,
|
||||||
|
)
|
||||||
|
from ._data_structures import ArrayObject, DictionaryObject
|
||||||
|
from ._fit import DEFAULT_FIT, Fit
|
||||||
|
from ._rectangle import RectangleObject
|
||||||
|
from ._utils import hex_to_rgb
|
||||||
|
|
||||||
|
|
||||||
|
class AnnotationBuilder:
|
||||||
|
"""
|
||||||
|
The AnnotationBuilder creates dictionaries representing PDF annotations.
|
||||||
|
|
||||||
|
Those dictionaries can be modified before they are added to a PdfWriter
|
||||||
|
instance via `writer.add_annotation`.
|
||||||
|
|
||||||
|
See `adding PDF annotations <../user/adding-pdf-annotations.html>`_ for
|
||||||
|
it's usage combined with PdfWriter.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from ..types import FitType, ZoomArgType
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def text(
|
||||||
|
rect: Union[RectangleObject, Tuple[float, float, float, float]],
|
||||||
|
text: str,
|
||||||
|
open: bool = False,
|
||||||
|
flags: int = 0,
|
||||||
|
) -> DictionaryObject:
|
||||||
|
"""
|
||||||
|
Add text annotation.
|
||||||
|
|
||||||
|
:param Tuple[int, int, int, int] rect:
|
||||||
|
or array of four integers specifying the clickable rectangular area
|
||||||
|
``[xLL, yLL, xUR, yUR]``
|
||||||
|
:param bool open:
|
||||||
|
:param int flags:
|
||||||
|
"""
|
||||||
|
# TABLE 8.23 Additional entries specific to a text annotation
|
||||||
|
text_obj = DictionaryObject(
|
||||||
|
{
|
||||||
|
NameObject("/Type"): NameObject("/Annot"),
|
||||||
|
NameObject("/Subtype"): NameObject("/Text"),
|
||||||
|
NameObject("/Rect"): RectangleObject(rect),
|
||||||
|
NameObject("/Contents"): TextStringObject(text),
|
||||||
|
NameObject("/Open"): BooleanObject(open),
|
||||||
|
NameObject("/Flags"): NumberObject(flags),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return text_obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def free_text(
|
||||||
|
text: str,
|
||||||
|
rect: Union[RectangleObject, Tuple[float, float, float, float]],
|
||||||
|
font: str = "Helvetica",
|
||||||
|
bold: bool = False,
|
||||||
|
italic: bool = False,
|
||||||
|
font_size: str = "14pt",
|
||||||
|
font_color: str = "000000",
|
||||||
|
border_color: str = "000000",
|
||||||
|
background_color: str = "ffffff",
|
||||||
|
) -> DictionaryObject:
|
||||||
|
"""
|
||||||
|
Add text in a rectangle to a page.
|
||||||
|
|
||||||
|
:param str text: Text to be added
|
||||||
|
:param RectangleObject rect: or array of four integers
|
||||||
|
specifying the clickable rectangular area ``[xLL, yLL, xUR, yUR]``
|
||||||
|
:param str font: Name of the Font, e.g. 'Helvetica'
|
||||||
|
:param bool bold: Print the text in bold
|
||||||
|
:param bool italic: Print the text in italic
|
||||||
|
:param str font_size: How big the text will be, e.g. '14pt'
|
||||||
|
:param str font_color: Hex-string for the color
|
||||||
|
:param str border_color: Hex-string for the border color
|
||||||
|
:param str background_color: Hex-string for the background of the annotation
|
||||||
|
"""
|
||||||
|
font_str = "font: "
|
||||||
|
if bold is True:
|
||||||
|
font_str = font_str + "bold "
|
||||||
|
if italic is True:
|
||||||
|
font_str = font_str + "italic "
|
||||||
|
font_str = font_str + font + " " + font_size
|
||||||
|
font_str = font_str + ";text-align:left;color:#" + font_color
|
||||||
|
|
||||||
|
bg_color_str = ""
|
||||||
|
for st in hex_to_rgb(border_color):
|
||||||
|
bg_color_str = bg_color_str + str(st) + " "
|
||||||
|
bg_color_str = bg_color_str + "rg"
|
||||||
|
|
||||||
|
free_text = DictionaryObject()
|
||||||
|
free_text.update(
|
||||||
|
{
|
||||||
|
NameObject("/Type"): NameObject("/Annot"),
|
||||||
|
NameObject("/Subtype"): NameObject("/FreeText"),
|
||||||
|
NameObject("/Rect"): RectangleObject(rect),
|
||||||
|
NameObject("/Contents"): TextStringObject(text),
|
||||||
|
# font size color
|
||||||
|
NameObject("/DS"): TextStringObject(font_str),
|
||||||
|
# border color
|
||||||
|
NameObject("/DA"): TextStringObject(bg_color_str),
|
||||||
|
# background color
|
||||||
|
NameObject("/C"): ArrayObject(
|
||||||
|
[FloatObject(n) for n in hex_to_rgb(background_color)]
|
||||||
|
),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return free_text
|
||||||
|
|
||||||
|
@staticmethod
def line(
    p1: Tuple[float, float],
    p2: Tuple[float, float],
    rect: Union[RectangleObject, Tuple[float, float, float, float]],
    text: str = "",
    title_bar: str = "",
) -> DictionaryObject:
    """
    Draw a line on the PDF.

    :param Tuple[float, float] p1: First point
    :param Tuple[float, float] p2: Second point
    :param RectangleObject rect: or array of four
        integers specifying the clickable rectangular area
        ``[xLL, yLL, xUR, yUR]``
    :param str text: Text to be displayed as the line annotation
    :param str title_bar: Text to be displayed in the title bar of the
        annotation; by convention this is the name of the author
    """
    line_obj = DictionaryObject(
        {
            NameObject("/Type"): NameObject("/Annot"),
            NameObject("/Subtype"): NameObject("/Line"),
            NameObject("/Rect"): RectangleObject(rect),
            NameObject("/T"): TextStringObject(title_bar),
            # /L holds the two line endpoints in default user space.
            NameObject("/L"): ArrayObject(
                [
                    FloatObject(p1[0]),
                    FloatObject(p1[1]),
                    FloatObject(p2[0]),
                    FloatObject(p2[1]),
                ]
            ),
            # /LE: line-ending styles. Bug fix: the original used
            # NameObject(None), which stringifies to "None" WITHOUT the
            # leading slash and is therefore not a valid PDF name token.
            # The spec's "no ending" style is the name /None.
            NameObject("/LE"): ArrayObject(
                [
                    NameObject("/None"),
                    NameObject("/None"),
                ]
            ),
            # /IC: interior (fill) color for the line endings, mid-gray.
            NameObject("/IC"): ArrayObject(
                [
                    FloatObject(0.5),
                    FloatObject(0.5),
                    FloatObject(0.5),
                ]
            ),
            NameObject("/Contents"): TextStringObject(text),
        }
    )
    return line_obj
|
||||||
|
|
||||||
|
@staticmethod
def rectangle(
    rect: Union[RectangleObject, Tuple[float, float, float, float]],
    interiour_color: Optional[str] = None,
) -> DictionaryObject:
    """
    Draw a rectangle on the PDF.

    :param RectangleObject rect: or array of four
        integers specifying the clickable rectangular area
        ``[xLL, yLL, xUR, yUR]``
    :param str interiour_color: optional hex-string; when given, it is
        converted to RGB and stored as the annotation's fill color (/IC)
    """
    entries = {
        NameObject("/Type"): NameObject("/Annot"),
        NameObject("/Subtype"): NameObject("/Square"),
        NameObject("/Rect"): RectangleObject(rect),
    }
    annotation = DictionaryObject(entries)

    if interiour_color:
        components = [
            FloatObject(channel) for channel in hex_to_rgb(interiour_color)
        ]
        annotation[NameObject("/IC")] = ArrayObject(components)

    return annotation
|
||||||
|
|
||||||
|
@staticmethod
def link(
    rect: Union[RectangleObject, Tuple[float, float, float, float]],
    border: Optional[ArrayObject] = None,
    url: Optional[str] = None,
    target_page_index: Optional[int] = None,
    fit: Fit = DEFAULT_FIT,
) -> DictionaryObject:
    """
    Add a link to the document.

    The link can either be an external link or an internal link.

    An external link requires the url parameter.
    An internal link requires the target_page_index and fit args.

    :param RectangleObject rect: or array of four
        integers specifying the clickable rectangular area
        ``[xLL, yLL, xUR, yUR]``
    :param border: if provided, an array describing border-drawing
        properties. See the PDF spec for details. No border will be
        drawn if this argument is omitted.
        - horizontal corner radius,
        - vertical corner radius, and
        - border width
        - Optionally: Dash
    :param str url: Link to a website (if you want to make an external link)
    :param int target_page_index: index of the page to which the link should go
        (if you want to make an internal link)
    :param Fit fit: Page fit or 'zoom' option.
    """
    from ..types import BorderArrayType

    is_external = url is not None
    is_internal = target_page_index is not None
    # Exactly one of the two targets must be given.
    if not is_external and not is_internal:
        raise ValueError(
            "Either 'url' or 'target_page_index' have to be provided. Both were None."
        )
    if is_external and is_internal:
        raise ValueError(
            f"Either 'url' or 'target_page_index' have to be provided. url={url}, target_page_index={target_page_index}"
        )

    border_arr: BorderArrayType
    if border is not None:
        # Bug fix: /Border entries are NUMBERS per the PDF spec (corner
        # radii, border width, dash array). The original wrapped them in
        # NameObject, emitting invalid name tokens; the no-border branch
        # below already (correctly) uses NumberObject.
        border_arr = [NumberObject(n) for n in border[:3]]
        if len(border) == 4:
            dash_pattern = ArrayObject([NumberObject(n) for n in border[3]])
            border_arr.append(dash_pattern)
    else:
        border_arr = [NumberObject(0)] * 3

    link_obj = DictionaryObject(
        {
            NameObject("/Type"): NameObject("/Annot"),
            NameObject("/Subtype"): NameObject("/Link"),
            NameObject("/Rect"): RectangleObject(rect),
            NameObject("/Border"): ArrayObject(border_arr),
        }
    )
    if is_external:
        # /A action dictionary: resolve the URI when clicked.
        link_obj[NameObject("/A")] = DictionaryObject(
            {
                NameObject("/S"): NameObject("/URI"),
                NameObject("/Type"): NameObject("/Action"),
                NameObject("/URI"): TextStringObject(url),
            }
        )
    if is_internal:
        # The destination page reference is not known yet at build time;
        # the writer patches this placeholder later.
        dest_deferred = DictionaryObject(
            {
                "target_page_index": NumberObject(target_page_index),
                "fit": NameObject(fit.fit_type),
                "fit_args": fit.fit_args,
            }
        )
        link_obj[NameObject("/Dest")] = dest_deferred
    return link_obj
|
||||||
|
|
@ -0,0 +1,648 @@
|
||||||
|
# Copyright (c) 2006, Mathieu Fenniak
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer in the documentation
|
||||||
|
# and/or other materials provided with the distribution.
|
||||||
|
# * The name of the author may not be used to endorse or promote products
|
||||||
|
# derived from this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
# POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
import codecs
|
||||||
|
import decimal
|
||||||
|
import hashlib
|
||||||
|
import re
|
||||||
|
from binascii import unhexlify
|
||||||
|
from typing import Any, Callable, List, Optional, Tuple, Union, cast
|
||||||
|
|
||||||
|
from .._codecs import _pdfdoc_encoding_rev
|
||||||
|
from .._protocols import PdfObjectProtocol, PdfWriterProtocol
|
||||||
|
from .._utils import (
|
||||||
|
StreamType,
|
||||||
|
b_,
|
||||||
|
deprecation_with_replacement,
|
||||||
|
hex_str,
|
||||||
|
hexencode,
|
||||||
|
logger_warning,
|
||||||
|
read_non_whitespace,
|
||||||
|
read_until_regex,
|
||||||
|
str_,
|
||||||
|
)
|
||||||
|
from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfReadError, PdfStreamError
|
||||||
|
|
||||||
|
__author__ = "Mathieu Fenniak"
|
||||||
|
__author_email__ = "biziqe@mathieu.fenniak.net"
|
||||||
|
|
||||||
|
|
||||||
|
class PdfObject(PdfObjectProtocol):
    """Base class for every object that can appear in a PDF body."""

    # function for calculating a hash value
    hash_func: Callable[..., "hashlib._Hash"] = hashlib.sha1
    # Back-pointer to the IndirectObject referring to this object, if any.
    indirect_reference: Optional["IndirectObject"]

    def hash_value_data(self) -> bytes:
        """Return the bytes fed into hash_value(); subclasses may override."""
        return ("%s" % self).encode()

    def hash_value(self) -> bytes:
        """Return b"ClassName:hexdigest" identifying this object's content."""
        return (
            "%s:%s"
            % (
                self.__class__.__name__,
                self.hash_func(self.hash_value_data()).hexdigest(),
            )
        ).encode()

    def clone(
        self,
        pdf_dest: PdfWriterProtocol,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "PdfObject":
        """
        clone object into pdf_dest (PdfWriterProtocol which is an interface for PdfWriter)

        force_duplicate: in standard if the object has been already cloned and referenced,
        the copy is returned; when force_duplicate == True, a new copy is always performed

        ignore_fields: list/tuple of field names (for dictionaries) that will be
        ignored during cloning (applies also to child duplication)

        in standard, clone function calls _reference_clone (see _reference_clone)
        """
        # Abstract: every concrete subclass provides its own clone().
        raise Exception("clone PdfObject")

    def _reference_clone(
        self, clone: Any, pdf_dest: PdfWriterProtocol
    ) -> PdfObjectProtocol:
        """
        reference the object within the _objects of pdf_dest only if
        indirect_reference attribute exists (which means the objects
        was already identified in xref/xobjstm)
        if object has been already referenced do nothing
        """
        try:
            # Fast path: *clone* already belongs to pdf_dest.
            if clone.indirect_reference.pdf == pdf_dest:
                return clone
        except Exception:
            pass
        if hasattr(self, "indirect_reference"):
            ind = self.indirect_reference
            # Prospective 1-based object number if we append the clone.
            i = len(pdf_dest._objects) + 1
            if ind is not None:
                # _id_translated maps id(source_pdf) -> {src idnum: dest idnum}.
                if id(ind.pdf) not in pdf_dest._id_translated:
                    pdf_dest._id_translated[id(ind.pdf)] = {}
                if ind.idnum in pdf_dest._id_translated[id(ind.pdf)]:
                    # Already cloned into pdf_dest: return the existing copy.
                    obj = pdf_dest.get_object(
                        pdf_dest._id_translated[id(ind.pdf)][ind.idnum]
                    )
                    assert obj is not None
                    return obj
                # Record the translation BEFORE appending so that recursive
                # clones of children can resolve circular references.
                pdf_dest._id_translated[id(ind.pdf)][ind.idnum] = i
            pdf_dest._objects.append(clone)
            clone.indirect_reference = IndirectObject(i, 0, pdf_dest)
        return clone

    def get_object(self) -> Optional["PdfObject"]:
        """Resolve indirect references."""
        return self

    def getObject(self) -> Optional["PdfObject"]:  # pragma: no cover
        # Deprecated camelCase alias kept for backward compatibility.
        deprecation_with_replacement("getObject", "get_object", "3.0.0")
        return self.get_object()

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # Serialization is type-specific; concrete subclasses must override.
        raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
|
class NullObject(PdfObject):
    """Represents the PDF ``null`` object."""

    def clone(
        self,
        pdf_dest: PdfWriterProtocol,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "NullObject":
        """clone object into pdf_dest"""
        duplicate = self._reference_clone(NullObject(), pdf_dest)
        return cast("NullObject", duplicate)

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # The serialized form is always the bare keyword.
        stream.write(b"null")

    @staticmethod
    def read_from_stream(stream: StreamType) -> "NullObject":
        # The keyword is exactly four bytes long.
        token = stream.read(4)
        if token == b"null":
            return NullObject()
        raise PdfReadError("Could not read Null object")

    def __repr__(self) -> str:
        return "NullObject"

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)

    @staticmethod
    def readFromStream(stream: StreamType) -> "NullObject":  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
        return NullObject.read_from_stream(stream)
|
||||||
|
|
||||||
|
|
||||||
|
class BooleanObject(PdfObject):
    """A PDF boolean, serialized as the literals ``true`` / ``false``."""

    def __init__(self, value: Any) -> None:
        # The wrapped truth value (typically a Python bool).
        self.value = value

    def clone(
        self,
        pdf_dest: PdfWriterProtocol,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "BooleanObject":
        """clone object into pdf_dest"""
        return cast(
            "BooleanObject", self._reference_clone(BooleanObject(self.value), pdf_dest)
        )

    def __eq__(self, __o: object) -> bool:
        # Equal to another BooleanObject or to a plain bool with the same value.
        if isinstance(__o, BooleanObject):
            return self.value == __o.value
        elif isinstance(__o, bool):
            return self.value == __o
        else:
            return False

    def __hash__(self) -> int:
        # Bug fix: defining __eq__ alone implicitly set __hash__ = None,
        # making BooleanObject unhashable (unusable as a dict key or in
        # sets). Hash by value, consistent with __eq__.
        return hash(self.value)

    def __repr__(self) -> str:
        return "True" if self.value else "False"

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # encryption_key is unused: booleans are never encrypted in a PDF.
        if self.value:
            stream.write(b"true")
        else:
            stream.write(b"false")

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)

    @staticmethod
    def read_from_stream(stream: StreamType) -> "BooleanObject":
        # "true" is 4 bytes; "false" is 5, so after matching "fals" one more
        # byte (the trailing "e") is consumed.
        word = stream.read(4)
        if word == b"true":
            return BooleanObject(True)
        elif word == b"fals":
            stream.read(1)
            return BooleanObject(False)
        else:
            raise PdfReadError("Could not read Boolean object")

    @staticmethod
    def readFromStream(stream: StreamType) -> "BooleanObject":  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
        return BooleanObject.read_from_stream(stream)
|
||||||
|
|
||||||
|
|
||||||
|
class IndirectObject(PdfObject):
    """A reference (``idnum generation R``) to an object stored elsewhere
    in a specific PDF (reader or writer)."""

    def __init__(self, idnum: int, generation: int, pdf: Any) -> None:  # PdfReader
        # Object number within the owning PDF's xref.
        self.idnum = idnum
        # Generation number (almost always 0 in practice).
        self.generation = generation
        # The owning PdfReader/PdfWriter; identity (is) matters in __eq__.
        self.pdf = pdf

    def clone(
        self,
        pdf_dest: PdfWriterProtocol,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "IndirectObject":
        """clone object into pdf_dest"""
        if self.pdf == pdf_dest and not force_duplicate:
            # Already duplicated and no extra duplication required
            return self
        if id(self.pdf) not in pdf_dest._id_translated:
            pdf_dest._id_translated[id(self.pdf)] = {}

        if not force_duplicate and self.idnum in pdf_dest._id_translated[id(self.pdf)]:
            # The referenced object was already cloned: reuse its translation.
            dup = pdf_dest.get_object(pdf_dest._id_translated[id(self.pdf)][self.idnum])
        else:
            # Resolve the reference and clone the underlying object; it
            # registers itself in pdf_dest via _reference_clone.
            obj = self.get_object()
            assert obj is not None
            dup = obj.clone(pdf_dest, force_duplicate, ignore_fields)
        assert dup is not None
        assert dup.indirect_reference is not None
        return dup.indirect_reference

    @property
    def indirect_reference(self) -> "IndirectObject":  # type: ignore[override]
        # A reference's reference is itself.
        return self

    def get_object(self) -> Optional["PdfObject"]:
        # Delegate resolution to the owning PDF, then resolve chained
        # references (an indirect object may point at another reference).
        obj = self.pdf.get_object(self)
        if obj is None:
            return None
        return obj.get_object()

    def __repr__(self) -> str:
        return f"IndirectObject({self.idnum!r}, {self.generation!r}, {id(self.pdf)})"

    def __eq__(self, other: Any) -> bool:
        # Two references are equal only when they point at the same object
        # number/generation IN THE SAME pdf instance (identity, not equality).
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm no caller uses them as dict keys / set members.
        return (
            other is not None
            and isinstance(other, IndirectObject)
            and self.idnum == other.idnum
            and self.generation == other.generation
            and self.pdf is other.pdf
        )

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # Serialized form: "idnum generation R".
        stream.write(b_(f"{self.idnum} {self.generation} R"))

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)

    @staticmethod
    def read_from_stream(stream: StreamType, pdf: Any) -> "IndirectObject":  # PdfReader
        """Parse "idnum generation R" from *stream*; raises on truncation
        or a missing trailing "R"."""
        # Accumulate digits of the object number up to the first whitespace.
        idnum = b""
        while True:
            tok = stream.read(1)
            if not tok:
                raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
            if tok.isspace():
                break
            idnum += tok
        # Accumulate the generation number; leading whitespace is skipped
        # (the `continue` below), trailing whitespace terminates it.
        generation = b""
        while True:
            tok = stream.read(1)
            if not tok:
                raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
            if tok.isspace():
                if not generation:
                    continue
                break
            generation += tok
        r = read_non_whitespace(stream)
        if r != b"R":
            raise PdfReadError(
                f"Error reading indirect object reference at byte {hex_str(stream.tell())}"
            )
        return IndirectObject(int(idnum), int(generation), pdf)

    @staticmethod
    def readFromStream(
        stream: StreamType, pdf: Any  # PdfReader
    ) -> "IndirectObject":  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
        return IndirectObject.read_from_stream(stream, pdf)
|
||||||
|
|
||||||
|
|
||||||
|
class FloatObject(decimal.Decimal, PdfObject):
    """A PDF real number, backed by ``decimal.Decimal`` so values read from
    a file round-trip without binary floating-point artifacts."""

    def __new__(
        cls, value: Union[str, Any] = "0", context: Optional[Any] = None
    ) -> "FloatObject":
        try:
            return decimal.Decimal.__new__(cls, str_(value), context)
        except Exception:
            # If this isn't a valid decimal (happens in malformed PDFs)
            # fallback to 0 instead of aborting the whole parse.
            logger_warning(f"FloatObject ({value}) invalid; use 0.0 instead", __name__)
            return decimal.Decimal.__new__(cls, "0.0")

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "FloatObject":
        """clone object into pdf_dest"""
        return cast("FloatObject", self._reference_clone(FloatObject(self), pdf_dest))

    def __repr__(self) -> str:
        if self == self.to_integral():
            # If this is an integer, format it with no decimal place.
            return str(self.quantize(decimal.Decimal(1)))
        else:
            # Otherwise, format it with a decimal place, taking care to
            # remove any extraneous trailing zeros. (Safe: this branch only
            # runs for non-integral values, so rstrip never strips the
            # digit left of the decimal point.)
            return f"{self:f}".rstrip("0")

    def as_numeric(self) -> float:
        # float() accepts the ASCII bytes produced by repr().
        return float(repr(self).encode("utf8"))

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # encryption_key is unused: numbers are never encrypted in a PDF.
        stream.write(repr(self).encode("utf8"))

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)
|
||||||
|
|
||||||
|
|
||||||
|
class NumberObject(int, PdfObject):
    """A PDF integer; construction degrades to 0 on malformed input."""

    # Bytes that TERMINATE a number token (anything outside the class).
    # NOTE(review): inside the character class, ``+-.`` is a *range* that
    # also matches "," — so commas are (probably unintentionally) accepted
    # as number characters; confirm before tightening to ``[^+\-.0-9]``,
    # as that would change how malformed files are parsed.
    NumberPattern = re.compile(b"[^+-.0-9]")

    def __new__(cls, value: Any) -> "NumberObject":
        try:
            return int.__new__(cls, int(value))
        except ValueError:
            # Malformed number in the source PDF: warn and degrade to 0.
            logger_warning(f"NumberObject({value}) invalid; use 0 instead", __name__)
            return int.__new__(cls, 0)

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "NumberObject":
        """clone object into pdf_dest"""
        return cast("NumberObject", self._reference_clone(NumberObject(self), pdf_dest))

    def as_numeric(self) -> int:
        # int() accepts the ASCII bytes produced by repr().
        return int(repr(self).encode("utf8"))

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # encryption_key is unused: numbers are never encrypted in a PDF.
        stream.write(repr(self).encode("utf8"))

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)

    @staticmethod
    def read_from_stream(stream: StreamType) -> Union["NumberObject", "FloatObject"]:
        """Read a numeric token; a "." anywhere makes it a FloatObject."""
        num = read_until_regex(stream, NumberObject.NumberPattern)
        if num.find(b".") != -1:
            return FloatObject(num)
        return NumberObject(num)

    @staticmethod
    def readFromStream(
        stream: StreamType,
    ) -> Union["NumberObject", "FloatObject"]:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
        return NumberObject.read_from_stream(stream)
|
||||||
|
|
||||||
|
|
||||||
|
class ByteStringObject(bytes, PdfObject):
    """
    Represents a string object where the text encoding could not be determined.
    This occurs quite often, as the PDF spec doesn't provide an alternate way to
    represent strings -- for example, the encryption data stored in files (like
    /O) is clearly not text, but is still stored in a "String" object.
    """

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "ByteStringObject":
        """clone object into pdf_dest"""
        duplicate = self._reference_clone(ByteStringObject(bytes(self)), pdf_dest)
        return cast("ByteStringObject", duplicate)

    @property
    def original_bytes(self) -> bytes:
        """For compatibility with TextStringObject.original_bytes."""
        return self

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        payload = self
        if encryption_key:
            from .._security import RC4_encrypt

            payload = RC4_encrypt(encryption_key, payload)  # type: ignore
        # Serialize as a hex string: <...>.
        for chunk in (b"<", hexencode(payload), b">"):
            stream.write(chunk)

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)
|
||||||
|
|
||||||
|
|
||||||
|
class TextStringObject(str, PdfObject):
    """
    Represents a string object that has been decoded into a real unicode string.
    If read from a PDF document, this string appeared to match the
    PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
    occur.
    """

    # Flags recording WHICH decoding produced this string; used by
    # get_original_bytes() to re-encode losslessly.
    autodetect_pdfdocencoding = False
    autodetect_utf16 = False

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "TextStringObject":
        """clone object into pdf_dest"""
        obj = TextStringObject(self)
        obj.autodetect_pdfdocencoding = self.autodetect_pdfdocencoding
        obj.autodetect_utf16 = self.autodetect_utf16
        return cast("TextStringObject", self._reference_clone(obj, pdf_dest))

    @property
    def original_bytes(self) -> bytes:
        """
        It is occasionally possible that a text string object gets created where
        a byte string object was expected due to the autodetection mechanism --
        if that occurs, this "original_bytes" property can be used to
        back-calculate what the original encoded bytes were.
        """
        return self.get_original_bytes()

    def get_original_bytes(self) -> bytes:
        # We're a text string object, but the library is trying to get our raw
        # bytes. This can happen if we auto-detected this string as text, but
        # we were wrong. It's pretty common. Return the original bytes that
        # would have been used to create this object, based upon the autodetect
        # method.
        if self.autodetect_utf16:
            return codecs.BOM_UTF16_BE + self.encode("utf-16be")
        elif self.autodetect_pdfdocencoding:
            return encode_pdfdocencoding(self)
        else:
            raise Exception("no information about original bytes")

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # Try to write the string out as a PDFDocEncoding encoded string. It's
        # nicer to look at in the PDF file. Sadly, we take a performance hit
        # here for trying...
        try:
            bytearr = encode_pdfdocencoding(self)
        except UnicodeEncodeError:
            bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
        if encryption_key:
            from .._security import RC4_encrypt

            bytearr = RC4_encrypt(encryption_key, bytearr)
            obj = ByteStringObject(bytearr)
            obj.write_to_stream(stream, None)
        else:
            stream.write(b"(")
            for c in bytearr:
                # Bug fix: the original tested ``c != b" "`` — but ``c`` is an
                # int (iterating bytes yields ints), so the comparison was
                # always True and spaces were octal-escaped against the
                # condition's evident intent. Compare characters instead.
                if not chr(c).isalnum() and chr(c) != " ":
                    # This:
                    #   stream.write(b_(rf"\{c:0>3o}"))
                    # gives
                    #   https://github.com/davidhalter/parso/issues/207
                    stream.write(b_("\\%03o" % c))
                else:
                    stream.write(b_(chr(c)))
            stream.write(b")")

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)
|
||||||
|
|
||||||
|
|
||||||
|
class NameObject(str, PdfObject):
    """A PDF name token such as ``/Type``; stored decoded, written with
    ``#xx`` hex escapes where the spec requires them."""

    # Bytes that terminate a name token while parsing.
    delimiter_pattern = re.compile(rb"\s+|[\(\)<>\[\]{}/%]")
    # NOTE(review): "surfix" looks like a typo for "prefix", but it is a
    # public class attribute, so renaming it would break external users.
    surfix = b"/"
    # Characters that must be written as "#xx" escapes: the escape char
    # itself, delimiters, and all control/space characters (< 0x21).
    renumber_table = {
        "#": b"#23",
        "(": b"#28",
        ")": b"#29",
        "/": b"#2F",
        **{chr(i): f"#{i:02X}".encode() for i in range(33)},
    }

    def clone(
        self,
        pdf_dest: Any,
        force_duplicate: bool = False,
        ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
    ) -> "NameObject":
        """clone object into pdf_dest"""
        return cast("NameObject", self._reference_clone(NameObject(self), pdf_dest))

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        # encryption_key is unused: names are never encrypted in a PDF.
        stream.write(self.renumber())  # b_(renumber(self)))

    def writeToStream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
        self.write_to_stream(stream, encryption_key)

    def renumber(self) -> bytes:
        """Return the escaped byte form of this name for serialization."""
        out = self[0].encode("utf-8")
        if out != b"/":
            # A name should always start with "/"; warn but keep going.
            logger_warning(f"Incorrect first char in NameObject:({self})", __name__)
        for c in self[1:]:
            if c > "~":
                # Non-ASCII: escape every byte of the UTF-8 encoding.
                for x in c.encode("utf-8"):
                    out += f"#{x:02X}".encode()
            else:
                try:
                    out += self.renumber_table[c]
                except KeyError:
                    # Ordinary printable ASCII: written as-is.
                    out += c.encode("utf-8")
        return out

    @staticmethod
    def unnumber(sin: bytes) -> bytes:
        """Replace each "#xx" escape in *sin* with its raw byte."""
        i = sin.find(b"#", 0)
        while i >= 0:
            try:
                sin = sin[:i] + unhexlify(sin[i + 1 : i + 3]) + sin[i + 3 :]
                # Search from i+1: the substituted byte may itself be "#".
                i = sin.find(b"#", i + 1)
            except ValueError:
                # if the 2 characters after # can not be converted to hexa
                # we change nothing and carry on
                i = i + 1
        return sin

    @staticmethod
    def read_from_stream(stream: StreamType, pdf: Any) -> "NameObject":  # PdfReader
        """Parse a name token from *stream*; in non-strict mode illegal
        characters are tolerated via a charmap fallback."""
        name = stream.read(1)
        if name != NameObject.surfix:
            raise PdfReadError("name read error")
        name += read_until_regex(stream, NameObject.delimiter_pattern, ignore_eof=True)
        try:
            # Name objects should represent irregular characters
            # with a '#' followed by the symbol's hex number
            name = NameObject.unnumber(name)
            # Try UTF-8 first, then GBK (seen in Chinese-producer PDFs).
            for enc in ("utf-8", "gbk"):
                try:
                    ret = name.decode(enc)
                    return NameObject(ret)
                except Exception:
                    pass
            raise UnicodeDecodeError("", name, 0, 0, "Code Not Found")
        except (UnicodeEncodeError, UnicodeDecodeError) as e:
            if not pdf.strict:
                logger_warning(
                    f"Illegal character in Name Object ({repr(name)})", __name__
                )
                # charmap maps each byte 1:1, so nothing is lost.
                return NameObject(name.decode("charmap"))
            else:
                raise PdfReadError(
                    f"Illegal character in Name Object ({repr(name)})"
                ) from e

    @staticmethod
    def readFromStream(
        stream: StreamType, pdf: Any  # PdfReader
    ) -> "NameObject":  # pragma: no cover
        # Deprecated camelCase alias.
        deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
        return NameObject.read_from_stream(stream, pdf)
|
||||||
|
|
||||||
|
|
||||||
|
def encode_pdfdocencoding(unicode_string: str) -> bytes:
    """
    Encode *unicode_string* into bytes using the PDFDocEncoding table.

    :param unicode_string: the text to encode.
    :return: the PDFDocEncoding-encoded bytes.
    :raises UnicodeEncodeError: if a character has no entry in the
        PDFDocEncoding reverse table.
    """
    encoded = b""
    for char in unicode_string:
        # Look-before-you-leap on the reverse table so the error carries
        # the exact offending character.
        if char not in _pdfdoc_encoding_rev:
            raise UnicodeEncodeError(
                "pdfdocencoding", char, -1, -1, "does not exist in translation table"
            )
        encoded += b_(chr(_pdfdoc_encoding_rev[char]))
    return encoded
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,129 @@
|
||||||
|
from typing import Any, Optional, Tuple, Union
|
||||||
|
|
||||||
|
|
||||||
|
class Fit:
    """A PDF destination fit specification: a fit type plus its numeric arguments."""

    def __init__(
        self, fit_type: str, fit_args: Tuple[Union[None, float, Any], ...] = tuple()
    ):
        from ._base import FloatObject, NameObject, NullObject

        def to_pdf_number(value: Any) -> Any:
            # None (or an existing NullObject) means "keep the current value".
            if value is None or isinstance(value, NullObject):
                return NullObject()
            return FloatObject(value)

        self.fit_type = NameObject(fit_type)
        self.fit_args = [to_pdf_number(value) for value in fit_args]

    @classmethod
    def xyz(
        cls,
        left: Optional[float] = None,
        top: Optional[float] = None,
        zoom: Optional[float] = None,
    ) -> "Fit":
        """
        Display the page with the coordinates (left, top) positioned at
        the upper-left corner of the window and the contents magnified
        by the factor *zoom*.

        A null (None) value for any of left, top, or zoom keeps the
        current value of that parameter unchanged; a zoom of 0 has the
        same meaning as a null value.
        """
        return Fit(fit_type="/XYZ", fit_args=(left, top, zoom))

    @classmethod
    def fit(cls) -> "Fit":
        """
        Display the page magnified just enough to fit it entirely within
        the window both horizontally and vertically. If the required
        horizontal and vertical factors differ, the smaller is used and
        the page is centered in the other dimension.
        """
        return Fit(fit_type="/Fit")

    @classmethod
    def fit_horizontally(cls, top: Optional[float] = None) -> "Fit":
        """
        Display the page with the vertical coordinate *top* at the top
        edge of the window, magnified just enough to fit the entire
        width of the page within the window.

        A null (None) *top* keeps the current value unchanged.
        """
        return Fit(fit_type="/FitH", fit_args=(top,))

    @classmethod
    def fit_vertically(cls, left: Optional[float] = None) -> "Fit":
        """Build a /FitV destination with the horizontal coordinate *left*."""
        return Fit(fit_type="/FitV", fit_args=(left,))

    @classmethod
    def fit_rectangle(
        cls,
        left: Optional[float] = None,
        bottom: Optional[float] = None,
        right: Optional[float] = None,
        top: Optional[float] = None,
    ) -> "Fit":
        """
        Display the page magnified just enough to fit the rectangle
        (left, bottom, right, top) entirely within the window both
        horizontally and vertically. If the required factors differ,
        the smaller is used and the rectangle is centered in the other
        dimension.

        A null (None) value for any parameter may result in
        unpredictable behavior.
        """
        return Fit(fit_type="/FitR", fit_args=(left, bottom, right, top))

    @classmethod
    def fit_box(cls) -> "Fit":
        """
        Display the page magnified just enough to fit its bounding box
        entirely within the window both horizontally and vertically,
        using the smaller factor and centering in the other dimension
        when the factors differ.
        """
        return Fit(fit_type="/FitB")

    @classmethod
    def fit_box_horizontally(cls, top: Optional[float] = None) -> "Fit":
        """
        Display the page with the vertical coordinate *top* at the top
        edge of the window, magnified just enough to fit the entire
        width of its bounding box within the window.

        A null (None) *top* keeps the current value unchanged.
        """
        return Fit(fit_type="/FitBH", fit_args=(top,))

    @classmethod
    def fit_box_vertically(cls, left: Optional[float] = None) -> "Fit":
        """
        Display the page with the horizontal coordinate *left* at the
        left edge of the window, magnified just enough to fit the entire
        height of its bounding box within the window.

        A null (None) *left* keeps the current value unchanged.
        """
        return Fit(fit_type="/FitBV", fit_args=(left,))

    def __str__(self) -> str:
        """Render as e.g. ``Fit(/XYZ, [...])``, or ``Fit(/Fit)`` with no args."""
        if self.fit_args:
            return f"Fit({self.fit_type}, {self.fit_args})"
        return f"Fit({self.fit_type})"
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level default destination fit: show the whole page (/Fit).
DEFAULT_FIT = Fit.fit()
|
||||||
|
|
@ -0,0 +1,35 @@
|
||||||
|
from typing import Any, Union
|
||||||
|
|
||||||
|
from .._utils import StreamType, deprecation_with_replacement
|
||||||
|
from ._base import NameObject
|
||||||
|
from ._data_structures import Destination
|
||||||
|
|
||||||
|
|
||||||
|
class OutlineItem(Destination):
    """A Destination that serializes itself as a PDF outline (bookmark) dictionary."""

    def write_to_stream(
        self, stream: StreamType, encryption_key: Union[None, str, bytes]
    ) -> None:
        """
        Write this outline item as a PDF dictionary (``<< ... >>``) to *stream*.

        :param stream: the output byte stream.
        :param encryption_key: forwarded to each value's write_to_stream.
        """
        stream.write(b"<<\n")
        # Emit only the title/tree-structure keys that are present on self.
        for key in [
            NameObject(x)
            for x in ["/Title", "/Parent", "/First", "/Last", "/Next", "/Prev"]
            if x in self
        ]:
            key.write_to_stream(stream, encryption_key)
            stream.write(b" ")
            value = self.raw_get(key)
            value.write_to_stream(stream, encryption_key)
            stream.write(b"\n")
        # The destination itself is always written under /Dest.
        key = NameObject("/Dest")
        key.write_to_stream(stream, encryption_key)
        stream.write(b" ")
        value = self.dest_array
        value.write_to_stream(stream, encryption_key)
        stream.write(b"\n")
        stream.write(b">>")
|
||||||
|
|
||||||
|
|
||||||
|
class Bookmark(OutlineItem):  # pragma: no cover
    """Deprecated since 3.0.0; use :class:`OutlineItem` instead."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        deprecation_with_replacement("Bookmark", "OutlineItem", "3.0.0")
        super().__init__(*args, **kwargs)
|
||||||
|
|
@ -0,0 +1,265 @@
|
||||||
|
import decimal
|
||||||
|
from typing import Any, List, Tuple, Union
|
||||||
|
|
||||||
|
from .._utils import deprecation_no_replacement, deprecation_with_replacement
|
||||||
|
from ._base import FloatObject, NumberObject
|
||||||
|
from ._data_structures import ArrayObject
|
||||||
|
|
||||||
|
|
||||||
|
class RectangleObject(ArrayObject):
    """
    An array of four numbers ``[left, bottom, right, top]``.

    This class is used to represent *page boxes* in PyPDF2. These boxes include:

    * :attr:`artbox <PyPDF2._page.PageObject.artbox>`
    * :attr:`bleedbox <PyPDF2._page.PageObject.bleedbox>`
    * :attr:`cropbox <PyPDF2._page.PageObject.cropbox>`
    * :attr:`mediabox <PyPDF2._page.PageObject.mediabox>`
    * :attr:`trimbox <PyPDF2._page.PageObject.trimbox>`
    """

    def __init__(
        self, arr: Union["RectangleObject", Tuple[float, float, float, float]]
    ) -> None:
        # must have four points
        assert len(arr) == 4
        # automatically convert arr[x] into NumberObject(arr[x]) if necessary
        ArrayObject.__init__(self, [self._ensure_is_number(x) for x in arr])  # type: ignore

    def _ensure_is_number(self, value: Any) -> Union[FloatObject, NumberObject]:
        """Coerce *value* to a FloatObject unless it is already a PDF number."""
        if not isinstance(value, (NumberObject, FloatObject)):
            value = FloatObject(value)
        return value

    def scale(self, sx: float, sy: float) -> "RectangleObject":
        """Return a new box with x-coordinates scaled by *sx* and y-coordinates by *sy*."""
        return RectangleObject(
            (
                float(self.left) * sx,
                float(self.bottom) * sy,
                float(self.right) * sx,
                float(self.top) * sy,
            )
        )

    def ensureIsNumber(
        self, value: Any
    ) -> Union[FloatObject, NumberObject]:  # pragma: no cover
        """Deprecated since 3.0.0; internal helper with no public replacement."""
        deprecation_no_replacement("ensureIsNumber", "3.0.0")
        return self._ensure_is_number(value)

    def __repr__(self) -> str:
        return f"RectangleObject({repr(list(self))})"

    @property
    def left(self) -> FloatObject:
        """x-coordinate of the left edge (element 0)."""
        return self[0]

    @left.setter
    def left(self, f: float) -> None:
        self[0] = FloatObject(f)

    @property
    def bottom(self) -> FloatObject:
        """y-coordinate of the bottom edge (element 1)."""
        return self[1]

    @bottom.setter
    def bottom(self, f: float) -> None:
        self[1] = FloatObject(f)

    @property
    def right(self) -> FloatObject:
        """x-coordinate of the right edge (element 2)."""
        return self[2]

    @right.setter
    def right(self, f: float) -> None:
        self[2] = FloatObject(f)

    @property
    def top(self) -> FloatObject:
        """y-coordinate of the top edge (element 3)."""
        return self[3]

    @top.setter
    def top(self, f: float) -> None:
        self[3] = FloatObject(f)

    def getLowerLeft_x(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`left` instead."""
        deprecation_with_replacement("getLowerLeft_x", "left", "3.0.0")
        return self.left

    def getLowerLeft_y(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`bottom` instead."""
        deprecation_with_replacement("getLowerLeft_y", "bottom", "3.0.0")
        return self.bottom

    def getUpperRight_x(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`right` instead."""
        deprecation_with_replacement("getUpperRight_x", "right", "3.0.0")
        return self.right

    def getUpperRight_y(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`top` instead."""
        deprecation_with_replacement("getUpperRight_y", "top", "3.0.0")
        return self.top

    def getUpperLeft_x(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`left` instead."""
        deprecation_with_replacement("getUpperLeft_x", "left", "3.0.0")
        return self.left

    def getUpperLeft_y(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`top` instead."""
        deprecation_with_replacement("getUpperLeft_y", "top", "3.0.0")
        return self.top

    def getLowerRight_x(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`right` instead."""
        deprecation_with_replacement("getLowerRight_x", "right", "3.0.0")
        return self.right

    def getLowerRight_y(self) -> FloatObject:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`bottom` instead."""
        deprecation_with_replacement("getLowerRight_y", "bottom", "3.0.0")
        return self.bottom

    @property
    def lower_left(self) -> Tuple[decimal.Decimal, decimal.Decimal]:
        """
        Property to read and modify the lower left coordinate of this box
        in (x,y) form.
        """
        return self.left, self.bottom

    @lower_left.setter
    def lower_left(self, value: List[Any]) -> None:
        self[0], self[1] = (self._ensure_is_number(x) for x in value)

    @property
    def lower_right(self) -> Tuple[decimal.Decimal, decimal.Decimal]:
        """
        Property to read and modify the lower right coordinate of this box
        in (x,y) form.
        """
        return self.right, self.bottom

    @lower_right.setter
    def lower_right(self, value: List[Any]) -> None:
        self[2], self[1] = (self._ensure_is_number(x) for x in value)

    @property
    def upper_left(self) -> Tuple[decimal.Decimal, decimal.Decimal]:
        """
        Property to read and modify the upper left coordinate of this box
        in (x,y) form.
        """
        return self.left, self.top

    @upper_left.setter
    def upper_left(self, value: List[Any]) -> None:
        self[0], self[3] = (self._ensure_is_number(x) for x in value)

    @property
    def upper_right(self) -> Tuple[decimal.Decimal, decimal.Decimal]:
        """
        Property to read and modify the upper right coordinate of this box
        in (x,y) form.
        """
        return self.right, self.top

    @upper_right.setter
    def upper_right(self, value: List[Any]) -> None:
        self[2], self[3] = (self._ensure_is_number(x) for x in value)

    def getLowerLeft(
        self,
    ) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_left` instead."""
        deprecation_with_replacement("getLowerLeft", "lower_left", "3.0.0")
        return self.lower_left

    def getLowerRight(
        self,
    ) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_right` instead."""
        deprecation_with_replacement("getLowerRight", "lower_right", "3.0.0")
        return self.lower_right

    def getUpperLeft(
        self,
    ) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_left` instead."""
        deprecation_with_replacement("getUpperLeft", "upper_left", "3.0.0")
        return self.upper_left

    def getUpperRight(
        self,
    ) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_right` instead."""
        deprecation_with_replacement("getUpperRight", "upper_right", "3.0.0")
        return self.upper_right

    def setLowerLeft(self, value: Tuple[float, float]) -> None:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_left` instead."""
        deprecation_with_replacement("setLowerLeft", "lower_left", "3.0.0")
        self.lower_left = value  # type: ignore

    def setLowerRight(self, value: Tuple[float, float]) -> None:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_right` instead."""
        deprecation_with_replacement("setLowerRight", "lower_right", "3.0.0")
        # Delegate to the property for consistency with setLowerLeft
        # (previously duplicated the setter's assignment inline).
        self.lower_right = value  # type: ignore

    def setUpperLeft(self, value: Tuple[float, float]) -> None:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_left` instead."""
        deprecation_with_replacement("setUpperLeft", "upper_left", "3.0.0")
        self.upper_left = value  # type: ignore

    def setUpperRight(self, value: Tuple[float, float]) -> None:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_right` instead."""
        deprecation_with_replacement("setUpperRight", "upper_right", "3.0.0")
        self.upper_right = value  # type: ignore

    @property
    def width(self) -> decimal.Decimal:
        """Horizontal extent: right - left."""
        return self.right - self.left

    def getWidth(self) -> decimal.Decimal:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`width` instead."""
        deprecation_with_replacement("getWidth", "width", "3.0.0")
        return self.width

    @property
    def height(self) -> decimal.Decimal:
        """Vertical extent: top - bottom."""
        return self.top - self.bottom

    def getHeight(self) -> decimal.Decimal:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`height` instead."""
        deprecation_with_replacement("getHeight", "height", "3.0.0")
        return self.height

    @property
    def lowerLeft(self) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_left` instead."""
        deprecation_with_replacement("lowerLeft", "lower_left", "3.0.0")
        return self.lower_left

    @lowerLeft.setter
    def lowerLeft(
        self, value: Tuple[decimal.Decimal, decimal.Decimal]
    ) -> None:  # pragma: no cover
        deprecation_with_replacement("lowerLeft", "lower_left", "3.0.0")
        self.lower_left = value

    @property
    def lowerRight(self) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`lower_right` instead."""
        deprecation_with_replacement("lowerRight", "lower_right", "3.0.0")
        return self.lower_right

    @lowerRight.setter
    def lowerRight(
        self, value: Tuple[decimal.Decimal, decimal.Decimal]
    ) -> None:  # pragma: no cover
        deprecation_with_replacement("lowerRight", "lower_right", "3.0.0")
        self.lower_right = value

    @property
    def upperLeft(self) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_left` instead."""
        deprecation_with_replacement("upperLeft", "upper_left", "3.0.0")
        return self.upper_left

    @upperLeft.setter
    def upperLeft(
        self, value: Tuple[decimal.Decimal, decimal.Decimal]
    ) -> None:  # pragma: no cover
        deprecation_with_replacement("upperLeft", "upper_left", "3.0.0")
        self.upper_left = value

    @property
    def upperRight(self) -> Tuple[decimal.Decimal, decimal.Decimal]:  # pragma: no cover
        """Deprecated since 3.0.0; use :attr:`upper_right` instead."""
        deprecation_with_replacement("upperRight", "upper_right", "3.0.0")
        return self.upper_right

    @upperRight.setter
    def upperRight(
        self, value: Tuple[decimal.Decimal, decimal.Decimal]
    ) -> None:  # pragma: no cover
        deprecation_with_replacement("upperRight", "upper_right", "3.0.0")
        self.upper_right = value
|
||||||
|
|
@ -0,0 +1,172 @@
|
||||||
|
import codecs
|
||||||
|
from typing import Dict, List, Tuple, Union
|
||||||
|
|
||||||
|
from .._codecs import _pdfdoc_encoding
|
||||||
|
from .._utils import StreamType, b_, logger_warning, read_non_whitespace
|
||||||
|
from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfStreamError
|
||||||
|
from ._base import ByteStringObject, TextStringObject
|
||||||
|
|
||||||
|
|
||||||
|
def hex_to_rgb(value: str) -> Tuple[float, float, float]:
    """Convert a '#RRGGBB' (or 'RRGGBB') hex color to an (r, g, b) triple of floats in [0, 1]."""
    digits = value.lstrip("#")
    channels = (digits[0:2], digits[2:4], digits[4:6])
    return tuple(int(channel, 16) / 255.0 for channel in channels)  # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
def read_hex_string_from_stream(
    stream: StreamType,
    forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union["TextStringObject", "ByteStringObject"]:
    """
    Read a PDF hex string literal (``<...>``) from *stream*.

    The stream must be positioned at the opening '<'. Pairs of hex
    digits become characters; a trailing odd digit is padded with '0'.

    :param stream: the input byte stream.
    :param forced_encoding: passed through to :func:`create_string_object`.
    :raises PdfStreamError: if the stream ends before the closing '>'.
    """
    stream.read(1)  # skip the opening '<'
    txt = ""
    x = b""
    while True:
        tok = read_non_whitespace(stream)
        if not tok:
            raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
        if tok == b">":
            break
        x += tok
        if len(x) == 2:
            # A complete hex pair: convert to its character.
            txt += chr(int(x, base=16))
            x = b""
    # Odd number of digits: treat the final digit as if followed by '0'.
    if len(x) == 1:
        x += b"0"
    if len(x) == 2:
        txt += chr(int(x, base=16))
    return create_string_object(b_(txt), forced_encoding)
|
||||||
|
|
||||||
|
|
||||||
|
def read_string_from_stream(
    stream: StreamType,
    forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union["TextStringObject", "ByteStringObject"]:
    """
    Read a PDF literal string (``(...)``) from *stream*.

    The stream must be positioned at the opening '('. Nested balanced
    parentheses, backslash escapes, 1-3 digit octal escapes, and escaped
    line breaks are handled.

    :param stream: the input byte stream.
    :param forced_encoding: passed through to :func:`create_string_object`.
    :raises PdfStreamError: if the stream ends before the closing ')'.
    """
    tok = stream.read(1)  # consume the opening '('
    parens = 1
    txt = []
    # Single-character escape translations, built once instead of being
    # rebuilt for every escape character encountered in the loop.
    escape_dict = {
        b"n": b"\n",
        b"r": b"\r",
        b"t": b"\t",
        b"b": b"\b",
        b"f": b"\f",
        b"c": rb"\c",
        b"(": b"(",
        b")": b")",
        b"/": b"/",
        b"\\": b"\\",
        b" ": b" ",
        b"%": b"%",
        b"<": b"<",
        b">": b">",
        b"[": b"[",
        b"]": b"]",
        b"#": b"#",
        b"_": b"_",
        b"&": b"&",
        b"$": b"$",
    }
    while True:
        tok = stream.read(1)
        if not tok:
            raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
        if tok == b"(":
            parens += 1
        elif tok == b")":
            parens -= 1
            if parens == 0:
                break
        elif tok == b"\\":
            tok = stream.read(1)
            try:
                tok = escape_dict[tok]
            except KeyError:
                if b"0" <= tok and tok <= b"7":
                    # "The number ddd may consist of one, two, or three
                    # octal digits; high-order overflow shall be ignored.
                    # Three octal digits shall be used, with leading zeros
                    # as needed, if the next character of the string is also
                    # a digit." (PDF reference 7.3.4.2, p 16)
                    for _ in range(2):
                        ntok = stream.read(1)
                        if b"0" <= ntok and ntok <= b"7":
                            tok += ntok
                        else:
                            stream.seek(-1, 1)  # ntok has to be analysed
                            break
                    tok = b_(chr(int(tok, base=8)))
                elif tok in b"\n\r":
                    # This case is hit when a backslash followed by a line
                    # break occurs. If it's a multi-char EOL, consume the
                    # second character:
                    tok = stream.read(1)
                    if tok not in b"\n\r":
                        stream.seek(-1, 1)
                    # Then don't add anything to the actual string, since this
                    # line break was escaped:
                    tok = b""
                else:
                    msg = rf"Unexpected escaped string: {tok.decode('utf8')}"
                    logger_warning(msg, __name__)
        txt.append(tok)
    return create_string_object(b"".join(txt), forced_encoding)
|
||||||
|
|
||||||
|
|
||||||
|
def create_string_object(
    string: Union[str, bytes],
    forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union[TextStringObject, ByteStringObject]:
    """
    Create a ByteStringObject or a TextStringObject from a string to represent the string.

    :param Union[str, bytes] string: A string

    :param forced_encoding: if a str, the codec name used to decode
        *string* (the special value "bytes" keeps it as a
        ByteStringObject); if a list/dict, a byte-value -> character
        lookup table; if None, the encoding is auto-detected (UTF-16
        BOM, else PDFDocEncoding).

    :raises TypeError: If string is not of type str or bytes.
    """
    if isinstance(string, str):
        return TextStringObject(string)
    elif isinstance(string, bytes):
        if isinstance(forced_encoding, (list, dict)):
            out = ""
            for x in string:
                try:
                    out += forced_encoding[x]
                except Exception:
                    # Byte missing from the table: fall back to a 1:1 charmap.
                    out += bytes((x,)).decode("charmap")
            return TextStringObject(out)
        elif isinstance(forced_encoding, str):
            if forced_encoding == "bytes":
                return ByteStringObject(string)
            return TextStringObject(string.decode(forced_encoding))
        else:
            try:
                if string.startswith(codecs.BOM_UTF16_BE):
                    retval = TextStringObject(string.decode("utf-16"))
                    retval.autodetect_utf16 = True
                    return retval
                else:
                    # This is probably a big performance hit here, but we need to
                    # convert string objects into the text/unicode-aware version if
                    # possible... and the only way to check if that's possible is
                    # to try. Some strings are strings, some are just byte arrays.
                    retval = TextStringObject(decode_pdfdocencoding(string))
                    retval.autodetect_pdfdocencoding = True
                    return retval
            except UnicodeDecodeError:
                return ByteStringObject(string)
    else:
        raise TypeError("create_string_object should have str or unicode arg")
|
||||||
|
|
||||||
|
|
||||||
|
def decode_pdfdocencoding(byte_array: bytes) -> str:
    """
    Decode PDFDocEncoding bytes into a str.

    :param byte_array: the encoded bytes.
    :return: the decoded text.
    :raises UnicodeDecodeError: if a byte maps to the NUL placeholder,
        i.e. has no PDFDocEncoding meaning.
    """
    retval = ""
    for b in byte_array:
        c = _pdfdoc_encoding[b]
        if c == "\u0000":
            # ``b`` is an int here, so ``bytes((b,))`` is the single
            # offending byte. (The previous ``bytearray(b)`` built ``b``
            # zero bytes instead of the byte value itself.)
            raise UnicodeDecodeError(
                "pdfdocencoding",
                bytes((b,)),
                -1,
                -1,
                "does not exist in translation table",
            )
        retval += c
    return retval
|
||||||
|
|
@ -0,0 +1,173 @@
|
||||||
|
"""
|
||||||
|
Representation and utils for ranges of PDF file pages.
|
||||||
|
|
||||||
|
Copyright (c) 2014, Steve Witham <switham_github@mac-guyver.com>.
|
||||||
|
All rights reserved. This software is available under a BSD license;
|
||||||
|
see https://github.com/py-pdf/PyPDF2/blob/main/LICENSE
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from typing import Any, List, Tuple, Union
|
||||||
|
|
||||||
|
from .errors import ParseError
|
||||||
|
|
||||||
|
_INT_RE = r"(0|-?[1-9]\d*)"  # A decimal int, don't allow "-0".
PAGE_RANGE_RE = "^({int}|({int}?(:{int}?(:{int}?)?)))$".format(int=_INT_RE)
# group numbers:   12     34      5 6      7 8


class PageRange:
    """
    A slice-like representation of a range of page indices.

    Page indices start at zero, exactly like list indices, and the
    syntax mirrors what goes between brackets in a slice expression.
    Since ``slice`` cannot be subclassed, this class converts to and
    from slices and supports similar use:

    - PageRange(str) parses a string representing a page range.
    - PageRange(slice) directly "imports" a slice.
    - to_slice() gives the equivalent slice.
    - str() and repr() allow printing.
    - indices(n) is like slice.indices(n).
    """

    def __init__(self, arg: Union[slice, "PageRange", str]) -> None:
        """
        Initialize from a slice (equivalent page range), another
        PageRange (copy), or a string of the form
        "int", "[int]:[int]" or "[int]:[int]:[int]"
        where brackets mark optional ints. Page indices start at zero.

        Examples::
            :      all pages.                -1     last page.
            22     just the 23rd page.       :-1    all but the last page.
            0:3    the first three pages.    -2     second-to-last page.
            :3     the first three pages.    -2:    last two pages.
            5:     from the sixth page on.   -3:-1  third & second to last.
            ::2    0 2 4 ... to the end.     3:0:-1 3 2 1 but not 0.
            1:10:2 1 3 5 7 9                 2::-1  2 1 0.
            ::-1   all pages in reverse order.

        Note the difference from slice() arguments: slice(3) means the
        first three pages, while PageRange("3") is only the fourth page;
        PageRange(slice(3)) is again the first three pages.

        :raises ParseError: if *arg* is a string that does not match the
            page-range grammar (or is not a slice/PageRange/str at all).
        """
        if isinstance(arg, slice):
            self._slice = arg
            return

        if isinstance(arg, PageRange):
            self._slice = arg.to_slice()
            return

        m = isinstance(arg, str) and re.match(PAGE_RANGE_RE, arg)
        if not m:
            raise ParseError(arg)
        if m.group(2):
            # Special case: just an int means a range of one page.
            start = int(m.group(2))
            stop = None if start == -1 else start + 1
            self._slice = slice(start, stop)
        else:
            # Groups 4, 6, 8 hold the optional start:stop:step fields.
            fields = [int(g) if g else None for g in m.group(4, 6, 8)]
            self._slice = slice(*fields)

    @staticmethod
    def valid(input: Any) -> bool:
        """True if input is a valid initializer for a PageRange."""
        if isinstance(input, (slice, PageRange)):
            return True
        return isinstance(input, str) and bool(re.match(PAGE_RANGE_RE, input))

    def to_slice(self) -> slice:
        """Return the slice equivalent of this page range."""
        return self._slice

    def __str__(self) -> str:
        """A string like "1:2:3"."""
        s = self._slice
        fields: Union[Tuple[int, int], Tuple[int, int, int]]
        if s.step is None:
            # A one-page range prints as its bare page number.
            if s.start is not None and s.stop == s.start + 1:
                return str(s.start)
            fields = s.start, s.stop
        else:
            fields = s.start, s.stop, s.step
        return ":".join("" if f is None else str(f) for f in fields)

    def __repr__(self) -> str:
        """A string like "PageRange('1:2:3')"."""
        return f"PageRange({str(self)!r})"

    def indices(self, n: int) -> Tuple[int, int, int]:
        """
        n is the length of the list of pages to choose from.

        Returns arguments for range(). See help(slice.indices).
        """
        return self._slice.indices(n)

    def __eq__(self, other: Any) -> bool:
        """Two PageRanges are equal iff their slices are equal."""
        return isinstance(other, PageRange) and self._slice == other._slice

    def __add__(self, other: "PageRange") -> "PageRange":
        """
        Merge two stride-less, touching-or-overlapping ranges into one.

        :raises TypeError: if *other* is not a PageRange.
        :raises ValueError: if either range has a stride, or the ranges
            do not touch.
        """
        if not isinstance(other, PageRange):
            raise TypeError(f"Can't add PageRange and {type(other)}")
        if self._slice.step is not None or other._slice.step is not None:
            raise ValueError("Can't add PageRange with stride")
        a = self._slice.start, self._slice.stop
        b = other._slice.start, other._slice.stop

        if a[0] > b[0]:
            a, b = b, a

        # Now a starts first; the ranges must touch or overlap.
        if b[0] > a[1]:
            # There is a gap between a and b.
            raise ValueError("Can't add PageRanges with gap")
        return PageRange(slice(a[0], max(a[1], b[1])))


PAGE_RANGE_ALL = PageRange(":")  # The range of all pages.
|
||||||
|
|
||||||
|
|
||||||
|
def parse_filename_page_ranges(
    args: List[Union[str, PageRange, None]]
) -> List[Tuple[str, PageRange]]:
    """
    Given a list of filenames and page ranges, return a list of (filename, page_range) pairs.

    First arg must be a filename; other args are filenames, page-range
    expressions, slice objects, or PageRange objects.
    A filename not followed by a page range indicates all pages of the file.

    :raises ValueError: if the first argument is a page range.
    """
    pairs: List[Tuple[str, PageRange]] = []
    pdf_filename = None
    did_page_range = False
    # A trailing None sentinel makes the loop body flush the last file.
    for arg in args + [None]:
        if PageRange.valid(arg):
            if not pdf_filename:
                raise ValueError(
                    "The first argument must be a filename, not a page range."
                )

            pairs.append((pdf_filename, PageRange(arg)))
            did_page_range = True
        else:
            # New filename or end of list--do all of the previous file?
            if pdf_filename and not did_page_range:
                pairs.append((pdf_filename, PAGE_RANGE_ALL))

            pdf_filename = arg
            did_page_range = False
    return pairs
|
||||||
|
|
||||||
|
|
||||||
|
# Any value accepted where a page range is expected: a range string, a
# PageRange, a (start, stop[, step]) tuple, or an explicit page list.
PageRangeSpec = Union[str, PageRange, Tuple[int, int], Tuple[int, int, int], List[int]]
|
||||||
|
|
@ -0,0 +1,48 @@
|
||||||
|
"""Helper to get paper sizes."""
|
||||||
|
|
||||||
|
from collections import namedtuple
|
||||||
|
|
||||||
|
Dimensions = namedtuple("Dimensions", ["width", "height"])
|
||||||
|
|
||||||
|
|
||||||
|
class PaperSize:
|
||||||
|
"""(width, height) of the paper in portrait mode in pixels at 72 ppi."""
|
||||||
|
|
||||||
|
# Notes how to calculate it:
|
||||||
|
# 1. Get the size of the paper in mm
|
||||||
|
# 2. Convert it to inches (25.4 millimeters are equal to 1 inches)
|
||||||
|
# 3. Convert it to pixels ad 72dpi (1 inch is equal to 72 pixels)
|
||||||
|
|
||||||
|
# All Din-A paper sizes follow this pattern:
|
||||||
|
# 2xA(n-1) = A(n)
|
||||||
|
# So the height of the next bigger one is the width of the smaller one
|
||||||
|
# The ratio is always approximately the ratio 1:2**0.5
|
||||||
|
# Additionally, A0 is defined to have an area of 1 m**2
|
||||||
|
# Be aware of rounding issues!
|
||||||
|
A0 = Dimensions(2384, 3370) # 841mm x 1189mm
|
||||||
|
A1 = Dimensions(1684, 2384)
|
||||||
|
A2 = Dimensions(1191, 1684)
|
||||||
|
A3 = Dimensions(842, 1191)
|
||||||
|
A4 = Dimensions(
|
||||||
|
595, 842
|
||||||
|
) # Printer paper, documents - this is by far the most common
|
||||||
|
A5 = Dimensions(420, 595) # Paperback books
|
||||||
|
A6 = Dimensions(298, 420) # Post cards
|
||||||
|
A7 = Dimensions(210, 298)
|
||||||
|
A8 = Dimensions(147, 210)
|
||||||
|
|
||||||
|
# Envelopes
|
||||||
|
C4 = Dimensions(649, 918)
|
||||||
|
|
||||||
|
|
||||||
|
_din_a = (
|
||||||
|
PaperSize.A0,
|
||||||
|
PaperSize.A1,
|
||||||
|
PaperSize.A2,
|
||||||
|
PaperSize.A3,
|
||||||
|
PaperSize.A4,
|
||||||
|
PaperSize.A5,
|
||||||
|
PaperSize.A6,
|
||||||
|
PaperSize.A7,
|
||||||
|
PaperSize.A8,
|
||||||
|
)
|
||||||
|
|
@ -0,0 +1,52 @@
|
||||||
|
"""Helpers for working with PDF types."""
|
||||||
|
|
||||||
|
from typing import List, Union
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Python 3.8+: https://peps.python.org/pep-0586
|
||||||
|
from typing import Literal # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
from typing_extensions import Literal # type: ignore[misc]
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Python 3.10+: https://www.python.org/dev/peps/pep-0484/
|
||||||
|
from typing import TypeAlias # type: ignore[attr-defined]
|
||||||
|
except ImportError:
|
||||||
|
from typing_extensions import TypeAlias
|
||||||
|
|
||||||
|
from .generic._base import NameObject, NullObject, NumberObject
|
||||||
|
from .generic._data_structures import ArrayObject, Destination
|
||||||
|
from .generic._outline import OutlineItem
|
||||||
|
|
||||||
|
BorderArrayType: TypeAlias = List[Union[NameObject, NumberObject, ArrayObject]]
|
||||||
|
OutlineItemType: TypeAlias = Union[OutlineItem, Destination]
|
||||||
|
FitType: TypeAlias = Literal[
|
||||||
|
"/Fit", "/XYZ", "/FitH", "/FitV", "/FitR", "/FitB", "/FitBH", "/FitBV"
|
||||||
|
]
|
||||||
|
# Those go with the FitType: They specify values for the fit
|
||||||
|
ZoomArgType: TypeAlias = Union[NumberObject, NullObject, float]
|
||||||
|
ZoomArgsType: TypeAlias = List[ZoomArgType]
|
||||||
|
|
||||||
|
# Recursive types like the following are not yet supported by mypy:
|
||||||
|
# OutlineType = List[Union[Destination, "OutlineType"]]
|
||||||
|
# See https://github.com/python/mypy/issues/731
|
||||||
|
# Hence use this for the moment:
|
||||||
|
OutlineType = List[Union[Destination, List[Union[Destination, List[Destination]]]]]
|
||||||
|
|
||||||
|
LayoutType: TypeAlias = Literal[
|
||||||
|
"/NoLayout",
|
||||||
|
"/SinglePage",
|
||||||
|
"/OneColumn",
|
||||||
|
"/TwoColumnLeft",
|
||||||
|
"/TwoColumnRight",
|
||||||
|
"/TwoPageLeft",
|
||||||
|
"/TwoPageRight",
|
||||||
|
]
|
||||||
|
PagemodeType: TypeAlias = Literal[
|
||||||
|
"/UseNone",
|
||||||
|
"/UseOutlines",
|
||||||
|
"/UseThumbs",
|
||||||
|
"/FullScreen",
|
||||||
|
"/UseOC",
|
||||||
|
"/UseAttachments",
|
||||||
|
]
|
||||||
|
|
@ -0,0 +1,525 @@
|
||||||
|
"""
|
||||||
|
Anything related to XMP metadata.
|
||||||
|
|
||||||
|
See https://en.wikipedia.org/wiki/Extensible_Metadata_Platform
|
||||||
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import decimal
|
||||||
|
import re
|
||||||
|
from typing import (
|
||||||
|
Any,
|
||||||
|
Callable,
|
||||||
|
Dict,
|
||||||
|
Iterator,
|
||||||
|
List,
|
||||||
|
Optional,
|
||||||
|
TypeVar,
|
||||||
|
Union,
|
||||||
|
cast,
|
||||||
|
)
|
||||||
|
from xml.dom.minidom import Document
|
||||||
|
from xml.dom.minidom import Element as XmlElement
|
||||||
|
from xml.dom.minidom import parseString
|
||||||
|
from xml.parsers.expat import ExpatError
|
||||||
|
|
||||||
|
from ._utils import (
|
||||||
|
StreamType,
|
||||||
|
deprecate_with_replacement,
|
||||||
|
deprecation_with_replacement,
|
||||||
|
)
|
||||||
|
from .errors import PdfReadError
|
||||||
|
from .generic import ContentStream, PdfObject
|
||||||
|
|
||||||
|
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||||
|
DC_NAMESPACE = "http://purl.org/dc/elements/1.1/"
|
||||||
|
XMP_NAMESPACE = "http://ns.adobe.com/xap/1.0/"
|
||||||
|
PDF_NAMESPACE = "http://ns.adobe.com/pdf/1.3/"
|
||||||
|
XMPMM_NAMESPACE = "http://ns.adobe.com/xap/1.0/mm/"
|
||||||
|
|
||||||
|
# What is the PDFX namespace, you might ask? I might ask that too. It's
|
||||||
|
# a completely undocumented namespace used to place "custom metadata"
|
||||||
|
# properties, which are arbitrary metadata properties with no semantic or
|
||||||
|
# documented meaning. Elements in the namespace are key/value-style storage,
|
||||||
|
# where the element name is the key and the content is the value. The keys
|
||||||
|
# are transformed into valid XML identifiers by substituting an invalid
|
||||||
|
# identifier character with \u2182 followed by the unicode hex ID of the
|
||||||
|
# original character. A key like "my car" is therefore "my\u21820020car".
|
||||||
|
#
|
||||||
|
# \u2182, in case you're wondering, is the unicode character
|
||||||
|
# \u{ROMAN NUMERAL TEN THOUSAND}, a straightforward and obvious choice for
|
||||||
|
# escaping characters.
|
||||||
|
#
|
||||||
|
# Intentional users of the pdfx namespace should be shot on sight. A
|
||||||
|
# custom data schema and sensical XML elements could be used instead, as is
|
||||||
|
# suggested by Adobe's own documentation on XMP (under "Extensibility of
|
||||||
|
# Schemas").
|
||||||
|
#
|
||||||
|
# Information presented here on the /pdfx/ schema is a result of limited
|
||||||
|
# reverse engineering, and does not constitute a full specification.
|
||||||
|
PDFX_NAMESPACE = "http://ns.adobe.com/pdfx/1.3/"
|
||||||
|
|
||||||
|
iso8601 = re.compile(
|
||||||
|
"""
|
||||||
|
(?P<year>[0-9]{4})
|
||||||
|
(-
|
||||||
|
(?P<month>[0-9]{2})
|
||||||
|
(-
|
||||||
|
(?P<day>[0-9]+)
|
||||||
|
(T
|
||||||
|
(?P<hour>[0-9]{2}):
|
||||||
|
(?P<minute>[0-9]{2})
|
||||||
|
(:(?P<second>[0-9]{2}(.[0-9]+)?))?
|
||||||
|
(?P<tzd>Z|[-+][0-9]{2}:[0-9]{2})
|
||||||
|
)?
|
||||||
|
)?
|
||||||
|
)?
|
||||||
|
""",
|
||||||
|
re.VERBOSE,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
K = TypeVar("K")
|
||||||
|
|
||||||
|
|
||||||
|
def _identity(value: K) -> K:
|
||||||
|
return value
|
||||||
|
|
||||||
|
|
||||||
|
def _converter_date(value: str) -> datetime.datetime:
|
||||||
|
matches = iso8601.match(value)
|
||||||
|
if matches is None:
|
||||||
|
raise ValueError(f"Invalid date format: {value}")
|
||||||
|
year = int(matches.group("year"))
|
||||||
|
month = int(matches.group("month") or "1")
|
||||||
|
day = int(matches.group("day") or "1")
|
||||||
|
hour = int(matches.group("hour") or "0")
|
||||||
|
minute = int(matches.group("minute") or "0")
|
||||||
|
second = decimal.Decimal(matches.group("second") or "0")
|
||||||
|
seconds_dec = second.to_integral(decimal.ROUND_FLOOR)
|
||||||
|
milliseconds_dec = (second - seconds_dec) * 1000000
|
||||||
|
|
||||||
|
seconds = int(seconds_dec)
|
||||||
|
milliseconds = int(milliseconds_dec)
|
||||||
|
|
||||||
|
tzd = matches.group("tzd") or "Z"
|
||||||
|
dt = datetime.datetime(year, month, day, hour, minute, seconds, milliseconds)
|
||||||
|
if tzd != "Z":
|
||||||
|
tzd_hours, tzd_minutes = (int(x) for x in tzd.split(":"))
|
||||||
|
tzd_hours *= -1
|
||||||
|
if tzd_hours < 0:
|
||||||
|
tzd_minutes *= -1
|
||||||
|
dt = dt + datetime.timedelta(hours=tzd_hours, minutes=tzd_minutes)
|
||||||
|
return dt
|
||||||
|
|
||||||
|
|
||||||
|
def _getter_bag(
|
||||||
|
namespace: str, name: str
|
||||||
|
) -> Callable[["XmpInformation"], Optional[List[str]]]:
|
||||||
|
def get(self: "XmpInformation") -> Optional[List[str]]:
|
||||||
|
cached = self.cache.get(namespace, {}).get(name)
|
||||||
|
if cached:
|
||||||
|
return cached
|
||||||
|
retval = []
|
||||||
|
for element in self.get_element("", namespace, name):
|
||||||
|
bags = element.getElementsByTagNameNS(RDF_NAMESPACE, "Bag")
|
||||||
|
if len(bags):
|
||||||
|
for bag in bags:
|
||||||
|
for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
|
||||||
|
value = self._get_text(item)
|
||||||
|
retval.append(value)
|
||||||
|
ns_cache = self.cache.setdefault(namespace, {})
|
||||||
|
ns_cache[name] = retval
|
||||||
|
return retval
|
||||||
|
|
||||||
|
return get
|
||||||
|
|
||||||
|
|
||||||
|
def _getter_seq(
|
||||||
|
namespace: str, name: str, converter: Callable[[Any], Any] = _identity
|
||||||
|
) -> Callable[["XmpInformation"], Optional[List[Any]]]:
|
||||||
|
def get(self: "XmpInformation") -> Optional[List[Any]]:
|
||||||
|
cached = self.cache.get(namespace, {}).get(name)
|
||||||
|
if cached:
|
||||||
|
return cached
|
||||||
|
retval = []
|
||||||
|
for element in self.get_element("", namespace, name):
|
||||||
|
seqs = element.getElementsByTagNameNS(RDF_NAMESPACE, "Seq")
|
||||||
|
if len(seqs):
|
||||||
|
for seq in seqs:
|
||||||
|
for item in seq.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
|
||||||
|
value = self._get_text(item)
|
||||||
|
value = converter(value)
|
||||||
|
retval.append(value)
|
||||||
|
else:
|
||||||
|
value = converter(self._get_text(element))
|
||||||
|
retval.append(value)
|
||||||
|
ns_cache = self.cache.setdefault(namespace, {})
|
||||||
|
ns_cache[name] = retval
|
||||||
|
return retval
|
||||||
|
|
||||||
|
return get
|
||||||
|
|
||||||
|
|
||||||
|
def _getter_langalt(
|
||||||
|
namespace: str, name: str
|
||||||
|
) -> Callable[["XmpInformation"], Optional[Dict[Any, Any]]]:
|
||||||
|
def get(self: "XmpInformation") -> Optional[Dict[Any, Any]]:
|
||||||
|
cached = self.cache.get(namespace, {}).get(name)
|
||||||
|
if cached:
|
||||||
|
return cached
|
||||||
|
retval = {}
|
||||||
|
for element in self.get_element("", namespace, name):
|
||||||
|
alts = element.getElementsByTagNameNS(RDF_NAMESPACE, "Alt")
|
||||||
|
if len(alts):
|
||||||
|
for alt in alts:
|
||||||
|
for item in alt.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
|
||||||
|
value = self._get_text(item)
|
||||||
|
retval[item.getAttribute("xml:lang")] = value
|
||||||
|
else:
|
||||||
|
retval["x-default"] = self._get_text(element)
|
||||||
|
ns_cache = self.cache.setdefault(namespace, {})
|
||||||
|
ns_cache[name] = retval
|
||||||
|
return retval
|
||||||
|
|
||||||
|
return get
|
||||||
|
|
||||||
|
|
||||||
|
def _getter_single(
|
||||||
|
namespace: str, name: str, converter: Callable[[str], Any] = _identity
|
||||||
|
) -> Callable[["XmpInformation"], Optional[Any]]:
|
||||||
|
def get(self: "XmpInformation") -> Optional[Any]:
|
||||||
|
cached = self.cache.get(namespace, {}).get(name)
|
||||||
|
if cached:
|
||||||
|
return cached
|
||||||
|
value = None
|
||||||
|
for element in self.get_element("", namespace, name):
|
||||||
|
if element.nodeType == element.ATTRIBUTE_NODE:
|
||||||
|
value = element.nodeValue
|
||||||
|
else:
|
||||||
|
value = self._get_text(element)
|
||||||
|
break
|
||||||
|
if value is not None:
|
||||||
|
value = converter(value)
|
||||||
|
ns_cache = self.cache.setdefault(namespace, {})
|
||||||
|
ns_cache[name] = value
|
||||||
|
return value
|
||||||
|
|
||||||
|
return get
|
||||||
|
|
||||||
|
|
||||||
|
class XmpInformation(PdfObject):
|
||||||
|
"""
|
||||||
|
An object that represents Adobe XMP metadata.
|
||||||
|
Usually accessed by :py:attr:`xmp_metadata()<PyPDF2.PdfReader.xmp_metadata>`
|
||||||
|
|
||||||
|
:raises PdfReadError: if XML is invalid
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, stream: ContentStream) -> None:
|
||||||
|
self.stream = stream
|
||||||
|
try:
|
||||||
|
data = self.stream.get_data()
|
||||||
|
doc_root: Document = parseString(data)
|
||||||
|
except ExpatError as e:
|
||||||
|
raise PdfReadError(f"XML in XmpInformation was invalid: {e}")
|
||||||
|
self.rdf_root: XmlElement = doc_root.getElementsByTagNameNS(
|
||||||
|
RDF_NAMESPACE, "RDF"
|
||||||
|
)[0]
|
||||||
|
self.cache: Dict[Any, Any] = {}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def rdfRoot(self) -> XmlElement: # pragma: no cover
|
||||||
|
deprecate_with_replacement("rdfRoot", "rdf_root", "4.0.0")
|
||||||
|
return self.rdf_root
|
||||||
|
|
||||||
|
def write_to_stream(
|
||||||
|
self, stream: StreamType, encryption_key: Union[None, str, bytes]
|
||||||
|
) -> None:
|
||||||
|
self.stream.write_to_stream(stream, encryption_key)
|
||||||
|
|
||||||
|
def writeToStream(
|
||||||
|
self, stream: StreamType, encryption_key: Union[None, str, bytes]
|
||||||
|
) -> None: # pragma: no cover
|
||||||
|
"""
|
||||||
|
.. deprecated:: 1.28.0
|
||||||
|
|
||||||
|
Use :meth:`write_to_stream` instead.
|
||||||
|
"""
|
||||||
|
deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
|
||||||
|
self.write_to_stream(stream, encryption_key)
|
||||||
|
|
||||||
|
def get_element(self, about_uri: str, namespace: str, name: str) -> Iterator[Any]:
|
||||||
|
for desc in self.rdf_root.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
|
||||||
|
if desc.getAttributeNS(RDF_NAMESPACE, "about") == about_uri:
|
||||||
|
attr = desc.getAttributeNodeNS(namespace, name)
|
||||||
|
if attr is not None:
|
||||||
|
yield attr
|
||||||
|
yield from desc.getElementsByTagNameNS(namespace, name)
|
||||||
|
|
||||||
|
def getElement(
|
||||||
|
self, aboutUri: str, namespace: str, name: str
|
||||||
|
) -> Iterator[Any]: # pragma: no cover
|
||||||
|
"""
|
||||||
|
.. deprecated:: 1.28.0
|
||||||
|
|
||||||
|
Use :meth:`get_element` instead.
|
||||||
|
"""
|
||||||
|
deprecation_with_replacement("getElement", "get_element", "3.0.0")
|
||||||
|
return self.get_element(aboutUri, namespace, name)
|
||||||
|
|
||||||
|
def get_nodes_in_namespace(self, about_uri: str, namespace: str) -> Iterator[Any]:
|
||||||
|
for desc in self.rdf_root.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
|
||||||
|
if desc.getAttributeNS(RDF_NAMESPACE, "about") == about_uri:
|
||||||
|
for i in range(desc.attributes.length):
|
||||||
|
attr = desc.attributes.item(i)
|
||||||
|
if attr.namespaceURI == namespace:
|
||||||
|
yield attr
|
||||||
|
for child in desc.childNodes:
|
||||||
|
if child.namespaceURI == namespace:
|
||||||
|
yield child
|
||||||
|
|
||||||
|
def getNodesInNamespace(
|
||||||
|
self, aboutUri: str, namespace: str
|
||||||
|
) -> Iterator[Any]: # pragma: no cover
|
||||||
|
"""
|
||||||
|
.. deprecated:: 1.28.0
|
||||||
|
|
||||||
|
Use :meth:`get_nodes_in_namespace` instead.
|
||||||
|
"""
|
||||||
|
deprecation_with_replacement(
|
||||||
|
"getNodesInNamespace", "get_nodes_in_namespace", "3.0.0"
|
||||||
|
)
|
||||||
|
return self.get_nodes_in_namespace(aboutUri, namespace)
|
||||||
|
|
||||||
|
def _get_text(self, element: XmlElement) -> str:
|
||||||
|
text = ""
|
||||||
|
for child in element.childNodes:
|
||||||
|
if child.nodeType == child.TEXT_NODE:
|
||||||
|
text += child.data
|
||||||
|
return text
|
||||||
|
|
||||||
|
dc_contributor = property(_getter_bag(DC_NAMESPACE, "contributor"))
|
||||||
|
"""
|
||||||
|
Contributors to the resource (other than the authors). An unsorted
|
||||||
|
array of names.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_coverage = property(_getter_single(DC_NAMESPACE, "coverage"))
|
||||||
|
"""
|
||||||
|
Text describing the extent or scope of the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_creator = property(_getter_seq(DC_NAMESPACE, "creator"))
|
||||||
|
"""
|
||||||
|
A sorted array of names of the authors of the resource, listed in order
|
||||||
|
of precedence.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_date = property(_getter_seq(DC_NAMESPACE, "date", _converter_date))
|
||||||
|
"""
|
||||||
|
A sorted array of dates (datetime.datetime instances) of significance to
|
||||||
|
the resource. The dates and times are in UTC.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_description = property(_getter_langalt(DC_NAMESPACE, "description"))
|
||||||
|
"""
|
||||||
|
A language-keyed dictionary of textual descriptions of the content of the
|
||||||
|
resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_format = property(_getter_single(DC_NAMESPACE, "format"))
|
||||||
|
"""
|
||||||
|
The mime-type of the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_identifier = property(_getter_single(DC_NAMESPACE, "identifier"))
|
||||||
|
"""
|
||||||
|
Unique identifier of the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_language = property(_getter_bag(DC_NAMESPACE, "language"))
|
||||||
|
"""
|
||||||
|
An unordered array specifying the languages used in the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_publisher = property(_getter_bag(DC_NAMESPACE, "publisher"))
|
||||||
|
"""
|
||||||
|
An unordered array of publisher names.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_relation = property(_getter_bag(DC_NAMESPACE, "relation"))
|
||||||
|
"""
|
||||||
|
An unordered array of text descriptions of relationships to other
|
||||||
|
documents.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_rights = property(_getter_langalt(DC_NAMESPACE, "rights"))
|
||||||
|
"""
|
||||||
|
A language-keyed dictionary of textual descriptions of the rights the
|
||||||
|
user has to this resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_source = property(_getter_single(DC_NAMESPACE, "source"))
|
||||||
|
"""
|
||||||
|
Unique identifier of the work from which this resource was derived.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_subject = property(_getter_bag(DC_NAMESPACE, "subject"))
|
||||||
|
"""
|
||||||
|
An unordered array of descriptive phrases or keywrods that specify the
|
||||||
|
topic of the content of the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_title = property(_getter_langalt(DC_NAMESPACE, "title"))
|
||||||
|
"""
|
||||||
|
A language-keyed dictionary of the title of the resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dc_type = property(_getter_bag(DC_NAMESPACE, "type"))
|
||||||
|
"""
|
||||||
|
An unordered array of textual descriptions of the document type.
|
||||||
|
"""
|
||||||
|
|
||||||
|
pdf_keywords = property(_getter_single(PDF_NAMESPACE, "Keywords"))
|
||||||
|
"""
|
||||||
|
An unformatted text string representing document keywords.
|
||||||
|
"""
|
||||||
|
|
||||||
|
pdf_pdfversion = property(_getter_single(PDF_NAMESPACE, "PDFVersion"))
|
||||||
|
"""
|
||||||
|
The PDF file version, for example 1.0, 1.3.
|
||||||
|
"""
|
||||||
|
|
||||||
|
pdf_producer = property(_getter_single(PDF_NAMESPACE, "Producer"))
|
||||||
|
"""
|
||||||
|
The name of the tool that created the PDF document.
|
||||||
|
"""
|
||||||
|
|
||||||
|
xmp_create_date = property(
|
||||||
|
_getter_single(XMP_NAMESPACE, "CreateDate", _converter_date)
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
The date and time the resource was originally created. The date and
|
||||||
|
time are returned as a UTC datetime.datetime object.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmp_createDate(self) -> datetime.datetime: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_createDate", "xmp_create_date", "4.0.0")
|
||||||
|
return self.xmp_create_date
|
||||||
|
|
||||||
|
@xmp_createDate.setter
|
||||||
|
def xmp_createDate(self, value: datetime.datetime) -> None: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_createDate", "xmp_create_date", "4.0.0")
|
||||||
|
self.xmp_create_date = value
|
||||||
|
|
||||||
|
xmp_modify_date = property(
|
||||||
|
_getter_single(XMP_NAMESPACE, "ModifyDate", _converter_date)
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
The date and time the resource was last modified. The date and time
|
||||||
|
are returned as a UTC datetime.datetime object.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmp_modifyDate(self) -> datetime.datetime: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_modifyDate", "xmp_modify_date", "4.0.0")
|
||||||
|
return self.xmp_modify_date
|
||||||
|
|
||||||
|
@xmp_modifyDate.setter
|
||||||
|
def xmp_modifyDate(self, value: datetime.datetime) -> None: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_modifyDate", "xmp_modify_date", "4.0.0")
|
||||||
|
self.xmp_modify_date = value
|
||||||
|
|
||||||
|
xmp_metadata_date = property(
|
||||||
|
_getter_single(XMP_NAMESPACE, "MetadataDate", _converter_date)
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
The date and time that any metadata for this resource was last changed.
|
||||||
|
|
||||||
|
The date and time are returned as a UTC datetime.datetime object.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmp_metadataDate(self) -> datetime.datetime: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_metadataDate", "xmp_metadata_date", "4.0.0")
|
||||||
|
return self.xmp_metadata_date
|
||||||
|
|
||||||
|
@xmp_metadataDate.setter
|
||||||
|
def xmp_metadataDate(self, value: datetime.datetime) -> None: # pragma: no cover
|
||||||
|
deprecate_with_replacement("xmp_metadataDate", "xmp_metadata_date", "4.0.0")
|
||||||
|
self.xmp_metadata_date = value
|
||||||
|
|
||||||
|
xmp_creator_tool = property(_getter_single(XMP_NAMESPACE, "CreatorTool"))
|
||||||
|
"""The name of the first known tool used to create the resource."""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmp_creatorTool(self) -> str: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmp_creatorTool", "xmp_creator_tool", "3.0.0")
|
||||||
|
return self.xmp_creator_tool
|
||||||
|
|
||||||
|
@xmp_creatorTool.setter
|
||||||
|
def xmp_creatorTool(self, value: str) -> None: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmp_creatorTool", "xmp_creator_tool", "3.0.0")
|
||||||
|
self.xmp_creator_tool = value
|
||||||
|
|
||||||
|
xmpmm_document_id = property(_getter_single(XMPMM_NAMESPACE, "DocumentID"))
|
||||||
|
"""
|
||||||
|
The common identifier for all versions and renditions of this resource.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmpmm_documentId(self) -> str: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmpmm_documentId", "xmpmm_document_id", "3.0.0")
|
||||||
|
return self.xmpmm_document_id
|
||||||
|
|
||||||
|
@xmpmm_documentId.setter
|
||||||
|
def xmpmm_documentId(self, value: str) -> None: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmpmm_documentId", "xmpmm_document_id", "3.0.0")
|
||||||
|
self.xmpmm_document_id = value
|
||||||
|
|
||||||
|
xmpmm_instance_id = property(_getter_single(XMPMM_NAMESPACE, "InstanceID"))
|
||||||
|
"""
|
||||||
|
An identifier for a specific incarnation of a document, updated each
|
||||||
|
time a file is saved.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def xmpmm_instanceId(self) -> str: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmpmm_instanceId", "xmpmm_instance_id", "3.0.0")
|
||||||
|
return cast(str, self.xmpmm_instance_id)
|
||||||
|
|
||||||
|
@xmpmm_instanceId.setter
|
||||||
|
def xmpmm_instanceId(self, value: str) -> None: # pragma: no cover
|
||||||
|
deprecation_with_replacement("xmpmm_instanceId", "xmpmm_instance_id", "3.0.0")
|
||||||
|
self.xmpmm_instance_id = value
|
||||||
|
|
||||||
|
@property
|
||||||
|
def custom_properties(self) -> Dict[Any, Any]:
|
||||||
|
"""
|
||||||
|
Retrieve custom metadata properties defined in the undocumented pdfx
|
||||||
|
metadata schema.
|
||||||
|
|
||||||
|
:return: a dictionary of key/value items for custom metadata properties.
|
||||||
|
"""
|
||||||
|
if not hasattr(self, "_custom_properties"):
|
||||||
|
self._custom_properties = {}
|
||||||
|
for node in self.get_nodes_in_namespace("", PDFX_NAMESPACE):
|
||||||
|
key = node.localName
|
||||||
|
while True:
|
||||||
|
# see documentation about PDFX_NAMESPACE earlier in file
|
||||||
|
idx = key.find("\u2182")
|
||||||
|
if idx == -1:
|
||||||
|
break
|
||||||
|
key = (
|
||||||
|
key[:idx]
|
||||||
|
+ chr(int(key[idx + 1 : idx + 5], base=16))
|
||||||
|
+ key[idx + 5 :]
|
||||||
|
)
|
||||||
|
if node.nodeType == node.ATTRIBUTE_NODE:
|
||||||
|
value = node.nodeValue
|
||||||
|
else:
|
||||||
|
value = self._get_text(node)
|
||||||
|
self._custom_properties[key] = value
|
||||||
|
return self._custom_properties
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
pip
|
||||||
|
|
@ -0,0 +1,78 @@
|
||||||
|
Metadata-Version: 2.4
|
||||||
|
Name: certifi
|
||||||
|
Version: 2026.2.25
|
||||||
|
Summary: Python package for providing Mozilla's CA Bundle.
|
||||||
|
Home-page: https://github.com/certifi/python-certifi
|
||||||
|
Author: Kenneth Reitz
|
||||||
|
Author-email: me@kennethreitz.com
|
||||||
|
License: MPL-2.0
|
||||||
|
Project-URL: Source, https://github.com/certifi/python-certifi
|
||||||
|
Classifier: Development Status :: 5 - Production/Stable
|
||||||
|
Classifier: Intended Audience :: Developers
|
||||||
|
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
|
||||||
|
Classifier: Natural Language :: English
|
||||||
|
Classifier: Programming Language :: Python
|
||||||
|
Classifier: Programming Language :: Python :: 3
|
||||||
|
Classifier: Programming Language :: Python :: 3 :: Only
|
||||||
|
Classifier: Programming Language :: Python :: 3.7
|
||||||
|
Classifier: Programming Language :: Python :: 3.8
|
||||||
|
Classifier: Programming Language :: Python :: 3.9
|
||||||
|
Classifier: Programming Language :: Python :: 3.10
|
||||||
|
Classifier: Programming Language :: Python :: 3.11
|
||||||
|
Classifier: Programming Language :: Python :: 3.12
|
||||||
|
Classifier: Programming Language :: Python :: 3.13
|
||||||
|
Classifier: Programming Language :: Python :: 3.14
|
||||||
|
Requires-Python: >=3.7
|
||||||
|
License-File: LICENSE
|
||||||
|
Dynamic: author
|
||||||
|
Dynamic: author-email
|
||||||
|
Dynamic: classifier
|
||||||
|
Dynamic: description
|
||||||
|
Dynamic: home-page
|
||||||
|
Dynamic: license
|
||||||
|
Dynamic: license-file
|
||||||
|
Dynamic: project-url
|
||||||
|
Dynamic: requires-python
|
||||||
|
Dynamic: summary
|
||||||
|
|
||||||
|
Certifi: Python SSL Certificates
|
||||||
|
================================
|
||||||
|
|
||||||
|
Certifi provides Mozilla's carefully curated collection of Root Certificates for
|
||||||
|
validating the trustworthiness of SSL certificates while verifying the identity
|
||||||
|
of TLS hosts. It has been extracted from the `Requests`_ project.
|
||||||
|
|
||||||
|
Installation
|
||||||
|
------------
|
||||||
|
|
||||||
|
``certifi`` is available on PyPI. Simply install it with ``pip``::
|
||||||
|
|
||||||
|
$ pip install certifi
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
To reference the installed certificate authority (CA) bundle, you can use the
|
||||||
|
built-in function::
|
||||||
|
|
||||||
|
>>> import certifi
|
||||||
|
|
||||||
|
>>> certifi.where()
|
||||||
|
'/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
|
||||||
|
|
||||||
|
Or from the command line::
|
||||||
|
|
||||||
|
$ python -m certifi
|
||||||
|
/usr/local/lib/python3.7/site-packages/certifi/cacert.pem
|
||||||
|
|
||||||
|
Enjoy!
|
||||||
|
|
||||||
|
.. _`Requests`: https://requests.readthedocs.io/en/master/
|
||||||
|
|
||||||
|
Addition/Removal of Certificates
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
Certifi does not support any addition/removal or other modification of the
|
||||||
|
CA trust store content. This project is intended to provide a reliable and
|
||||||
|
highly portable root of trust to python deployments. Look to upstream projects
|
||||||
|
for methods to use alternate trust.
|
||||||
|
|
@ -0,0 +1,14 @@
|
||||||
|
certifi-2026.2.25.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||||
|
certifi-2026.2.25.dist-info/METADATA,sha256=4NMuGXdg_hBiRA3paKVXYcDmE3VXEBWxTvCL2xlDyPU,2474
|
||||||
|
certifi-2026.2.25.dist-info/RECORD,,
|
||||||
|
certifi-2026.2.25.dist-info/WHEEL,sha256=YCfwYGOYMi5Jhw2fU4yNgwErybb2IX5PEwBKV4ZbdBo,91
|
||||||
|
certifi-2026.2.25.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
|
||||||
|
certifi-2026.2.25.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
|
||||||
|
certifi/__init__.py,sha256=c9eaYufv1pSLl0Q8QNcMiMLLH4WquDcxdPyKjmI4opY,94
|
||||||
|
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
|
||||||
|
certifi/__pycache__/__init__.cpython-312.pyc,,
|
||||||
|
certifi/__pycache__/__main__.cpython-312.pyc,,
|
||||||
|
certifi/__pycache__/core.cpython-312.pyc,,
|
||||||
|
certifi/cacert.pem,sha256=_JFloSQDJj5-v72te-ej6sD6XTJdPHBGXyjTaQByyig,272441
|
||||||
|
certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394
|
||||||
|
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
Wheel-Version: 1.0
|
||||||
|
Generator: setuptools (82.0.0)
|
||||||
|
Root-Is-Purelib: true
|
||||||
|
Tag: py3-none-any
|
||||||
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue