Compare commits
27 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 76a47e0dd0 | |
| | 70a754fb46 | |
| | 21a7b13524 | |
| | 52bdbe95c9 | |
| | 495dda3809 | |
| | 52e83922c0 | |
| | 6cbc5285cb | |
| | 33b9630ca5 | |
| | 9346f86f73 | |
| | 8d2d6627cf | |
| | 6f198b247c | |
| | 9fb95eff41 | |
| | 7aab8a3711 | |
| | 2789cee331 | |
| | 823704cf36 | |
| | a7974f0f14 | |
| | ed412bee35 | |
| | 6cee615f26 | |
| | c4581b4d72 | |
| | f6a568bcc1 | |
| | bf6170e613 | |
| | afcd79c0cc | |
| | fdf0389936 | |
| | 5599c1694b | |
| | dff90fae6f | |
| | d33f679eae | |
| | 225e74a334 | |
.gitignore (vendored, new file, 1 addition)
@@ -0,0 +1 @@
+.DS_Store
@@ -5,7 +5,7 @@
 # Copyright © 2008-2020 Apprentice Harper et al.

 __license__ = 'GPL v3'
-__version__ = '7.0.0'
+__version__ = '7.1.0'
 __docformat__ = 'restructuredtext en'

@@ -69,8 +69,12 @@ __docformat__ = 'restructuredtext en'
 # 6.6.3 - More cleanup of kindle book names and start of support for .kinf2018
 # 6.7.0 - Handle new library in calibre.
 # 6.8.0 - Full support for .kinf2018 and new KFX encryption (Kindle for PC/Mac 2.5+)
-# 6.8.1 - Kindle key fix for Mac OS X Big Syr
-# 7.0.0 - Switched to Python 3 for calibre 5.0. Thanks to all who comtibuted
+# 6.8.1 - Kindle key fix for Mac OS X Big Sur
+# 7.0.0 - Switched to Python 3 for calibre 5.0. Thanks to all who contributed
+# 7.0.1 - More Python 3 changes. Adobe PDF decryption should now work in some cases
+# 7.0.2 - More Python 3 changes. Adobe PDF decryption should now work on PC too.
+# 7.0.3 - More Python 3 changes. Integer division in ineptpdf.py
+# 7.1.0 - Full release for calibre 5.x

 """
 Decrypt DRMed ebooks.
@@ -353,12 +353,16 @@ class Decryptor(object):

     def decompress(self, bytes):
         dc = zlib.decompressobj(-15)
-        bytes = dc.decompress(bytes)
-        ex = dc.decompress(b'Z') + dc.flush()
-        if ex:
-            bytes = bytes + ex
-        return bytes
+        try:
+            decompressed_bytes = dc.decompress(bytes)
+            ex = dc.decompress(b'Z') + dc.flush()
+            if ex:
+                decompressed_bytes = decompressed_bytes + ex
+        except:
+            # possibly not compressed by zip - just return bytes
+            return bytes
+        return decompressed_bytes

     def decrypt(self, path, data):
         if path.encode('utf-8') in self._encrypted:
             data = self._aes.decrypt(data)[16:]
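The rewritten `decompress()` above wraps the raw-deflate step in a try/except so that entries that are stored rather than deflated fall back to being returned untouched. A minimal standalone sketch of that fallback, using only the standard library (the helper name is made up):

```python
import zlib

def inflate_or_passthrough(data):
    """Try raw-deflate decompression (wbits=-15, no zlib header);
    if the payload is not actually compressed, return it unchanged."""
    dc = zlib.decompressobj(-15)
    try:
        out = dc.decompress(data)
        out += dc.decompress(b'Z') + dc.flush()   # pick up any buffered tail
    except zlib.error:
        return data          # stored (uncompressed) entry - pass through
    return out

raw = b'not compressed at all'
assert inflate_or_passthrough(raw) == raw          # passthrough branch

co = zlib.compressobj(9, zlib.DEFLATED, -15)
deflated = co.compress(raw) + co.flush()
assert inflate_or_passthrough(deflated) == raw     # inflate branch
```

The sketch catches `zlib.error` specifically rather than the bare `except:` used in the hunk, which is the only intentional difference.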
@@ -411,12 +415,12 @@ def decryptBook(userkey, inpath, outpath):
         return 1
     bookkey = rsa.decrypt(codecs.decode(bookkey.encode('ascii'), 'base64'))
     # Padded as per RSAES-PKCS1-v1_5
-    if len(bookkey) != 16:
-        if bookkey[-17] != '\x00' and bookkey[-17] != 0:
+    if len(bookkey) > 16:
+        if bookkey[-17] == '\x00' or bookkey[-17] == 0:
+            bookkey = bookkey[-16:]
+        else:
             print("Could not decrypt {0:s}. Wrong key".format(os.path.basename(inpath)))
             return 2
-        else:
-            bookkey = bookkey[-16:]
     encryption = inf.read('META-INF/encryption.xml')
     decryptor = Decryptor(bookkey, encryption)
     kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
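The new length check treats the decrypted book key as RSAES-PKCS1-v1_5 output that may still carry its padding: if more than 16 bytes come back and the byte 17 positions from the end is the 0x00 padding terminator, only the last 16 bytes are kept. A sketch of that unpadding step, with an illustrative (not spec-exact) padded layout:

```python
def strip_pkcs1_v15_padding(bookkey):
    """Reduce an RSAES-PKCS1-v1_5 style decryption result to the 16-byte
    AES book key, mirroring the check in the hunk above.
    Assumes bytes input (Python 3), where indexing yields ints."""
    if len(bookkey) > 16:
        # the zero byte that terminates the padding sits 17 bytes from the end
        if bookkey[-17] == 0:
            return bookkey[-16:]
        raise ValueError("wrong key: unexpected padding")
    return bookkey

padded = b'\x02' + b'\xaa' * 10 + b'\x00' + bytes(range(16))  # illustrative layout
assert strip_pkcs1_v15_padding(padded) == bytes(range(16))
```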
DeDRM_plugin/ineptpdf.py (Normal file → Executable file, 367 changed lines)
@@ -54,12 +54,14 @@ Decrypts Adobe ADEPT-encrypted PDF files.
__license__ = 'GPL v3'
__version__ = "9.0.0"

import codecs
import sys
import os
import re
import zlib
import struct
import hashlib
from io import BytesIO
from decimal import Decimal
import itertools
import xml.etree.ElementTree as etree
@@ -258,7 +260,8 @@ def _load_crypto_pycrypto():
     from Crypto.PublicKey import RSA as _RSA
     from Crypto.Cipher import ARC4 as _ARC4
     from Crypto.Cipher import AES as _AES
+    from Crypto.Cipher import PKCS1_v1_5 as _PKCS1_v1_5

     # ASN.1 parsing code from tlslite
     class ASN1Error(Exception):
         pass
@@ -372,7 +375,7 @@ def _load_crypto_pycrypto():

     class RSA(object):
         def __init__(self, der):
-            key = ASN1Parser([ord(x) for x in der])
+            key = ASN1Parser([x for x in der])
             key = [key.getChild(x).value for x in range(1, 4)]
             key = [self.bytesToNumber(v) for v in key]
             self._rsa = _RSA.construct(key)

@@ -384,7 +387,7 @@ def _load_crypto_pycrypto():
             return total

         def decrypt(self, data):
-            return self._rsa.decrypt(data)
+            return _PKCS1_v1_5.new(self._rsa).decrypt(data, 172)

     return (ARC4, RSA, AES)
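The `decrypt` change matters because PyCryptodome removed the raw RSA decryption primitive, so the wrapper now routes through `Crypto.Cipher.PKCS1_v1_5`, which unpads and returns a caller-chosen sentinel on failure. A small round-trip sketch of that API (key generation is for illustration only; the plugin imports the ADEPT key instead):

```python
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)                  # illustration only, not how ADEPT keys are obtained
ciphertext = PKCS1_v1_5.new(key.publickey()).encrypt(b'sixteen byte key')

sentinel = 172                            # returned instead of plaintext if the padding check fails
plaintext = PKCS1_v1_5.new(key).decrypt(ciphertext, sentinel)
assert plaintext == b'sixteen byte key'
```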
@@ -403,7 +406,6 @@ def _load_crypto():
|
||||
ARC4, RSA, AES = _load_crypto()
|
||||
|
||||
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
# Do we generate cross reference streams on output?
|
||||
@@ -440,7 +442,7 @@ def nunpack(s, default=0):
|
||||
elif l == 2:
|
||||
return struct.unpack('>H', s)[0]
|
||||
elif l == 3:
|
||||
return struct.unpack('>L', '\x00'+s)[0]
|
||||
return struct.unpack('>L', b'\x00'+s)[0]
|
||||
elif l == 4:
|
||||
return struct.unpack('>L', s)[0]
|
||||
else:
|
||||
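The `nunpack` fix pads the 3-byte case with a bytes literal, since `struct.unpack` in Python 3 refuses str arguments. For reference, the whole helper as it reads after the change; the empty and single-byte branches are filled in here from pdfminer's original helper and are not shown in the hunk:

```python
import struct

def nunpack(s, default=0):
    """Unpack a 1-4 byte big-endian unsigned integer from a bytes value."""
    l = len(s)
    if not l:
        return default
    elif l == 1:
        return s[0]                                  # bytes indexing already yields an int
    elif l == 2:
        return struct.unpack('>H', s)[0]
    elif l == 3:
        return struct.unpack('>L', b'\x00' + s)[0]   # pad to 4 bytes with a bytes literal
    elif l == 4:
        return struct.unpack('>L', s)[0]
    else:
        raise TypeError('invalid length: %d' % l)

assert nunpack(b'\x01\x02\x03') == 0x010203
```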
@@ -472,16 +474,16 @@ class PSLiteral(PSObject):
|
||||
Use PSLiteralTable.intern() instead.
|
||||
'''
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.name = name.decode('utf-8')
|
||||
return
|
||||
|
||||
def __repr__(self):
|
||||
name = []
|
||||
for char in self.name:
|
||||
if not char.isalnum():
|
||||
char = b'#%02x' % ord(char)
|
||||
char = '#%02x' % ord(char)
|
||||
name.append(char)
|
||||
return b'/%s' % ''.join(name)
|
||||
return '/%s' % ''.join(name)
|
||||
|
||||
# PSKeyword
|
||||
class PSKeyword(PSObject):
|
||||
@@ -491,7 +493,7 @@ class PSKeyword(PSObject):
|
||||
Use PSKeywordTable.intern() instead.
|
||||
'''
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.name = name.decode('utf-8')
|
||||
return
|
||||
|
||||
def __repr__(self):
|
||||
@@ -521,12 +523,12 @@ PSLiteralTable = PSSymbolTable(PSLiteral)
|
||||
PSKeywordTable = PSSymbolTable(PSKeyword)
|
||||
LIT = PSLiteralTable.intern
|
||||
KWD = PSKeywordTable.intern
|
||||
KEYWORD_BRACE_BEGIN = KWD('{')
|
||||
KEYWORD_BRACE_END = KWD('}')
|
||||
KEYWORD_ARRAY_BEGIN = KWD('[')
|
||||
KEYWORD_ARRAY_END = KWD(']')
|
||||
KEYWORD_DICT_BEGIN = KWD('<<')
|
||||
KEYWORD_DICT_END = KWD('>>')
|
||||
KEYWORD_BRACE_BEGIN = KWD(b'{')
|
||||
KEYWORD_BRACE_END = KWD(b'}')
|
||||
KEYWORD_ARRAY_BEGIN = KWD(b'[')
|
||||
KEYWORD_ARRAY_END = KWD(b']')
|
||||
KEYWORD_DICT_BEGIN = KWD(b'<<')
|
||||
KEYWORD_DICT_END = KWD(b'>>')
|
||||
|
||||
|
||||
def literal_name(x):
|
||||
@@ -548,18 +550,18 @@ def keyword_name(x):
|
||||
|
||||
## PSBaseParser
|
||||
##
|
||||
EOL = re.compile(r'[\r\n]')
|
||||
SPC = re.compile(r'\s')
|
||||
NONSPC = re.compile(r'\S')
|
||||
HEX = re.compile(r'[0-9a-fA-F]')
|
||||
END_LITERAL = re.compile(r'[#/%\[\]()<>{}\s]')
|
||||
END_HEX_STRING = re.compile(r'[^\s0-9a-fA-F]')
|
||||
HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')
|
||||
END_NUMBER = re.compile(r'[^0-9]')
|
||||
END_KEYWORD = re.compile(r'[#/%\[\]()<>{}\s]')
|
||||
END_STRING = re.compile(r'[()\134]')
|
||||
OCT_STRING = re.compile(r'[0-7]')
|
||||
ESC_STRING = { 'b':8, 't':9, 'n':10, 'f':12, 'r':13, '(':40, ')':41, '\\':92 }
|
||||
EOL = re.compile(rb'[\r\n]')
|
||||
SPC = re.compile(rb'\s')
|
||||
NONSPC = re.compile(rb'\S')
|
||||
HEX = re.compile(rb'[0-9a-fA-F]')
|
||||
END_LITERAL = re.compile(rb'[#/%\[\]()<>{}\s]')
|
||||
END_HEX_STRING = re.compile(rb'[^\s0-9a-fA-F]')
|
||||
HEX_PAIR = re.compile(rb'[0-9a-fA-F]{2}|.')
|
||||
END_NUMBER = re.compile(rb'[^0-9]')
|
||||
END_KEYWORD = re.compile(rb'[#/%\[\]()<>{}\s]')
|
||||
END_STRING = re.compile(rb'[()\\]')
|
||||
OCT_STRING = re.compile(rb'[0-7]')
|
||||
ESC_STRING = { b'b':8, b't':9, b'n':10, b'f':12, b'r':13, b'(':40, b')':41, b'\\':92 }
|
||||
|
||||
class PSBaseParser(object):
|
||||
|
||||
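Because the parser buffer is now bytes, every one of these regular expressions has to be compiled from a bytes pattern (`rb'...'`); a str pattern applied to a bytes buffer raises TypeError in Python 3. A two-line demonstration:

```python
import re

buf = b'<< /Length 42 >>'

str_pattern = re.compile(r'\d+')
bytes_pattern = re.compile(rb'\d+')

try:
    str_pattern.search(buf)          # str pattern against a bytes buffer
except TypeError as exc:
    print('str pattern rejected:', exc)

m = bytes_pattern.search(buf)        # bytes pattern matches bytes
print(m.group(0))                    # b'42'
```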
@@ -591,7 +593,6 @@ class PSBaseParser(object):
|
||||
if not pos:
|
||||
pos = self.bufpos+self.charpos
|
||||
self.fp.seek(pos)
|
||||
##print >>sys.stderr, 'poll(%d): %r' % (pos, self.fp.read(n))
|
||||
self.fp.seek(pos0)
|
||||
return
|
||||
|
||||
@@ -602,7 +603,7 @@ class PSBaseParser(object):
|
||||
self.fp.seek(pos)
|
||||
# reset the status for nextline()
|
||||
self.bufpos = pos
|
||||
self.buf = ''
|
||||
self.buf = b''
|
||||
self.charpos = 0
|
||||
# reset the status for nexttoken()
|
||||
self.parse1 = self.parse_main
|
||||
@@ -624,32 +625,32 @@ class PSBaseParser(object):
|
||||
if not m:
|
||||
return (self.parse_main, len(s))
|
||||
j = m.start(0)
|
||||
c = s[j]
|
||||
c = bytes([s[j]])
|
||||
self.tokenstart = self.bufpos+j
|
||||
if c == '%':
|
||||
self.token = '%'
|
||||
if c == b'%':
|
||||
self.token = c
|
||||
return (self.parse_comment, j+1)
|
||||
if c == '/':
|
||||
self.token = ''
|
||||
if c == b'/':
|
||||
self.token = b''
|
||||
return (self.parse_literal, j+1)
|
||||
if c in '-+' or c.isdigit():
|
||||
if c in b'-+' or c.isdigit():
|
||||
self.token = c
|
||||
return (self.parse_number, j+1)
|
||||
if c == '.':
|
||||
if c == b'.':
|
||||
self.token = c
|
||||
return (self.parse_decimal, j+1)
|
||||
if c.isalpha():
|
||||
self.token = c
|
||||
return (self.parse_keyword, j+1)
|
||||
if c == '(':
|
||||
self.token = ''
|
||||
if c == b'(':
|
||||
self.token = b''
|
||||
self.paren = 1
|
||||
return (self.parse_string, j+1)
|
||||
if c == '<':
|
||||
self.token = ''
|
||||
if c == b'<':
|
||||
self.token = b''
|
||||
return (self.parse_wopen, j+1)
|
||||
if c == '>':
|
||||
self.token = ''
|
||||
if c == b'>':
|
||||
self.token = b''
|
||||
return (self.parse_wclose, j+1)
|
||||
self.add_token(KWD(c))
|
||||
return (self.parse_main, j+1)
|
||||
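The repeated `c = bytes([s[j]])` rewrites exist because indexing a bytes object in Python 3 returns an int, not a one-character string, so comparisons against `'%'`, `'/'` and friends would silently stop matching. A minimal illustration:

```python
s = b'/Name 123'
j = 0

assert s[j] == 47                 # indexing bytes gives an int (0x2F)
c = bytes([s[j]])                 # rewrap as a length-1 bytes object
assert c == b'/'                  # now comparable against byte literals
assert s[j:j+1] == b'/'           # slicing is an equivalent spelling of the same idea
```

The same reason drives the `ESC_STRING` table switching to byte keys, so the rewrapped `c` can be used for lookups.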
@@ -676,20 +677,20 @@ class PSBaseParser(object):
|
||||
return (self.parse_literal, len(s))
|
||||
j = m.start(0)
|
||||
self.token += s[i:j]
|
||||
c = s[j]
|
||||
if c == '#':
|
||||
self.hex = ''
|
||||
c = bytes([s[j]])
|
||||
if c == b'#':
|
||||
self.hex = b''
|
||||
return (self.parse_literal_hex, j+1)
|
||||
self.add_token(LIT(self.token))
|
||||
return (self.parse_main, j)
|
||||
|
||||
def parse_literal_hex(self, s, i):
|
||||
c = s[i]
|
||||
c = bytes([s[i]])
|
||||
if HEX.match(c) and len(self.hex) < 2:
|
||||
self.hex += c
|
||||
return (self.parse_literal_hex, i+1)
|
||||
if self.hex:
|
||||
self.token += chr(int(self.hex, 16))
|
||||
self.token += bytes([int(self.hex, 16)])
|
||||
return (self.parse_literal, i)
|
||||
|
||||
def parse_number(self, s, i):
|
||||
@@ -699,8 +700,8 @@ class PSBaseParser(object):
|
||||
return (self.parse_number, len(s))
|
||||
j = m.start(0)
|
||||
self.token += s[i:j]
|
||||
c = s[j]
|
||||
if c == '.':
|
||||
c = bytes([s[j]])
|
||||
if c == b'.':
|
||||
self.token += c
|
||||
return (self.parse_decimal, j+1)
|
||||
try:
|
||||
@@ -716,7 +717,7 @@ class PSBaseParser(object):
|
||||
return (self.parse_decimal, len(s))
|
||||
j = m.start(0)
|
||||
self.token += s[i:j]
|
||||
self.add_token(Decimal(self.token))
|
||||
self.add_token(Decimal(self.token.decode('utf-8')))
|
||||
return (self.parse_main, j)
|
||||
|
||||
def parse_keyword(self, s, i):
|
||||
@@ -742,58 +743,59 @@ class PSBaseParser(object):
|
||||
return (self.parse_string, len(s))
|
||||
j = m.start(0)
|
||||
self.token += s[i:j]
|
||||
c = s[j]
|
||||
if c == '\\':
|
||||
c = bytes([s[j]])
|
||||
if c == b'\\':
|
||||
self.oct = ''
|
||||
return (self.parse_string_1, j+1)
|
||||
if c == '(':
|
||||
if c == b'(':
|
||||
self.paren += 1
|
||||
self.token += c
|
||||
return (self.parse_string, j+1)
|
||||
if c == ')':
|
||||
if c == b')':
|
||||
self.paren -= 1
|
||||
if self.paren:
|
||||
self.token += c
|
||||
return (self.parse_string, j+1)
|
||||
self.add_token(self.token)
|
||||
return (self.parse_main, j+1)
|
||||
|
||||
def parse_string_1(self, s, i):
|
||||
c = s[i]
|
||||
c = bytes([s[i]])
|
||||
if OCT_STRING.match(c) and len(self.oct) < 3:
|
||||
self.oct += c
|
||||
return (self.parse_string_1, i+1)
|
||||
if self.oct:
|
||||
self.token += chr(int(self.oct, 8))
|
||||
self.token += bytes([int(self.oct, 8)])
|
||||
return (self.parse_string, i)
|
||||
if c in ESC_STRING:
|
||||
self.token += chr(ESC_STRING[c])
|
||||
self.token += bytes([ESC_STRING[c]])
|
||||
return (self.parse_string, i+1)
|
||||
|
||||
def parse_wopen(self, s, i):
|
||||
c = s[i]
|
||||
c = bytes([s[i]])
|
||||
if c.isspace() or HEX.match(c):
|
||||
return (self.parse_hexstring, i)
|
||||
if c == '<':
|
||||
if c == b'<':
|
||||
self.add_token(KEYWORD_DICT_BEGIN)
|
||||
i += 1
|
||||
return (self.parse_main, i)
|
||||
|
||||
def parse_wclose(self, s, i):
|
||||
c = s[i]
|
||||
if c == '>':
|
||||
c = bytes([s[i]])
|
||||
if c == b'>':
|
||||
self.add_token(KEYWORD_DICT_END)
|
||||
i += 1
|
||||
return (self.parse_main, i)
|
||||
|
||||
def parse_hexstring(self, s, i):
|
||||
m = END_HEX_STRING.search(s, i)
|
||||
if not m:
|
||||
m1 = END_HEX_STRING.search(s, i)
|
||||
if not m1:
|
||||
self.token += s[i:]
|
||||
return (self.parse_hexstring, len(s))
|
||||
j = m.start(0)
|
||||
j = m1.start(0)
|
||||
self.token += s[i:j]
|
||||
token = HEX_PAIR.sub(lambda m: chr(int(m.group(0), 16)),
|
||||
SPC.sub('', self.token))
|
||||
token = HEX_PAIR.sub(lambda m2: bytes([int(m2.group(0), 16)]),
|
||||
SPC.sub(b'', self.token))
|
||||
self.add_token(token)
|
||||
return (self.parse_main, j)
|
||||
|
||||
@@ -808,15 +810,15 @@ class PSBaseParser(object):
|
||||
'''
|
||||
Fetches a next line that ends either with \\r or \\n.
|
||||
'''
|
||||
linebuf = ''
|
||||
linebuf = b''
|
||||
linepos = self.bufpos + self.charpos
|
||||
eol = False
|
||||
while 1:
|
||||
self.fillbuf()
|
||||
if eol:
|
||||
c = self.buf[self.charpos]
|
||||
c = bytes([self.buf[self.charpos]])
|
||||
# handle '\r\n'
|
||||
if c == '\n':
|
||||
if c == b'\n':
|
||||
linebuf += c
|
||||
self.charpos += 1
|
||||
break
|
||||
@@ -824,7 +826,7 @@ class PSBaseParser(object):
|
||||
if m:
|
||||
linebuf += self.buf[self.charpos:m.end(0)]
|
||||
self.charpos = m.end(0)
|
||||
if linebuf[-1] == '\r':
|
||||
if bytes([linebuf[-1]]) == b'\r':
|
||||
eol = True
|
||||
else:
|
||||
break
|
||||
@@ -840,7 +842,7 @@ class PSBaseParser(object):
|
||||
'''
|
||||
self.fp.seek(0, 2)
|
||||
pos = self.fp.tell()
|
||||
buf = ''
|
||||
buf = b''
|
||||
while 0 < pos:
|
||||
prevpos = pos
|
||||
pos = max(0, pos-self.BUFSIZ)
|
||||
@@ -848,13 +850,13 @@ class PSBaseParser(object):
|
||||
s = self.fp.read(prevpos-pos)
|
||||
if not s: break
|
||||
while 1:
|
||||
n = max(s.rfind('\r'), s.rfind('\n'))
|
||||
n = max(s.rfind(b'\r'), s.rfind(b'\n'))
|
||||
if n == -1:
|
||||
buf = s + buf
|
||||
break
|
||||
yield s[n:]+buf
|
||||
s = s[:n]
|
||||
buf = ''
|
||||
buf = b''
|
||||
return
|
||||
|
||||
|
||||
@@ -910,17 +912,17 @@ class PSStackParser(PSBaseParser):
|
||||
|
||||
def nextobject(self, direct=False):
|
||||
'''
|
||||
Yields a list of objects: keywords, literals, strings,
|
||||
Yields a list of objects: keywords, literals, strings (byte arrays),
|
||||
numbers, arrays and dictionaries. Arrays and dictionaries
|
||||
are represented as Python sequence and dictionaries.
|
||||
'''
|
||||
while not self.results:
|
||||
(pos, token) = self.nexttoken()
|
||||
##print (pos,token), (self.curtype, self.curstack)
|
||||
if (isinstance(token, int) or
|
||||
isinstance(token, Decimal) or
|
||||
isinstance(token, bool) or
|
||||
isinstance(token, str) or
|
||||
isinstance(token, bytearray) or
|
||||
isinstance(token, bytes) or
|
||||
isinstance(token, PSLiteral)):
|
||||
# normal token
|
||||
self.push((pos, token))
|
||||
@@ -963,10 +965,10 @@ class PSStackParser(PSBaseParser):
|
||||
return obj
|
||||
|
||||
|
||||
LITERAL_CRYPT = PSLiteralTable.intern('Crypt')
|
||||
LITERALS_FLATE_DECODE = (PSLiteralTable.intern('FlateDecode'), PSLiteralTable.intern('Fl'))
|
||||
LITERALS_LZW_DECODE = (PSLiteralTable.intern('LZWDecode'), PSLiteralTable.intern('LZW'))
|
||||
LITERALS_ASCII85_DECODE = (PSLiteralTable.intern('ASCII85Decode'), PSLiteralTable.intern('A85'))
|
||||
LITERAL_CRYPT = LIT(b'Crypt')
|
||||
LITERALS_FLATE_DECODE = (LIT(b'FlateDecode'), LIT(b'Fl'))
|
||||
LITERALS_LZW_DECODE = (LIT(b'LZWDecode'), LIT(b'LZW'))
|
||||
LITERALS_ASCII85_DECODE = (LIT(b'ASCII85Decode'), LIT(b'A85'))
|
||||
|
||||
|
||||
## PDF Objects
|
||||
@@ -1020,7 +1022,7 @@ def resolve_all(x):
|
||||
if isinstance(x, list):
|
||||
x = [ resolve_all(v) for v in x ]
|
||||
elif isinstance(x, dict):
|
||||
for (k,v) in x.iteritems():
|
||||
for (k,v) in iter(x.items()):
|
||||
x[k] = resolve_all(v)
|
||||
return x
|
||||
|
||||
@@ -1028,13 +1030,13 @@ def decipher_all(decipher, objid, genno, x):
|
||||
'''
|
||||
Recursively decipher X.
|
||||
'''
|
||||
if isinstance(x, str):
|
||||
if isinstance(x, bytearray) or isinstance(x,bytes):
|
||||
return decipher(objid, genno, x)
|
||||
decf = lambda v: decipher_all(decipher, objid, genno, v)
|
||||
if isinstance(x, list):
|
||||
x = [decf(v) for v in x]
|
||||
elif isinstance(x, dict):
|
||||
x = dict((k, decf(v)) for (k, v) in x.iteritems())
|
||||
x = dict((k, decf(v)) for (k, v) in iter(x.items()))
|
||||
return x
|
||||
|
||||
|
||||
@@ -1065,7 +1067,7 @@ def num_value(x):
|
||||
|
||||
def str_value(x):
|
||||
x = resolve1(x)
|
||||
if not isinstance(x, str):
|
||||
if not (isinstance(x, bytearray) or isinstance(x, bytes)):
|
||||
if STRICT:
|
||||
raise PDFTypeError('String required: %r' % x)
|
||||
return ''
|
||||
@@ -1166,7 +1168,6 @@ class PDFStream(PDFObject):
|
||||
if 'Filter' not in self.dic:
|
||||
self.data = data
|
||||
self.rawdata = None
|
||||
##print self.dict
|
||||
return
|
||||
filters = self.dic['Filter']
|
||||
if not isinstance(filters, list):
|
||||
@@ -1176,7 +1177,7 @@ class PDFStream(PDFObject):
|
||||
# will get errors if the document is encrypted.
|
||||
data = zlib.decompress(data)
|
||||
elif f in LITERALS_LZW_DECODE:
|
||||
data = ''.join(LZWDecoder(BytesIO(data)).run())
|
||||
data = b''.join(LZWDecoder(BytesIO(data)).run())
|
||||
elif f in LITERALS_ASCII85_DECODE:
|
||||
data = ascii85decode(data)
|
||||
elif f == LITERAL_CRYPT:
|
||||
@@ -1204,7 +1205,7 @@ class PDFStream(PDFObject):
|
||||
pred = data[i]
|
||||
ent1 = data[i+1:i+1+columns]
|
||||
if pred == b'\x02':
|
||||
ent1 = ''.join(bytes([(a+b) & 255]) \
|
||||
ent1 = b''.join(bytes([(a+b) & 255]) \
|
||||
for (a,b) in zip(ent0,ent1))
|
||||
buf += ent1
|
||||
ent0 = ent1
|
||||
@@ -1239,11 +1240,11 @@ class PDFEncryptionError(PDFException): pass
|
||||
class PDFPasswordIncorrect(PDFEncryptionError): pass
|
||||
|
||||
# some predefined literals and keywords.
|
||||
LITERAL_OBJSTM = PSLiteralTable.intern('ObjStm')
|
||||
LITERAL_XREF = PSLiteralTable.intern('XRef')
|
||||
LITERAL_PAGE = PSLiteralTable.intern('Page')
|
||||
LITERAL_PAGES = PSLiteralTable.intern('Pages')
|
||||
LITERAL_CATALOG = PSLiteralTable.intern('Catalog')
|
||||
LITERAL_OBJSTM = LIT(b'ObjStm')
|
||||
LITERAL_XREF = LIT(b'XRef')
|
||||
LITERAL_PAGE = LIT(b'Page')
|
||||
LITERAL_PAGES = LIT(b'Pages')
|
||||
LITERAL_CATALOG = LIT(b'Catalog')
|
||||
|
||||
|
||||
## XRefs
|
||||
@@ -1261,7 +1262,7 @@ class PDFXRef(object):
|
||||
return '<PDFXRef: objs=%d>' % len(self.offsets)
|
||||
|
||||
def objids(self):
|
||||
return self.offsets.iterkeys()
|
||||
return iter(self.offsets.keys())
|
||||
|
||||
def load(self, parser):
|
||||
self.offsets = {}
|
||||
@@ -1272,10 +1273,10 @@ class PDFXRef(object):
|
||||
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
|
||||
if not line:
|
||||
raise PDFNoValidXRef('Premature eof: %r' % parser)
|
||||
if line.startswith('trailer'):
|
||||
if line.startswith(b'trailer'):
|
||||
parser.seek(pos)
|
||||
break
|
||||
f = line.strip().split(' ')
|
||||
f = line.strip().split(b' ')
|
||||
if len(f) != 2:
|
||||
raise PDFNoValidXRef('Trailer not found: %r: line=%r' % (parser, line))
|
||||
try:
|
||||
@@ -1287,16 +1288,17 @@ class PDFXRef(object):
|
||||
(_, line) = parser.nextline()
|
||||
except PSEOF:
|
||||
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
|
||||
f = line.strip().split(' ')
|
||||
f = line.strip().split(b' ')
|
||||
if len(f) != 3:
|
||||
raise PDFNoValidXRef('Invalid XRef format: %r, line=%r' % (parser, line))
|
||||
(pos, genno, use) = f
|
||||
if use != 'n': continue
|
||||
self.offsets[objid] = (int(genno), int(pos))
|
||||
if use != b'n':
|
||||
continue
|
||||
self.offsets[objid] = (int(genno.decode('utf-8')), int(pos.decode('utf-8')))
|
||||
self.load_trailer(parser)
|
||||
return
|
||||
|
||||
KEYWORD_TRAILER = PSKeywordTable.intern('trailer')
|
||||
KEYWORD_TRAILER = KWD(b'trailer')
|
||||
def load_trailer(self, parser):
|
||||
try:
|
||||
(_,kwd) = parser.nexttoken()
|
||||
@@ -1401,7 +1403,8 @@ class PDFDocument(object):
|
||||
# set_parser(parser)
|
||||
# Associates the document with an (already initialized) parser object.
|
||||
def set_parser(self, parser):
|
||||
if self.parser: return
|
||||
if self.parser:
|
||||
return
|
||||
self.parser = parser
|
||||
# The document is set to be temporarily ready during collecting
|
||||
# all the basic information about the document, e.g.
|
||||
@@ -1423,13 +1426,13 @@ class PDFDocument(object):
|
||||
dict_value(trailer['Encrypt']))
|
||||
# fix for bad files
|
||||
except:
|
||||
self.encryption = ('ffffffffffffffffffffffffffffffffffff',
|
||||
self.encryption = (b'ffffffffffffffffffffffffffffffffffff',
|
||||
dict_value(trailer['Encrypt']))
|
||||
if 'Root' in trailer:
|
||||
self.set_root(dict_value(trailer['Root']))
|
||||
break
|
||||
else:
|
||||
raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
|
||||
else:
|
||||
raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
|
||||
# The document is set to be non-ready again, until all the
|
||||
# proper initialization (asking the password key and
|
||||
# verifying the access permission, so on) is finished.
|
||||
@@ -1450,7 +1453,7 @@ class PDFDocument(object):
|
||||
# Perform the initialization with a given password.
|
||||
# This step is mandatory even if there's no password associated
|
||||
# with the document.
|
||||
def initialize(self, password=''):
|
||||
def initialize(self, password=b''):
|
||||
if not self.encryption:
|
||||
self.is_printable = self.is_modifiable = self.is_extractable = True
|
||||
self.ready = True
|
||||
@@ -1477,14 +1480,14 @@ class PDFDocument(object):
|
||||
def genkey_adobe_ps(self, param):
|
||||
# nice little offline principal keys dictionary
|
||||
# global static principal key for German Onleihe / Bibliothek Digital
|
||||
principalkeys = { 'bibliothek-digital.de': 'rRwGv2tbpKov1krvv7PO0ws9S436/lArPlfipz5Pqhw='.decode('base64')}
|
||||
principalkeys = { b'bibliothek-digital.de': codecs.decode(b'rRwGv2tbpKov1krvv7PO0ws9S436/lArPlfipz5Pqhw=','base64')}
|
||||
self.is_printable = self.is_modifiable = self.is_extractable = True
|
||||
length = int_value(param.get('Length', 0)) / 8
|
||||
length = int_value(param.get('Length', 0)) // 8
|
||||
edcdata = str_value(param.get('EDCData')).decode('base64')
|
||||
pdrllic = str_value(param.get('PDRLLic')).decode('base64')
|
||||
pdrlpol = str_value(param.get('PDRLPol')).decode('base64')
|
||||
edclist = []
|
||||
for pair in edcdata.split('\n'):
|
||||
for pair in edcdata.split(b'\n'):
|
||||
edclist.append(pair)
|
||||
# principal key request
|
||||
for key in principalkeys:
|
||||
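Two Python 3 conversions recur in this hunk: `str.decode('base64')` is gone, so base64 material goes through `codecs.decode(..., 'base64')`, and `/` on integers now produces a float, so the bit-to-byte length conversion needs `//`. A quick check (the key below is the Onleihe principal key already present in the hunk):

```python
import codecs

encoded = b'rRwGv2tbpKov1krvv7PO0ws9S436/lArPlfipz5Pqhw='
key = codecs.decode(encoded, 'base64')     # replaces Python 2's encoded.decode('base64')
assert len(key) == 32

length_bits = 128
assert length_bits // 8 == 16              # '/' would give 16.0, which breaks slicing
assert isinstance(length_bits / 8, float)
```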
@@ -1493,9 +1496,9 @@ class PDFDocument(object):
|
||||
else:
|
||||
raise ADEPTError('Cannot find principal key for this pdf')
|
||||
shakey = SHA256(principalkey)
|
||||
ivector = 16 * chr(0)
|
||||
ivector = bytes(16) # 16 zero bytes
|
||||
plaintext = AES.new(shakey,AES.MODE_CBC,ivector).decrypt(edclist[9].decode('base64'))
|
||||
if plaintext[-16:] != 16 * chr(16):
|
||||
if plaintext[-16:] != bytearray(b'\0x10')*16:
|
||||
raise ADEPTError('Offlinekey cannot be decrypted, aborting ...')
|
||||
pdrlpol = AES.new(plaintext[16:32],AES.MODE_CBC,edclist[2].decode('base64')).decrypt(pdrlpol)
|
||||
if pdrlpol[-1] < 1 or pdrlpol[-1] > 16:
|
||||
@@ -1505,8 +1508,8 @@ class PDFDocument(object):
|
||||
pdrlpol = pdrlpol[:cutter]
|
||||
return plaintext[:16]
|
||||
|
||||
PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \
|
||||
'\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
|
||||
PASSWORD_PADDING = b'(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \
|
||||
b'\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
|
||||
# experimental aes pw support
|
||||
def initialize_standard(self, password, docid, param):
|
||||
# copy from a global variable
|
||||
@@ -1523,7 +1526,7 @@ class PDFDocument(object):
|
||||
try:
|
||||
EncMetadata = str_value(param['EncryptMetadata'])
|
||||
except:
|
||||
EncMetadata = 'True'
|
||||
EncMetadata = b'True'
|
||||
self.is_printable = bool(P & 4)
|
||||
self.is_modifiable = bool(P & 8)
|
||||
self.is_extractable = bool(P & 16)
|
||||
@@ -1540,12 +1543,12 @@ class PDFDocument(object):
|
||||
hash.update(docid[0]) # 5
|
||||
# aes special handling if metadata isn't encrypted
|
||||
if EncMetadata == ('False' or 'false'):
|
||||
hash.update('ffffffff'.decode('hex'))
|
||||
hash.update(codecs.decode(b'ffffffff','hex'))
|
||||
if 5 <= R:
|
||||
# 8
|
||||
for _ in range(50):
|
||||
hash = hashlib.md5(hash.digest()[:length/8])
|
||||
key = hash.digest()[:length/8]
|
||||
hash = hashlib.md5(hash.digest()[:length//8])
|
||||
key = hash.digest()[:length//8]
|
||||
if R == 2:
|
||||
# Algorithm 3.4
|
||||
u1 = ARC4.new(key).decrypt(password)
|
||||
@@ -1555,7 +1558,7 @@ class PDFDocument(object):
|
||||
hash.update(docid[0]) # 3
|
||||
x = ARC4.new(key).decrypt(hash.digest()[:16]) # 4
|
||||
for i in range(1,19+1):
|
||||
k = ''.join(bytes([c ^ i]) for c in key )
|
||||
k = b''.join(bytes([c ^ i]) for c in key )
|
||||
x = ARC4.new(k).decrypt(x)
|
||||
u1 = x+x # 32bytes total
|
||||
if R == 2:
|
||||
@@ -1587,17 +1590,19 @@ class PDFDocument(object):
|
||||
def initialize_ebx(self, password, docid, param):
|
||||
self.is_printable = self.is_modifiable = self.is_extractable = True
|
||||
rsa = RSA(password)
|
||||
length = int_value(param.get('Length', 0)) / 8
|
||||
rights = str_value(param.get('ADEPT_LICENSE')).decode('base64')
|
||||
length = int_value(param.get('Length', 0)) // 8
|
||||
rights = codecs.decode(param.get('ADEPT_LICENSE'), 'base64')
|
||||
rights = zlib.decompress(rights, -15)
|
||||
rights = etree.fromstring(rights)
|
||||
expr = './/{http://ns.adobe.com/adept}encryptedKey'
|
||||
bookkey = ''.join(rights.findtext(expr)).decode('base64')
|
||||
bookkey = codecs.decode(''.join(rights.findtext(expr)).encode('utf-8'),'base64')
|
||||
bookkey = rsa.decrypt(bookkey)
|
||||
if bookkey[0] != '\x02':
|
||||
raise ADEPTError('error decrypting book session key')
|
||||
index = bookkey.index('\0') + 1
|
||||
bookkey = bookkey[index:]
|
||||
#if bookkey[0] != 2:
|
||||
# raise ADEPTError('error decrypting book session key')
|
||||
if len(bookkey) > 16:
|
||||
if bookkey[-17] == '\x00' or bookkey[-17] == 0:
|
||||
bookkey = bookkey[-16:]
|
||||
length = 16
|
||||
ebx_V = int_value(param.get('V', 4))
|
||||
ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
|
||||
# added because of improper booktype / decryption book session key errors
|
||||
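The reworked `initialize_ebx` pulls the base64 book key out of the ADEPT license XML with ElementTree and decodes it through `codecs`. A self-contained sketch of just that extraction; the XML is a minimal stand-in and the key value is fake:

```python
import codecs
import xml.etree.ElementTree as etree

# minimal stand-in for the zlib-decompressed ADEPT_LICENSE payload (key value is fake)
rights_xml = (
    '<adept:rights xmlns:adept="http://ns.adobe.com/adept">'
    '<adept:encryptedKey>aGVsbG8gYWRlcHQga2V5IQ==</adept:encryptedKey>'
    '</adept:rights>'
)

rights = etree.fromstring(rights_xml)
expr = './/{http://ns.adobe.com/adept}encryptedKey'
bookkey_b64 = ''.join(rights.findtext(expr))
bookkey = codecs.decode(bookkey_b64.encode('utf-8'), 'base64')
print(bookkey)   # b'hello adept key!'
```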
@@ -1643,7 +1648,7 @@ class PDFDocument(object):
|
||||
objid = struct.pack('<L', objid ^ 0x3569ac)
|
||||
genno = struct.pack('<L', genno ^ 0xca96)
|
||||
key = self.decrypt_key
|
||||
key += objid[0] + genno[0] + objid[1] + genno[1] + objid[2] + 'sAlT'
|
||||
key += objid[0] + genno[0] + objid[1] + genno[1] + objid[2] + b'sAlT'
|
||||
hash = hashlib.md5(key)
|
||||
key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)]
|
||||
return key
|
||||
@@ -1652,7 +1657,7 @@ class PDFDocument(object):
|
||||
def genkey_v4(self, objid, genno):
|
||||
objid = struct.pack('<L', objid)[:3]
|
||||
genno = struct.pack('<L', genno)[:2]
|
||||
key = self.decrypt_key + objid + genno + 'sAlT'
|
||||
key = self.decrypt_key + objid + genno + b'sAlT'
|
||||
hash = hashlib.md5(key)
|
||||
key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)]
|
||||
return key
|
||||
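The `b'sAlT'` changes keep the MD5 input bytes end to end. For reference, the v4 derivation as it reads after this hunk, with a made-up master key:

```python
import hashlib
import struct

def genkey_v4(decrypt_key, objid, genno):
    """Derive the per-object key: master key + 3 low bytes of the object id
    + 2 low bytes of the generation number + b'sAlT', MD5-hashed and
    truncated to at most 16 bytes."""
    key = decrypt_key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2] + b'sAlT'
    return hashlib.md5(key).digest()[:min(len(decrypt_key) + 5, 16)]

master = b'\x01' * 16          # hypothetical 16-byte document key
objkey = genkey_v4(master, objid=12, genno=0)
assert len(objkey) == 16
```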
@@ -1664,7 +1669,6 @@ class PDFDocument(object):
|
||||
plaintext = AES.new(key,AES.MODE_CBC,ivector).decrypt(data)
|
||||
# remove pkcs#5 aes padding
|
||||
cutter = -1 * plaintext[-1]
|
||||
#print cutter
|
||||
plaintext = plaintext[:cutter]
|
||||
return plaintext
|
||||
|
||||
@@ -1675,7 +1679,6 @@ class PDFDocument(object):
|
||||
plaintext = AES.new(key,AES.MODE_CBC,ivector).decrypt(data)
|
||||
# remove pkcs#5 aes padding
|
||||
cutter = -1 * plaintext[-1]
|
||||
#print cutter
|
||||
plaintext = plaintext[:cutter]
|
||||
return plaintext
|
||||
|
||||
@@ -1684,7 +1687,7 @@ class PDFDocument(object):
|
||||
return ARC4.new(key).decrypt(data)
|
||||
|
||||
|
||||
KEYWORD_OBJ = PSKeywordTable.intern('obj')
|
||||
KEYWORD_OBJ = KWD(b'obj')
|
||||
|
||||
def getobj(self, objid):
|
||||
if not self.ready:
|
||||
@@ -1791,11 +1794,11 @@ class PDFParser(PSStackParser):
|
||||
def __repr__(self):
|
||||
return '<PDFParser>'
|
||||
|
||||
KEYWORD_R = PSKeywordTable.intern('R')
|
||||
KEYWORD_ENDOBJ = PSKeywordTable.intern('endobj')
|
||||
KEYWORD_STREAM = PSKeywordTable.intern('stream')
|
||||
KEYWORD_XREF = PSKeywordTable.intern('xref')
|
||||
KEYWORD_STARTXREF = PSKeywordTable.intern('startxref')
|
||||
KEYWORD_R = KWD(b'R')
|
||||
KEYWORD_ENDOBJ = KWD(b'endobj')
|
||||
KEYWORD_STREAM = KWD(b'stream')
|
||||
KEYWORD_XREF = KWD(b'xref')
|
||||
KEYWORD_STARTXREF = KWD(b'startxref')
|
||||
def do_keyword(self, pos, token):
|
||||
if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
|
||||
self.add_results(*self.pop(1))
|
||||
@@ -1843,8 +1846,8 @@ class PDFParser(PSStackParser):
|
||||
if STRICT:
|
||||
raise PDFSyntaxError('Unexpected EOF')
|
||||
break
|
||||
if 'endstream' in line:
|
||||
i = line.index('endstream')
|
||||
if b'endstream' in line:
|
||||
i = line.index(b'endstream')
|
||||
objlen += i
|
||||
data += line[:i]
|
||||
break
|
||||
@@ -1864,7 +1867,7 @@ class PDFParser(PSStackParser):
|
||||
prev = None
|
||||
for line in self.revreadlines():
|
||||
line = line.strip()
|
||||
if line == 'startxref': break
|
||||
if line == b'startxref': break
|
||||
if line:
|
||||
prev = line
|
||||
else:
|
||||
@@ -1916,7 +1919,7 @@ class PDFParser(PSStackParser):
|
||||
except PDFNoValidXRef:
|
||||
# fallback
|
||||
self.seek(0)
|
||||
pat = re.compile(r'^(\d+)\s+(\d+)\s+obj\b')
|
||||
pat = re.compile(rb'^(\d+)\s+(\d+)\s+obj\b')
|
||||
offsets = {}
|
||||
xref = PDFXRef()
|
||||
while 1:
|
||||
@@ -1924,7 +1927,7 @@ class PDFParser(PSStackParser):
|
||||
(pos, line) = self.nextline()
|
||||
except PSEOF:
|
||||
break
|
||||
if line.startswith('trailer'):
|
||||
if line.startswith(b'trailer'):
|
||||
trailerpos = pos # remember last trailer
|
||||
m = pat.match(line)
|
||||
if not m: continue
|
||||
@@ -1951,7 +1954,7 @@ class PDFObjStrmParser(PDFParser):
|
||||
self.add_results(*self.popall())
|
||||
return
|
||||
|
||||
KEYWORD_R = KWD('R')
|
||||
KEYWORD_R = KWD(b'R')
|
||||
def do_keyword(self, pos, token):
|
||||
if token is self.KEYWORD_R:
|
||||
# reference to indirect object
|
||||
@@ -1994,7 +1997,7 @@ class PDFSerializer(object):
|
||||
def dump(self, outf):
|
||||
self.outf = outf
|
||||
self.write(self.version)
|
||||
self.write('\n%\xe2\xe3\xcf\xd3\n')
|
||||
self.write(b'\n%\xe2\xe3\xcf\xd3\n')
|
||||
doc = self.doc
|
||||
objids = self.objids
|
||||
xrefs = {}
|
||||
@@ -2016,18 +2019,18 @@ class PDFSerializer(object):
|
||||
startxref = self.tell()
|
||||
|
||||
if not gen_xref_stm:
|
||||
self.write('xref\n')
|
||||
self.write('0 %d\n' % (maxobj + 1,))
|
||||
self.write(b'xref\n')
|
||||
self.write(b'0 %d\n' % (maxobj + 1,))
|
||||
for objid in range(0, maxobj + 1):
|
||||
if objid in xrefs:
|
||||
# force the genno to be 0
|
||||
self.write("%010d 00000 n \n" % xrefs[objid][0])
|
||||
self.write(b"%010d 00000 n \n" % xrefs[objid][0])
|
||||
else:
|
||||
self.write("%010d %05d f \n" % (0, 65535))
|
||||
self.write(b"%010d %05d f \n" % (0, 65535))
|
||||
|
||||
self.write('trailer\n')
|
||||
self.write(b'trailer\n')
|
||||
self.serialize_object(trailer)
|
||||
self.write('\nstartxref\n%d\n%%%%EOF' % startxref)
|
||||
self.write(b'\nstartxref\n%d\n%%%%EOF' % startxref)
|
||||
|
||||
else: # Generate crossref stream.
|
||||
|
||||
@@ -2076,7 +2079,7 @@ class PDFSerializer(object):
|
||||
data.append(struct.pack('>L', f2)[-fl2:])
|
||||
data.append(struct.pack('>L', f3)[-fl3:])
|
||||
index.extend((first, prev - first + 1))
|
||||
data = zlib.compress(''.join(data))
|
||||
data = zlib.compress(b''.join(data))
|
||||
dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index,
|
||||
'W': [1, fl2, fl3], 'Length': len(data),
|
||||
'Filter': LITERALS_FLATE_DECODE[0],
|
||||
@@ -2085,7 +2088,7 @@ class PDFSerializer(object):
|
||||
dic['Info'] = trailer['Info']
|
||||
xrefstm = PDFStream(dic, data)
|
||||
self.serialize_indirect(maxobj, xrefstm)
|
||||
self.write('startxref\n%d\n%%%%EOF' % startxref)
|
||||
self.write(b'startxref\n%d\n%%%%EOF' % startxref)
|
||||
def write(self, data):
|
||||
self.outf.write(data)
|
||||
self.last = data[-1:]
|
||||
@@ -2094,13 +2097,10 @@ class PDFSerializer(object):
|
||||
return self.outf.tell()
|
||||
|
||||
def escape_string(self, string):
|
||||
string = string.replace('\\', '\\\\')
|
||||
string = string.replace('\n', r'\n')
|
||||
string = string.replace('(', r'\(')
|
||||
string = string.replace(')', r'\)')
|
||||
# get rid of ciando id
|
||||
regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}')
|
||||
if regularexp.match(string): return ('http://www.ciando.com')
|
||||
string = string.replace(b'\\', b'\\\\')
|
||||
string = string.replace(b'\n', rb'\n')
|
||||
string = string.replace(b'(', rb'\(')
|
||||
string = string.replace(b')', rb'\)')
|
||||
return string
|
||||
|
||||
def serialize_object(self, obj):
|
||||
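`escape_string` now operates on bytes and drops the old ciando-URL special case; note that `rb'\n'` keeps the two-character backslash-n escape rather than a literal newline. The method body as it reads after the change, plus a small self-check:

```python
def escape_string(string):
    """Escape the characters that must not appear raw inside a PDF
    literal string ( ... ), operating purely on bytes."""
    string = string.replace(b'\\', b'\\\\')
    string = string.replace(b'\n', rb'\n')
    string = string.replace(b'(', rb'\(')
    string = string.replace(b')', rb'\)')
    return string

assert escape_string(b'a(b)\nc\\d') == rb'a\(b\)\nc\\d'
```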
@@ -2111,34 +2111,38 @@ class PDFSerializer(object):
|
||||
obj['Subtype'] = obj['Type']
|
||||
del obj['Type']
|
||||
# end - hope this doesn't have bad effects
|
||||
self.write('<<')
|
||||
self.write(b'<<')
|
||||
for key, val in obj.items():
|
||||
self.write('/%s' % key)
|
||||
self.write(str(LIT(key.encode('utf-8'))).encode('utf-8'))
|
||||
self.serialize_object(val)
|
||||
self.write('>>')
|
||||
self.write(b'>>')
|
||||
elif isinstance(obj, list):
|
||||
self.write('[')
|
||||
self.write(b'[')
|
||||
for val in obj:
|
||||
self.serialize_object(val)
|
||||
self.write(']')
|
||||
self.write(b']')
|
||||
elif isinstance(obj, bytearray):
|
||||
self.write(b'(%s)' % self.escape_string(obj))
|
||||
elif isinstance(obj, bytes):
|
||||
self.write(b'(%s)' % self.escape_string(obj))
|
||||
elif isinstance(obj, str):
|
||||
self.write('(%s)' % self.escape_string(obj))
|
||||
self.write(b'(%s)' % self.escape_string(obj.encode('utf-8')))
|
||||
elif isinstance(obj, bool):
|
||||
if self.last.isalnum():
|
||||
self.write(' ')
|
||||
self.write(str(obj).lower())
|
||||
self.write(b' ')
|
||||
self.write(str(obj).lower().encode('utf-8'))
|
||||
elif isinstance(obj, int):
|
||||
if self.last.isalnum():
|
||||
self.write(' ')
|
||||
self.write(str(obj))
|
||||
self.write(b' ')
|
||||
self.write(str(obj).encode('utf-8'))
|
||||
elif isinstance(obj, Decimal):
|
||||
if self.last.isalnum():
|
||||
self.write(' ')
|
||||
self.write(str(obj))
|
||||
self.write(b' ')
|
||||
self.write(str(obj).encode('utf-8'))
|
||||
elif isinstance(obj, PDFObjRef):
|
||||
if self.last.isalnum():
|
||||
self.write(' ')
|
||||
self.write('%d %d R' % (obj.objid, 0))
|
||||
self.write(b' ')
|
||||
self.write(b'%d %d R' % (obj.objid, 0))
|
||||
elif isinstance(obj, PDFStream):
|
||||
### If we don't generate cross ref streams the object streams
|
||||
### are no longer useful, as we have extracted all objects from
|
||||
@@ -2148,41 +2152,36 @@ class PDFSerializer(object):
|
||||
else:
|
||||
data = obj.get_decdata()
|
||||
self.serialize_object(obj.dic)
|
||||
self.write('stream\n')
|
||||
self.write(b'stream\n')
|
||||
self.write(data)
|
||||
self.write('\nendstream')
|
||||
self.write(b'\nendstream')
|
||||
else:
|
||||
data = str(obj)
|
||||
if data[0].isalnum() and self.last.isalnum():
|
||||
self.write(' ')
|
||||
data = str(obj).encode('utf-8')
|
||||
if bytes([data[0]]).isalnum() and self.last.isalnum():
|
||||
self.write(b' ')
|
||||
self.write(data)
|
||||
|
||||
def serialize_indirect(self, objid, obj):
|
||||
self.write('%d 0 obj' % (objid,))
|
||||
self.write(b'%d 0 obj' % (objid,))
|
||||
self.serialize_object(obj)
|
||||
if self.last.isalnum():
|
||||
self.write('\n')
|
||||
self.write('endobj\n')
|
||||
self.write(b'\n')
|
||||
self.write(b'endobj\n')
|
||||
|
||||
|
||||
|
||||
|
||||
def decryptBook(userkey, inpath, outpath):
|
||||
if RSA is None:
|
||||
raise ADEPTError("PyCrypto or OpenSSL must be installed.")
|
||||
raise ADEPTError("PyCryptodome or OpenSSL must be installed.")
|
||||
with open(inpath, 'rb') as inf:
|
||||
#try:
|
||||
serializer = PDFSerializer(inf, userkey)
|
||||
#except:
|
||||
# print "Error serializing pdf {0}. Probably wrong key.".format(os.path.basename(inpath))
|
||||
# return 2
|
||||
# hope this will fix the 'bad file descriptor' problem
|
||||
with open(outpath, 'wb') as outf:
|
||||
# help construct to make sure the method runs to the end
|
||||
try:
|
||||
serializer.dump(outf)
|
||||
except Exception as e:
|
||||
print("error writing pdf: {0}".format(e.args[0]))
|
||||
print("error writing pdf: {0}".format(e))
|
||||
return 2
|
||||
return 0
|
||||
|
||||
|
||||
@@ -815,18 +815,18 @@ class DrmIonVoucher(object):
|
||||
addprottable(self.envelope)
|
||||
|
||||
def decryptvoucher(self):
|
||||
shared = "PIDv3" + self.encalgorithm + self.enctransformation + self.hashalgorithm
|
||||
shared = ("PIDv3" + self.encalgorithm + self.enctransformation + self.hashalgorithm).encode('ASCII')
|
||||
|
||||
self.lockparams.sort()
|
||||
for param in self.lockparams:
|
||||
if param == "ACCOUNT_SECRET":
|
||||
shared += param + self.secret
|
||||
shared += param.encode('ASCII') + self.secret
|
||||
elif param == "CLIENT_ID":
|
||||
shared += param + self.dsn
|
||||
shared += param.encode('ASCII') + self.dsn
|
||||
else:
|
||||
_assert(False, "Unknown lock parameter: %s" % param)
|
||||
|
||||
sharedsecret = obfuscate(shared.encode('ASCII'), self.version)
|
||||
sharedsecret = obfuscate(shared, self.version)
|
||||
|
||||
key = hmac.new(sharedsecret, b"PIDv3", digestmod=hashlib.sha256).digest()
|
||||
aes = AES.new(key[:32], AES.MODE_CBC, self.cipheriv[:16])
|
||||
|
||||
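`decryptvoucher` now encodes each lock parameter to ASCII before concatenating it with the (already bytes) DSN and account secret, so the obfuscated shared secret and the HMAC key stay bytes throughout. A hedged sketch of the key-derivation half; the plugin's `obfuscate()` step is omitted (treated as identity) and all parameter values below are hypothetical:

```python
import hashlib
import hmac

def derive_voucher_key(encalgorithm, enctransformation, hashalgorithm,
                       lockparams, dsn, secret):
    """Build the byte string that seeds the voucher key, mirroring the hunk
    above; obfuscate() from the plugin is omitted here."""
    shared = ("PIDv3" + encalgorithm + enctransformation + hashalgorithm).encode('ASCII')
    for param in sorted(lockparams):
        if param == "ACCOUNT_SECRET":
            shared += param.encode('ASCII') + secret
        elif param == "CLIENT_ID":
            shared += param.encode('ASCII') + dsn
    sharedsecret = shared                       # plugin: obfuscate(shared, version)
    return hmac.new(sharedsecret, b"PIDv3", digestmod=hashlib.sha256).digest()

key = derive_voucher_key("AES", "CBC", "SHA256",
                         ["CLIENT_ID", "ACCOUNT_SECRET"],
                         dsn=b"device-serial", secret=b"account-secret")
assert len(key) == 32
```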
@@ -174,14 +174,14 @@ def pidFromSerial(s, l):
|
||||
|
||||
# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
|
||||
def getKindlePids(rec209, token, serialnum):
|
||||
if isinstance(serialnum,str):
|
||||
serialnum = serialnum.encode('utf-8')
|
||||
|
||||
if rec209 is None:
|
||||
return [serialnum]
|
||||
|
||||
pids=[]
|
||||
|
||||
if isinstance(serialnum,str):
|
||||
serialnum = serialnum.encode('utf-8')
|
||||
|
||||
# Compute book PID
|
||||
pidHash = SHA1(serialnum+rec209+token)
|
||||
bookPID = encodePID(pidHash)
|
||||
|
||||
DeDRM_plugin/mobidedrm.py (Normal file → Executable file, 36 changed lines)
@@ -191,19 +191,21 @@ def PC1(key, src, decryption=True):
|
||||
dst+=bytes([curByte])
|
||||
return dst
|
||||
|
||||
# accepts unicode returns unicode
|
||||
def checksumPid(s):
|
||||
letters = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
|
||||
crc = (~binascii.crc32(s,-1))&0xFFFFFFFF
|
||||
crc = (~binascii.crc32(s.encode('utf-8'),-1))&0xFFFFFFFF
|
||||
crc = crc ^ (crc >> 16)
|
||||
res = s
|
||||
l = len(letters)
|
||||
for i in (0,1):
|
||||
b = crc & 0xff
|
||||
pos = (b // l) ^ (b % l)
|
||||
res += letters[pos%l].encode('ascii')
|
||||
res += letters[pos%l]
|
||||
crc >>= 8
|
||||
return res
|
||||
|
||||
# expects bytearray
|
||||
def getSizeOfTrailingDataEntries(ptr, size, flags):
|
||||
def getSizeOfTrailingDataEntry(ptr, size):
|
||||
bitpos, result = 0, 0
|
||||
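`checksumPid` now takes and returns str: the CRC is computed over the UTF-8 encoding, but the two check characters are appended as ordinary text. The function as it reads after this hunk, with a quick sanity check:

```python
import binascii

def checksumPid(s):
    """Append the two-character checksum Kindle expects on a PID (str in, str out)."""
    letters = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
    crc = (~binascii.crc32(s.encode('utf-8'), -1)) & 0xFFFFFFFF
    crc = crc ^ (crc >> 16)
    res = s
    l = len(letters)
    for i in (0, 1):
        b = crc & 0xff
        pos = (b // l) ^ (b % l)
        res += letters[pos % l]
        crc >>= 8
    return res

pid = checksumPid('A1B2C3D4')
assert len(pid) == 10 and pid.startswith('A1B2C3D4')
```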
@@ -282,7 +284,7 @@ class MobiBook:
|
||||
self.mobi_codepage = 1252
|
||||
self.mobi_version = -1
|
||||
|
||||
if self.magic == 'TEXtREAd':
|
||||
if self.magic == b'TEXtREAd':
|
||||
print("PalmDoc format book detected.")
|
||||
return
|
||||
|
||||
@@ -301,7 +303,7 @@ class MobiBook:
|
||||
# if exth region exists parse it for metadata array
|
||||
try:
|
||||
exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
|
||||
exth = ''
|
||||
exth = b''
|
||||
if exth_flag & 0x40:
|
||||
exth = self.sect[16 + self.mobi_length:]
|
||||
if (len(exth) >= 12) and (exth[:4] == b'EXTH'):
|
||||
@@ -323,12 +325,13 @@ class MobiBook:
|
||||
except Exception as e:
|
||||
print("Cannot set meta_array: Error: {:s}".format(e.args[0]))
|
||||
|
||||
#returns unicode
|
||||
def getBookTitle(self):
|
||||
codec_map = {
|
||||
1252 : 'windows-1252',
|
||||
65001 : 'utf-8',
|
||||
}
|
||||
title = ''
|
||||
title = b''
|
||||
codec = 'windows-1252'
|
||||
if self.magic == b'BOOKMOBI':
|
||||
if 503 in self.meta_array:
|
||||
@@ -339,7 +342,7 @@ class MobiBook:
|
||||
title = self.sect[toff:tend]
|
||||
if self.mobi_codepage in codec_map.keys():
|
||||
codec = codec_map[self.mobi_codepage]
|
||||
if title == '':
|
||||
if title == b'':
|
||||
title = self.header[:32]
|
||||
title = title.split(b'\0')[0]
|
||||
return title.decode(codec)
|
||||
@@ -355,13 +358,15 @@ class MobiBook:
|
||||
# if that key exists in the meta_array, append its contents to the token
|
||||
for i in range(0,len(data),5):
|
||||
val, = struct.unpack('>I',data[i+1:i+5])
|
||||
sval = self.meta_array.get(val,'')
|
||||
sval = self.meta_array.get(val,b'')
|
||||
token += sval
|
||||
return rec209, token
|
||||
|
||||
# new must be byte array
|
||||
def patch(self, off, new):
|
||||
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
|
||||
|
||||
# new must be byte array
|
||||
def patchSection(self, section, new, in_off = 0):
|
||||
if (section + 1 == self.num_sections):
|
||||
endoff = len(self.data_file)
|
||||
@@ -371,12 +376,12 @@ class MobiBook:
|
||||
assert off + in_off + len(new) <= endoff
|
||||
self.patch(off + in_off, new)
|
||||
|
||||
# pids in pidlist must be unicode, returned key is byte array, pid is unicode
|
||||
def parseDRM(self, data, count, pidlist):
|
||||
found_key = None
|
||||
keyvec1 = b'\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96'
|
||||
for pid in pidlist:
|
||||
bigpid = pid.ljust(16,b'\0')
|
||||
bigpid = bigpid
|
||||
bigpid = pid.encode('utf-8').ljust(16,b'\0')
|
||||
temp_key = PC1(keyvec1, bigpid, False)
|
||||
temp_key_sum = sum(temp_key) & 0xff
|
||||
found_key = None
|
||||
@@ -424,6 +429,7 @@ class MobiBook:
|
||||
return ".azw3"
|
||||
return ".mobi"
|
||||
|
||||
# pids in pidlist may be unicode or bytearrays or bytes
|
||||
def processBook(self, pidlist):
|
||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||
print("Crypto Type is: {0:d}".format(crypto_type))
|
||||
@@ -445,6 +451,8 @@ class MobiBook:
|
||||
goodpids = []
|
||||
# print("DEBUG ==== pidlist = ", pidlist)
|
||||
for pid in pidlist:
|
||||
if isinstance(pid,(bytearray,bytes)):
|
||||
pid = pid.decode('utf-8')
|
||||
if len(pid)==10:
|
||||
if checksumPid(pid[0:-2]) != pid:
|
||||
print("Warning: PID {0} has incorrect checksum, should have been {1}".format(pid,checksumPid(pid[0:-2])))
|
||||
@@ -457,8 +465,8 @@ class MobiBook:
|
||||
# print("======= DEBUG good pids = ", goodpids)
|
||||
|
||||
if self.crypto_type == 1:
|
||||
t1_keyvec = 'QDCVEPMU675RUBSZ'
|
||||
if self.magic == 'TEXtREAd':
|
||||
t1_keyvec = b'QDCVEPMU675RUBSZ'
|
||||
if self.magic == b'TEXtREAd':
|
||||
bookkey_data = self.sect[0x0E:0x0E+16]
|
||||
elif self.mobi_version < 0:
|
||||
bookkey_data = self.sect[0x90:0x90+16]
|
||||
@@ -473,7 +481,7 @@ class MobiBook:
|
||||
raise DrmException("Encryption not initialised. Must be opened with Mobipocket Reader first.")
|
||||
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
||||
if not found_key:
|
||||
raise DrmException("No key found in {0:d} keys tried.".format(len(goodpids)))
|
||||
raise DrmException("No key found in {0:d} PIDs tried.".format(len(goodpids)))
|
||||
# kill the drm keys
|
||||
self.patchSection(0, b'\0' * drm_size, drm_ptr)
|
||||
# kill the drm pointers
|
||||
@@ -509,6 +517,7 @@ class MobiBook:
|
||||
print("done")
|
||||
return
|
||||
|
||||
# pids in pidlist must be unicode
|
||||
def getUnencryptedBook(infile,pidlist):
|
||||
if not os.path.isfile(infile):
|
||||
raise DrmException("Input File Not Found.")
|
||||
@@ -530,8 +539,7 @@ def cli_main():
|
||||
infile = argv[1]
|
||||
outfile = argv[2]
|
||||
if len(argv) == 4:
|
||||
# convert from unicode to bytearray before splitting.
|
||||
pidlist = argv[3].encode('utf-8').split(b',')
|
||||
pidlist = argv[3].split(',')
|
||||
else:
|
||||
pidlist = []
|
||||
try:
|
||||
|
||||
@@ -66,21 +66,21 @@ class fixZip:
|
||||
|
||||
def uncompress(self, cmpdata):
|
||||
dc = zlib.decompressobj(-15)
|
||||
data = ''
|
||||
data = b''
|
||||
while len(cmpdata) > 0:
|
||||
if len(cmpdata) > _MAX_SIZE :
|
||||
newdata = cmpdata[0:_MAX_SIZE]
|
||||
cmpdata = cmpdata[_MAX_SIZE:]
|
||||
else:
|
||||
newdata = cmpdata
|
||||
cmpdata = ''
|
||||
cmpdata = b''
|
||||
newdata = dc.decompress(newdata)
|
||||
unprocessed = dc.unconsumed_tail
|
||||
if len(unprocessed) == 0:
|
||||
newdata += dc.flush()
|
||||
data += newdata
|
||||
cmpdata += unprocessed
|
||||
unprocessed = ''
|
||||
unprocessed = b''
|
||||
return data
|
||||
|
||||
def getfiledata(self, zi):
|
||||
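`fixZip.uncompress` feeds the deflate stream in bounded chunks, and with Python 3 all of its accumulators must start as `b''`. A simplified sketch of the same chunked loop (it flushes once at the end instead of tracking `unconsumed_tail`, and the chunk size is illustrative):

```python
import zlib

CHUNK = 64 * 1024   # illustrative chunk size; the plugin uses its own _MAX_SIZE

def uncompress(cmpdata):
    """Raw-deflate decompress in bounded chunks, keeping every buffer as bytes."""
    dc = zlib.decompressobj(-15)
    data = b''
    while cmpdata:
        newdata, cmpdata = cmpdata[:CHUNK], cmpdata[CHUNK:]
        data += dc.decompress(newdata)
    data += dc.flush()          # flush once, after the whole stream has been fed
    return data

co = zlib.compressobj(9, zlib.DEFLATED, -15)
blob = co.compress(b'epub payload ' * 10000) + co.flush()
assert uncompress(blob) == b'epub payload ' * 10000
```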
@@ -123,7 +123,7 @@ class fixZip:
|
||||
mimeinfo.internal_attr = 1 # text file
|
||||
try:
|
||||
# if the mimetype is present, get its info, including time-stamp
|
||||
oldmimeinfo = self.inzip.getinfo('mimetype')
|
||||
oldmimeinfo = self.inzip.getinfo(b'mimetype')
|
||||
# copy across useful fields
|
||||
mimeinfo.date_time = oldmimeinfo.date_time
|
||||
mimeinfo.comment = oldmimeinfo.comment
|
||||
@@ -137,7 +137,7 @@ class fixZip:
|
||||
|
||||
# write the rest of the files
|
||||
for zinfo in self.inzip.infolist():
|
||||
if zinfo.filename != "mimetype" or self.ztype != 'epub':
|
||||
if zinfo.filename != b"mimetype" or self.ztype != 'epub':
|
||||
data = None
|
||||
try:
|
||||
data = self.inzip.read(zinfo.filename)
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import (unicode_literals, division, absolute_import,
                         print_function)

 __license__ = 'GPL v3'
-__version__ = '6.7.0'
+__version__ = '7.1.0'
 __docformat__ = 'restructuredtext en'

 #####################################################################

@@ -20,7 +20,7 @@ except NameError:
 PLUGIN_NAME = 'Obok DeDRM'
 PLUGIN_SAFE_NAME = PLUGIN_NAME.strip().lower().replace(' ', '_')
 PLUGIN_DESCRIPTION = _('Removes DRM from Kobo kepubs and adds them to the library.')
-PLUGIN_VERSION_TUPLE = (6, 7, 0)
+PLUGIN_VERSION_TUPLE = (7, 1, 0)
 PLUGIN_VERSION = '.'.join([str(x) for x in PLUGIN_VERSION_TUPLE])
 HELPFILE_NAME = PLUGIN_SAFE_NAME + '_Help.htm'
 PLUGIN_AUTHORS = 'Anon'
@@ -197,7 +197,7 @@ class InterfacePluginAction(InterfaceAction):
         # We will write the help file out every time, in case the user upgrades the plugin zip
         # and there is a newer help file contained within it.
         file_path = os.path.join(config_dir, 'plugins', HELPFILE_NAME)
-        file_data = self.load_resources(HELPFILE_NAME)[HELPFILE_NAME]
+        file_data = self.load_resources(HELPFILE_NAME)[HELPFILE_NAME].decode('utf-8')
         with open(file_path,'w') as f:
             f.write(file_data)
         return file_path
@@ -1,6 +1,9 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

+# Version 4.1.0 February 2021
+# Add detection for Kobo directory location on Linux
+
 # Version 4.0.0 September 2020
 # Python 3.0
 #
@@ -365,9 +368,33 @@ class KoboLibrary(object):
            self.kobodir = os.path.join(self.kobodir, "Kobo", "Kobo Desktop Edition")
        elif sys.platform.startswith('darwin'):
            self.kobodir = os.path.join(os.environ['HOME'], "Library", "Application Support", "Kobo", "Kobo Desktop Edition")
        #elif linux_path != None:
            # Probably Linux, let's get the wine prefix and path to Kobo.
        #    self.kobodir = os.path.join(linux_path, "Local Settings", "Application Data", "Kobo", "Kobo Desktop Edition")
        elif sys.platform.startswith('linux'):

            #sets ~/.config/calibre as the location to store the kobodir location info file and creates this directory if necessary
            kobodir_cache_dir = os.path.join(os.environ['HOME'], ".config", "calibre")
            if not os.path.isdir(kobodir_cache_dir):
                os.mkdir(kobodir_cache_dir)

            #appends the name of the file we're storing the kobodir location info to the above path
            kobodir_cache_file = str(kobodir_cache_dir) + "/" + "kobo location"

            """if the above file does not exist, recursively searches from the root
            of the filesystem until kobodir is found and stores the location of kobodir
            in that file so this loop can be skipped in the future"""
            original_stdout = sys.stdout
            if not os.path.isfile(kobodir_cache_file):
                for root, dirs, files in os.walk('/'):
                    for file in files:
                        if file == 'Kobo.sqlite':
                            kobo_linux_path = str(root)
                            with open(kobodir_cache_file, 'w') as f:
                                sys.stdout = f
                                print(kobo_linux_path, end='')
                            sys.stdout = original_stdout

            f = open(kobodir_cache_file, 'r' )
            self.kobodir = f.read()

        # desktop versions use Kobo.sqlite
        kobodb = os.path.join(self.kobodir, "Kobo.sqlite")
        # check for existence of file
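The new Linux branch walks the filesystem for `Kobo.sqlite` once and caches the directory in `~/.config/calibre` so later runs skip the search. A hedged sketch of the same cache logic using plain file writes instead of redirecting `sys.stdout`; the helper name is mine, the paths and filenames follow the diff:

```python
import os

def find_kobo_dir(cache_name="kobo location"):
    """Return the directory that contains Kobo.sqlite, caching the answer in
    ~/.config/calibre so the expensive os.walk only happens once."""
    cache_dir = os.path.join(os.environ['HOME'], ".config", "calibre")
    os.makedirs(cache_dir, exist_ok=True)
    cache_file = os.path.join(cache_dir, cache_name)

    if os.path.isfile(cache_file):
        with open(cache_file, 'r') as f:
            return f.read()

    for root, dirs, files in os.walk('/'):
        if 'Kobo.sqlite' in files:
            with open(cache_file, 'w') as f:
                f.write(root)               # plain write; no sys.stdout redirection needed
            return root
    return ''
```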
Binary file not shown.
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Project-Id-Version: \n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"POT-Creation-Date: 2014-11-17 12:51+0100\n"
|
||||
"PO-Revision-Date: 2020-02-02 09:18+0100\n"
|
||||
"PO-Revision-Date: 2021-01-19 12:20+0100\n"
|
||||
"Language: sv\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
@@ -16,14 +16,14 @@ msgstr ""
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: \n"
|
||||
"X-Generator: Poedit 2.2.4\n"
|
||||
"X-Generator: Poedit 2.4.2\n"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:80
|
||||
msgid ""
|
||||
"<p>No books found in Kobo Library\n"
|
||||
"Are you sure it's installed\\configured\\synchronized?"
|
||||
msgstr ""
|
||||
"<p>Inga böcker finns i Kobo-bibliotek\n"
|
||||
"<p>Inga böcker hittades i Kobo-bibliotek\n"
|
||||
"Är du säker på att den är installerad\\konfigurerad\\synkroniserad?"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:87
|
||||
@@ -36,7 +36,7 @@ msgstr "Problem med att hämta nycklar med nyare obok-metod."
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:97
|
||||
msgid "Found {0} possible keys to try."
|
||||
msgstr "Hittade {0} möjliga nycklar att pröva med."
|
||||
msgstr "Hittade {0} möjliga nycklar att försöka med."
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:99
|
||||
msgid "<p>No userkeys found to decrypt books with. No point in proceeding."
|
||||
@@ -46,7 +46,7 @@ msgstr ""
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:115
|
||||
msgid "{} - Decryption canceled by user."
|
||||
msgstr "{} - Dekryptering avbryts av användaren."
|
||||
msgstr "{} - Dekryptering avbröts av användaren."
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:135
|
||||
msgid "{} - \"Add books\" canceled by user."
|
||||
@@ -87,14 +87,14 @@ msgstr "dubblett upptäcktes"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:233
|
||||
msgid "{0} - Successfully added EPUB format to existing {1}"
|
||||
msgstr "{0} - Lade till EPUB-format till befintliga {1}"
|
||||
msgstr "{0} - EPUB-formatet har lagts till i befintliga {1}"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:236
|
||||
msgid ""
|
||||
"{0} - Error adding EPUB format to existing {1}. This really shouldn't happen."
|
||||
msgstr ""
|
||||
"{0} - Fel vid tillägg av EPUB-format till befintligt {1}. Det här borde inte "
|
||||
"hända."
|
||||
"{0} - Fel vid tilläggning av EPUB-formatet till befintliga {1}. Detta borde "
|
||||
"verkligen inte hända."
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:259
|
||||
msgid "{} - \"Insert formats\" canceled by user."
|
||||
@@ -103,44 +103,44 @@ msgstr "{} - \"Infoga format\" avbröts av användaren."
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:291
|
||||
msgid ""
|
||||
"<p><b>{0}</b> EPUB{2} successfully added to library.<br /><br /><b>{1}</b> "
|
||||
msgstr "<p><b>{0}</b> EPUB{2} lades till bibliotek.<br /><br /><b>{1}</b> "
|
||||
msgstr "<p><b>{0}</b> EPUB{2} har lagts till i bibliotek.<br /><br /><b>{1}</b> "
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:292
|
||||
msgid ""
|
||||
"not added because books with the same title/author were detected.<br /><br /"
|
||||
">Would you like to try and add the EPUB format{0}"
|
||||
msgstr ""
|
||||
"inte tillagd eftersom böcker med samma titel/författare upptäcktes.<br/><br /"
|
||||
">Vill du försöka lägga till EPUB-formatet{0}"
|
||||
"lades inte till eftersom böcker med samma titel/författare upptäcktes.<br/"
|
||||
"><br />Vill du försöka lägga till EPUB-formatet{0}"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:293
|
||||
msgid ""
|
||||
" to those existing entries?<br /><br />NOTE: no pre-existing EPUBs will be "
|
||||
"overwritten."
|
||||
msgstr ""
|
||||
" till dessa befintliga poster?<br /><br />OBS: inga befintliga EPUB:er "
|
||||
"kommer att skrivas över."
|
||||
" till dessa befintliga poster?<br /><br />OBS: inga befintliga EPUB:er kommer "
|
||||
"att skrivas över."
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:295
|
||||
msgid ""
|
||||
"{0} -- not added because of {1} in your library.\n"
|
||||
"\n"
|
||||
msgstr ""
|
||||
"{0} -- inte tillagd på grund av {1} i ditt bibliotek.\n"
|
||||
"{0} -- lades inte till på grund av {1} i ditt bibliotek.\n"
|
||||
"\n"
|
||||
|
||||
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:297
msgid "<p><b>{0}</b> -- not added because of {1} in your library.<br /><br />"
msgstr ""
"<p><b>{0}</b> -- inte tillagd på grund av {1} i ditt bibliotek.<br /><br />"
"<p><b>{0}</b> -- lades inte till på grund av {1} i ditt bibliotek.<br /><br />"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:298
msgid ""
"Would you like to try and add the EPUB format to an available calibre "
"duplicate?<br /><br />"
msgstr ""
"Vill du försöka lägga till EPUB-formatet till en tillgänglig calibre-"
"dubblett?<br /><br />"
"Vill du försöka lägga till EPUB-formatet till en tillgänglig calibre-dubblett?"
"<br /><br />"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:299
msgid "NOTE: no pre-existing EPUB will be overwritten."
@@ -148,23 +148,23 @@ msgstr "OBS: ingen befintlig EPUB kommer att skrivas över."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:346
msgid "Trying key: "
msgstr "Prövar nyckel: "
msgstr "Försöker med nyckel: "

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:378
msgid "Decryption failed, trying next key."
msgstr "Det gick inte att dekryptera, prövar nästa nyckel."
msgstr "Dekryptering misslyckades, försöker med nästa nyckel."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:382
msgid "Unknown Error decrypting, trying next key.."
msgstr "Okänt fel dekryptering, prövar nästa nyckel.."
msgstr "Okänt fel vid dekryptering, försöker med nästa nyckel.."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:395
msgid ""
"<p>All selected Kobo books added as new calibre books or inserted into "
"existing calibre ebooks.<br /><br />No issues."
msgstr ""
"<p>Alla valda Kobo-böcker läggs till som nya calibre-böcker eller infogas i "
"befintliga calibre-e-böcker.<br /><br />Inga problem."
"<p>Alla valda Kobo-böcker har lagts till som nya calibre-böcker eller infogats "
"i befintliga calibre-e-böcker.<br /><br />Inga problem."
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:399
msgid "<p>{0} successfully added."
@@ -192,15 +192,14 @@ msgstr "<p><b>Nya böcker skapade:</b> {}</p>\n"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:418
msgid "<p><b>Duplicates that weren't added:</b> {}</p>\n"
msgstr "<p><b>Dubbletter som inte tillsattes:</b> {}</p>\n"
msgstr "<p><b>Dubbletter som inte har lagts till:</b> {}</p>\n"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:426
msgid "<p><b>Book imports cancelled by user:</b> {}</p>\n"
msgstr "<p><b>Bokimport avbröts av användaren:</b> {}</p>\n"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:428
msgid ""
"<p><b>New EPUB formats inserted in existing calibre books:</b> {0}</p>\n"
msgid "<p><b>New EPUB formats inserted in existing calibre books:</b> {0}</p>\n"
msgstr ""
"<p><b>Nya EPUB-format infogade i befintliga calibre-böcker:</b> {0}</p>\n"

@@ -208,8 +207,7 @@ msgstr ""
msgid ""
"<p><b>EPUB formats NOT inserted into existing calibre books:</b> {}<br />\n"
msgstr ""
"<p><b>EPUB-format som INTE infogats i befintliga calibre-böcker:</b> {}<br /"
">\n"
"<p><b>EPUB-format som INTE infogats i befintliga calibre-böcker:</b> {}<br />\n"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:435
msgid ""
@@ -221,7 +219,7 @@ msgstr ""

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:444
msgid "<p><b>Format imports cancelled by user:</b> {}</p>\n"
msgstr "<p><b>Format-import avbröts av användaren:</b> {}</p>\n"
msgstr "<p><b>Format-importen avbröts av användaren:</b> {}</p>\n"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:458
msgid "Unknown Book Title"
@@ -233,11 +231,11 @@ msgstr "den kunde inte dekrypteras."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:462
msgid ""
"user CHOSE not to insert the new EPUB format, or all existing calibre "
"entries HAD an EPUB format already."
"user CHOSE not to insert the new EPUB format, or all existing calibre entries "
"HAD an EPUB format already."
msgstr ""
"användaren VALDE att inte infoga det nya EPUB-formatet, eller alla "
"befintliga calibre-poster hade redan ett EPUB-format."
"användaren VALDE att inte infoga det nya EPUB-formatet, eller alla befintliga "
"calibre-poster hade redan ett EPUB-format."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:464
msgid "of unknown reasons. Gosh I'm embarrassed!"
@@ -245,7 +243,7 @@ msgstr "av okända skäl. Jag skäms!"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\action.py:465
msgid "<p>{0} not added because {1}"
msgstr "<p>{0} inte tillagd eftersom {1}"
msgstr "<p>{0} lades inte till på grund av {1}"
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\common_utils.py:226
msgid "Help"
@@ -254,31 +252,31 @@ msgstr "Hjälp"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\common_utils.py:235
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\utilities.py:214
msgid "Restart required"
msgstr "Omstart krävs"
msgstr "Kräver omstart"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\common_utils.py:236
#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\utilities.py:215
msgid ""
"Title image not found - you must restart Calibre before using this plugin!"
msgstr ""
"Titelbild hittades inte - du måste starta calibre innan du använder denna "
"Titelbild hittades inte - du måste starta om calibre innan du använder denna "
"insticksmodul!"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\common_utils.py:322
msgid "Undefined"
msgstr "Obestämd"
msgstr "Odefinierad"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\config.py:30
msgid "When should Obok try to insert EPUBs into existing calibre entries?"
msgstr "När ska Obok försöka infoga EPUB:er i befintliga calibre-böcker?"
msgstr "När ska Obok försöka infoga EPUB:er i befintliga calibre-poster?"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\config.py:33
msgid ""
"<p>Default behavior when duplicates are detected. None of the choices will "
"cause calibre ebooks to be overwritten"
msgstr ""
"<p>Standardbeteende när dubbletter upptäcks. Inget av alternativen kommer "
"att orsaka calibre-e-böcker att skrivas över"
"<p>Standardbeteende när dubbletter upptäcks. Inget av alternativen kommer att "
"orsaka att calibre-e-böcker skrivs över"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\config.py:35
msgid "Ask"
@@ -323,7 +321,7 @@ msgstr "Välj alla böcker med DRM."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\dialogs.py:95
msgid "All DRM free"
msgstr "Alla DRM fria"
msgstr "Alla utan DRM"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\dialogs.py:96
msgid "Select all books without DRM."
@@ -355,12 +353,12 @@ msgstr "Tar bort DRM från Kobo-kepubs och lägger till dem i biblioteket."

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\obok\obok.py:162
msgid "AES improper key used"
msgstr "AES felaktig nyckel används"
msgstr "Felaktig AES-nyckel används"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\obok\obok.py:167
msgid "Failed to initialize AES key"
msgstr "Det gick inte att initiera AES-nyckel"
msgstr "Misslyckades med att initiera AES-nyckel"

#: I:\Herramientas\PoeditPortable\App\Poedit\bin\obok_plugin-3.1.0_trad\obok\obok.py:175
msgid "AES decryption failed"
msgstr "AES dekryptering misslyckades"
msgstr "AES-dekryptering misslyckades"
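Many of the hunks above only re-wrap long msgstr lines rather than change the wording. For readers unfamiliar with the .po format, here is a minimal sketch of why re-wrapping is harmless: adjacent quoted lines in an entry are concatenated into one string, and the {}-style placeholders are filled at runtime. The domain name and locale directory below are illustrative, not the plugin's real ones.

```python
# Sketch only: demonstrates .po line wrapping and placeholder formatting.
import gettext

# Adjacent quoted lines in a .po entry concatenate into one string,
# exactly like adjacent Python string literals:
wrapped = ("<p><b>EPUB-format som INTE infogats i befintliga calibre-böcker:</b> {}<br /"
           ">\n")
single = "<p><b>EPUB-format som INTE infogats i befintliga calibre-böcker:</b> {}<br />\n"
assert wrapped == single  # re-wrapping a msgstr does not change the translation

# Loading a compiled catalog and filling a {} placeholder (paths are made up):
t = gettext.translation("obok_plugin", localedir="translations",
                        languages=["sv"], fallback=True)
_ = t.gettext
print(_("<p><b>Duplicates that weren't added:</b> {}</p>\n").format(3))
```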
@@ -2290,10 +2290,13 @@ class PDFDocument(object):
            import win32api
            import win32security
            import win32file
            import winreg
        except:
            raise ADEPTError('PyWin Extension (Win32API module) needed.\n'+\
                'Download from http://sourceforge.net/projects/pywin32/files/ ')
        try:
            import winreg
        except ImportError:
            import _winreg as winreg
        try:
            v0 = win32api.GetVolumeInformation('C:\\')
            v1 = win32api.GetSystemInfo()[6]
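This hunk revolves around importing the Windows registry module with a Python 2 fallback. A self-contained sketch of that pattern, separate from the plugin code (the registry read below is purely illustrative and not part of ineptpdf.py):

```python
# Sketch of the winreg / _winreg fallback shown in the hunk above.
# Python 3 exposes the Windows registry module as `winreg`; Python 2 named it `_winreg`.
try:
    import winreg
except ImportError:
    import _winreg as winreg  # Python 2 fallback

def read_user_string(subkey, value_name):
    # Illustrative only: read a string value from HKEY_CURRENT_USER.
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER, subkey) as key:
        value, _value_type = winreg.QueryValueEx(key, value_name)
        return value
```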