Compare commits

17 commits

| SHA1 |
|---|
| 93f02c625a |
| e95ed1a8ed |
| ba5927a20d |
| 297a9ddc66 |
| 4f34a9a196 |
| 529dd3f160 |
| 4163d5ccf4 |
| 867ac35b45 |
| 427137b0fe |
| ac9cdb1e98 |
| 2bedd75005 |
| 8b632e309f |
| bc968f8eca |
| 00ac669f76 |
| 694dfafd39 |
| a7856f5c32 |
| 38eabe7612 |
@@ -1,6 +1,6 @@
 From Apprentice Alf's Blog
 
-Adobe Adept ePub and PDF, .epub, .pdf
+Adobe Adept ePub, .epub
 
 This directory includes modified versions of the I♥CABBAGES Adobe Adept inept scripts for epubs. These scripts have been modified to work with OpenSSL on Windows as well as Linux and Mac OS X. His original scripts can be found in the clearly labelled folder. If a Windows User has OpenSSL installed, these scripts will make use of it in place of PyCrypto.
 
@@ -11,20 +11,8 @@ http://i-u2665-cabbages.blogspot.com/2009_02_01_archive.html
 
 There are two scripts:
 
-The first is called ineptkey_v5.1.pyw. Simply double-click to launch it and it will create a key file that is needed later to actually remove the DRM. This script need only be run once unless you change your ADE account information.
+The first is called ineptkey_vX.X.pyw. Simply double-click to launch it and it will create a key file that is needed later to actually remove the DRM. This script need only be run once unless you change your ADE account information.
 
-The second is called in ineptepub_v5.3.pyw. Simply double-click to launch it. It will ask for your previously generated key file and the path to the book you want to remove the DRM from.
+The second is called in ineptepub_vX.X.pyw. Simply double-click to launch it. It will ask for your previously generated key file and the path to the book you want to remove the DRM from.
 
 Both of these scripts are gui python programs. Python 2.X (32 bit) is already installed in Mac OSX. We recommend ActiveState's Active Python Version 2.X (32 bit) for Windows users.
 
-The latest version of ineptpdf to use is version 8.4.42, which improves support for some PDF files.
-
-ineptpdf version 8.4.42 can be found here:
-
-http://pastebin.com/kuKMXXsC
-
-It is not included in the tools archive.
-
-If that link is down, please check out the following website for some of the latest releases of these tools:
-
-http://ainept.freewebspace.com/
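As a rough illustration of the two-step workflow the README describes (first dump the ADE key, then decrypt a book with it), the sketch below drives the two scripts from the command line. It is only a sketch: the script file names and the key-file path are placeholders, and it assumes the command-line entry points shown in the diffs further down (`KEYFILE` for ineptkey, `KEYFILE INBOOK OUTBOOK` for ineptepub).

```python
# Hypothetical driver for the two-step workflow; script and file names are placeholders.
import subprocess

KEY = 'adeptkey.der'

# Step 1: extract the ADE account key (only needed once per ADE account).
subprocess.check_call(['python', 'ineptkey.pyw', KEY])

# Step 2: decrypt a book using that key.
subprocess.check_call(['python', 'ineptepub.pyw', KEY, 'book.epub', 'book_nodrm.epub'])
```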
ineptepub.pyw

@@ -1,7 +1,9 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
 
-# ineptepub.pyw, version 5.2
+from __future__ import with_statement
+
+# ineptepub.pyw, version 5.6
 # Copyright © 2009-2010 i♥cabbages
 
 # Released under the terms of the GNU General Public Licence, version 3 or
@@ -25,12 +27,13 @@
 # 5.1 - Improve OpenSSL error checking
 # 5.2 - Fix ctypes error causing segfaults on some systems
 # 5.3 - add support for OpenSSL on Windows, fix bug with some versions of libcrypto 0.9.8 prior to path level o
+# 5.4 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
+# 5.5 - On Windows try PyCrypto first, OpenSSL next
+# 5.6 - Modify interface to allow use with import
 """
 Decrypt Adobe ADEPT-encrypted EPUB books.
 """
 
-from __future__ import with_statement
-
 __license__ = 'GPL v3'
 
 import sys
@@ -257,7 +260,10 @@ def _load_crypto_pycrypto():
 
 def _load_crypto():
     AES = RSA = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
+    if sys.platform.startswith('win'):
+        cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
+    for loader in cryptolist:
         try:
             AES, RSA = loader()
             break
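The hunk above changes the crypto back-end selection so that on Windows PyCrypto is attempted before OpenSSL's libcrypto, while other platforms keep the original order. A minimal standalone sketch of that fallback-loader idiom (with stand-in loader functions, not the script's real ones) looks like this:

```python
# Sketch of the loader-ordering idiom; the two loader functions are stand-ins.
import sys

def load_openssl():
    raise ImportError('libcrypto not available in this sketch')

def load_pycrypto():
    from Crypto.Cipher import AES  # PyCrypto
    return AES

def load_crypto():
    # Default order: native OpenSSL first, PyCrypto as fallback.
    loaders = (load_openssl, load_pycrypto)
    if sys.platform.startswith('win'):
        # On Windows, prefer PyCrypto and fall back to a Windows libcrypto build.
        loaders = (load_pycrypto, load_openssl)
    for loader in loaders:
        try:
            return loader()
        except ImportError:
            continue
    return None
```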
@@ -288,6 +294,7 @@ class Decryptor(object):
         for elem in encryption.findall(expr):
             path = elem.get('URI', None)
             if path is not None:
+                path = path.encode('utf-8')
                 encrypted.add(path)
 
     def decompress(self, bytes):
@@ -305,45 +312,6 @@ class Decryptor(object):
             data = self.decompress(data)
         return data
 
-def cli_main(argv=sys.argv):
-    progname = os.path.basename(argv[0])
-    if AES is None:
-        print "%s: This script requires OpenSSL or PyCrypto, which must be" \
-              " installed separately. Read the top-of-script comment for" \
-              " details." % (progname,)
-        return 1
-    if len(argv) != 4:
-        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
-        return 1
-    keypath, inpath, outpath = argv[1:]
-    with open(keypath, 'rb') as f:
-        keyder = f.read()
-    rsa = RSA(keyder)
-    with closing(ZipFile(open(inpath, 'rb'))) as inf:
-        namelist = set(inf.namelist())
-        if 'META-INF/rights.xml' not in namelist or \
-           'META-INF/encryption.xml' not in namelist:
-            raise ADEPTError('%s: not an ADEPT EPUB' % (inpath,))
-        for name in META_NAMES:
-            namelist.remove(name)
-        rights = etree.fromstring(inf.read('META-INF/rights.xml'))
-        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
-        expr = './/%s' % (adept('encryptedKey'),)
-        bookkey = ''.join(rights.findtext(expr))
-        bookkey = rsa.decrypt(bookkey.decode('base64'))
-        # Padded as per RSAES-PKCS1-v1_5
-        if bookkey[-17] != '\x00':
-            raise ADEPTError('problem decrypting session key')
-        encryption = inf.read('META-INF/encryption.xml')
-        decryptor = Decryptor(bookkey[-16:], encryption)
-        kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
-        with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
-            zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
-            outf.writestr(zi, inf.read('mimetype'))
-            for path in namelist:
-                data = inf.read(path)
-                outf.writestr(path, decryptor.decrypt(path, data))
-    return 0
 
 class DecryptionDialog(Tkinter.Frame):
     def __init__(self, root):
@@ -439,6 +407,52 @@ class DecryptionDialog(Tkinter.Frame):
             return
         self.status['text'] = 'File successfully decrypted'
 
+
+def decryptBook(keypath, inpath, outpath):
+    with open(keypath, 'rb') as f:
+        keyder = f.read()
+    rsa = RSA(keyder)
+    with closing(ZipFile(open(inpath, 'rb'))) as inf:
+        namelist = set(inf.namelist())
+        if 'META-INF/rights.xml' not in namelist or \
+           'META-INF/encryption.xml' not in namelist:
+            raise ADEPTError('%s: not an ADEPT EPUB' % (inpath,))
+        for name in META_NAMES:
+            namelist.remove(name)
+        rights = etree.fromstring(inf.read('META-INF/rights.xml'))
+        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
+        expr = './/%s' % (adept('encryptedKey'),)
+        bookkey = ''.join(rights.findtext(expr))
+        bookkey = rsa.decrypt(bookkey.decode('base64'))
+        # Padded as per RSAES-PKCS1-v1_5
+        if bookkey[-17] != '\x00':
+            raise ADEPTError('problem decrypting session key')
+        encryption = inf.read('META-INF/encryption.xml')
+        decryptor = Decryptor(bookkey[-16:], encryption)
+        kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
+        with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
+            zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
+            outf.writestr(zi, inf.read('mimetype'))
+            for path in namelist:
+                data = inf.read(path)
+                outf.writestr(path, decryptor.decrypt(path, data))
+    return 0
+
+
+def cli_main(argv=sys.argv):
+    progname = os.path.basename(argv[0])
+    if AES is None:
+        print "%s: This script requires OpenSSL or PyCrypto, which must be" \
+              " installed separately. Read the top-of-script comment for" \
+              " details." % (progname,)
+        return 1
+    if len(argv) != 4:
+        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
+        return 1
+    keypath, inpath, outpath = argv[1:]
+    return decryptBook(keypath, inpath, outpath)
+
+
 def gui_main():
     root = Tkinter.Tk()
     if AES is None:
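The refactoring above pulls the decryption work out of cli_main() into decryptBook(keypath, inpath, outpath), which is what the "5.6 - Modify interface to allow use with import" note refers to. Assuming the script is importable as a module on your path (for example renamed or copied to ineptepub.py), calling it programmatically would look roughly like this:

```python
# Hypothetical import-based use of the refactored entry point; the module name
# and file paths are placeholders for whatever your local setup uses.
import ineptepub

ret = ineptepub.decryptBook('adeptkey.der', 'book.epub', 'book_nodrm.epub')
if ret != 0:
    print 'decryption failed'
```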
ineptkey.pyw

@@ -1,7 +1,9 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
 
-# ineptkey.pyw, version 5
+from __future__ import with_statement
+
+# ineptkey.pyw, version 5.4
 # Copyright © 2009-2010 i♥cabbages
 
 # Released under the terms of the GNU General Public Licence, version 3 or
@@ -32,13 +34,13 @@
 # Clean up and merge OS X support by unknown
 # 5.1 - add support for using OpenSSL on Windows in place of PyCrypto
 # 5.2 - added support for output of key to a particular file
+# 5.3 - On Windows try PyCrypto first, OpenSSL next
+# 5.4 - Modify interface to allow use of import
 
 """
 Retrieve Adobe ADEPT user key.
 """
 
-from __future__ import with_statement
-
 __license__ = 'GPL v3'
 
 import sys
@@ -115,7 +117,7 @@ if sys.platform.startswith('win'):
 
 def _load_crypto():
     AES = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    for loader in (_load_crypto_pycrypto, _load_crypto_libcrypto):
         try:
             AES = loader()
             break
@@ -414,10 +416,11 @@ class ExceptionDialog(Tkinter.Frame):
         label.pack(fill=Tkconstants.X, expand=0)
         self.text = Tkinter.Text(self)
         self.text.pack(fill=Tkconstants.BOTH, expand=1)
 
         self.text.insert(Tkconstants.END, text)
 
-def cli_main(argv=sys.argv):
-    keypath = argv[1]
+
+def extractKeyfile(keypath):
     try:
         success = retrieve_key(keypath)
     except ADEPTError, e:
@@ -430,6 +433,12 @@ def cli_main(argv=sys.argv):
         return 1
     return 0
 
+
+def cli_main(argv=sys.argv):
+    keypath = argv[1]
+    return extractKeyfile(keypath)
+
+
 def main(argv=sys.argv):
     root = Tkinter.Tk()
     root.withdraw()
Adobe_PDF_Tools/README_ineptpdf.txt (new file, 18 lines)

@@ -0,0 +1,18 @@
+From Apprentice Alf's Blog
+
+Adobe Adept PDF, .pdf
+
+This directory includes modified versions of the I♥CABBAGES Adobe Adept inept scripts for pdfs. These scripts have been modified to work with OpenSSL on Windows as well as Linux and Mac OS X. If a Windows User has OpenSSL installed, these scripts will make use of it in place of PyCrypto.
+
+The wonderful I♥CABBAGES has produced scripts that will remove the DRM from ePubs and PDFs encryped with Adobe’s DRM. These scripts require installation of the PyCrypto python package *or* the OpenSSL library on Windows. For Mac OS X and Linux boxes, these scripts use the already installed OpenSSL libcrypto so there is no additional requirements for these platforms.
+
+For more info, see the author's blog:
+http://i-u2665-cabbages.blogspot.com/2009_02_01_archive.html
+
+There are two scripts:
+
+The first is called ineptkey_vX.X.pyw. Simply double-click to launch it and it will create a key file that is needed later to actually remove the DRM. This script need only be run once unless you change your ADE account information.
+
+The second is called in ineptpdf_vX.X.pyw. Simply double-click to launch it. It will ask for your previously generated key file and the path to the book you want to remove the DRM from.
+
+Both of these scripts are gui python programs. Python 2.X (32 bit) is already installed in Mac OSX. We recommend ActiveState's Active Python Version 2.X (32 bit) for Windows users.
ineptkey.pyw

@@ -1,7 +1,9 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
 
-# ineptkey.pyw, version 5
+from __future__ import with_statement
+
+# ineptkey.pyw, version 5.4
 # Copyright © 2009-2010 i♥cabbages
 
 # Released under the terms of the GNU General Public Licence, version 3 or
@@ -32,13 +34,13 @@
 # Clean up and merge OS X support by unknown
 # 5.1 - add support for using OpenSSL on Windows in place of PyCrypto
 # 5.2 - added support for output of key to a particular file
+# 5.3 - On Windows try PyCrypto first, OpenSSL next
+# 5.4 - Modify interface to allow use of import
 
 """
 Retrieve Adobe ADEPT user key.
 """
 
-from __future__ import with_statement
-
 __license__ = 'GPL v3'
 
 import sys
@@ -115,7 +117,7 @@ if sys.platform.startswith('win'):
 
 def _load_crypto():
     AES = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    for loader in (_load_crypto_pycrypto, _load_crypto_libcrypto):
         try:
             AES = loader()
             break
@@ -414,10 +416,11 @@ class ExceptionDialog(Tkinter.Frame):
         label.pack(fill=Tkconstants.X, expand=0)
         self.text = Tkinter.Text(self)
         self.text.pack(fill=Tkconstants.BOTH, expand=1)
 
         self.text.insert(Tkconstants.END, text)
 
-def cli_main(argv=sys.argv):
-    keypath = argv[1]
+
+def extractKeyfile(keypath):
     try:
         success = retrieve_key(keypath)
     except ADEPTError, e:
@@ -430,6 +433,12 @@ def cli_main(argv=sys.argv):
         return 1
     return 0
 
+
+def cli_main(argv=sys.argv):
+    keypath = argv[1]
+    return extractKeyfile(keypath)
+
+
 def main(argv=sys.argv):
     root = Tkinter.Tk()
     root.withdraw()
ineptpdf.pyw

@@ -1,5 +1,7 @@
 #! /usr/bin/env python
-# ineptpdf.pyw, version 7.6
+# ineptpdf.pyw, version 7.9
 
+from __future__ import with_statement
+
 # To run this program install Python 2.6 from http://www.python.org/download/
 # and OpenSSL (already installed on Mac OS X and Linux) OR
@@ -29,13 +31,14 @@
 # implemented ARC4 interface to OpenSSL
 # fixed minor typos
 # 7.6 - backported AES and other fixes from version 8.4.48
+# 7.7 - On Windows try PyCrypto first and OpenSSL next
+# 7.8 - Modify interface to allow use of import
+# 7.9 - Bug fix for some session key errors when len(bookkey) > length required
 
 """
 Decrypts Adobe ADEPT-encrypted PDF files.
 """
 
-from __future__ import with_statement
-
 __license__ = 'GPL v3'
 
 import sys
@@ -154,6 +157,7 @@ def _load_crypto_libcrypto():
             return out.raw
 
     class AES(object):
+        MODE_CBC = 0
         @classmethod
         def new(cls, userkey, mode, iv):
             self = AES()
@@ -319,7 +323,10 @@ def _load_crypto_pycrypto():
 
 def _load_crypto():
     ARC4 = RSA = AES = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
+    if sys.platform.startswith('win'):
+        cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
+    for loader in cryptolist:
         try:
             ARC4, RSA, AES = loader()
             break
@@ -1526,16 +1533,30 @@ class PDFDocument(object):
             bookkey = bookkey[index:]
             ebx_V = int_value(param.get('V', 4))
             ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
-            # added because of the booktype / decryption book session key error
-            if ebx_V == 3:
-                V = 3
-            elif ebx_V < 4 or ebx_type < 6:
-                V = ord(bookkey[0])
-                bookkey = bookkey[1:]
-            else:
-                V = 2
-            if length and len(bookkey) != length:
-                raise ADEPTError('error decrypting book session key')
+            # added because of improper booktype / decryption book session key errors
+            if length > 0:
+                if len(bookkey) == length:
+                    if ebx_V == 3:
+                        V = 3
+                    else:
+                        V = 2
+                elif len(bookkey) == length + 1:
+                    V = ord(bookkey[0])
+                    bookkey = bookkey[1:]
+                else:
+                    print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
+                    print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
+                    print "bookkey[0] is %d" % ord(bookkey[0])
+                    raise ADEPTError('error decrypting book session key - mismatched length')
+            else:
+                # proper length unknown try with whatever you have
+                print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
+                print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
+                print "bookkey[0] is %d" % ord(bookkey[0])
+                if ebx_V == 3:
+                    V = 3
+                else:
+                    V = 2
             self.decrypt_key = bookkey
             self.genkey = self.genkey_v3 if V == 3 else self.genkey_v2
             self.decipher = self.decrypt_rc4
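The last hunk above is the "7.9" fix: instead of always stripping a version byte and then failing when len(bookkey) != length, the new code first compares the decrypted blob's length with the expected key length and only treats the first byte as a version marker when the blob is exactly one byte longer. A standalone restatement of that decision logic, purely for illustration (the real code sets V and bookkey inline inside PDFDocument), could look like this:

```python
# Illustrative restatement of the ineptpdf 7.9 session-key handling.
def split_session_key(bookkey, length, ebx_V):
    """Return (V, key) given the decrypted blob, the expected key length,
    and the EBX 'V' value. Mirrors the logic added in the hunk above."""
    if length > 0:
        if len(bookkey) == length:
            # Blob is already the right size; no leading version byte.
            return (3 if ebx_V == 3 else 2), bookkey
        elif len(bookkey) == length + 1:
            # One extra byte: treat it as the version marker and strip it off.
            return ord(bookkey[0]), bookkey[1:]
        else:
            raise ValueError('mismatched session key length')
    # Expected length unknown: keep the blob as-is and guess V from ebx_V.
    return (3 if ebx_V == 3 else 2), bookkey
```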
@@ -2072,25 +2093,6 @@ class PDFSerializer(object):
             self.write('\n')
         self.write('endobj\n')
 
-def cli_main(argv=sys.argv):
-    progname = os.path.basename(argv[0])
-    if RSA is None:
-        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
-              "separately. Read the top-of-script comment for details." % \
-              (progname,)
-        return 1
-    if len(argv) != 4:
-        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
-        return 1
-    keypath, inpath, outpath = argv[1:]
-    with open(inpath, 'rb') as inf:
-        serializer = PDFSerializer(inf, keypath)
-        # hope this will fix the 'bad file descriptor' problem
-        with open(outpath, 'wb') as outf:
-            # help construct to make sure the method runs to the end
-            serializer.dump(outf)
-    return 0
-
 
 class DecryptionDialog(Tkinter.Frame):
     def __init__(self, root):
@@ -2194,6 +2196,31 @@ class DecryptionDialog(Tkinter.Frame):
                 'Close this window or decrypt another pdf file.'
             return
 
+
+def decryptBook(keypath, inpath, outpath):
+    with open(inpath, 'rb') as inf:
+        serializer = PDFSerializer(inf, keypath)
+        # hope this will fix the 'bad file descriptor' problem
+        with open(outpath, 'wb') as outf:
+            # help construct to make sure the method runs to the end
+            serializer.dump(outf)
+    return 0
+
+
+def cli_main(argv=sys.argv):
+    progname = os.path.basename(argv[0])
+    if RSA is None:
+        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
+              "separately. Read the top-of-script comment for details." % \
+              (progname,)
+        return 1
+    if len(argv) != 4:
+        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
+        return 1
+    keypath, inpath, outpath = argv[1:]
+    return decryptBook(keypath, inpath, outpath)
+
+
 def gui_main():
     root = Tkinter.Tk()
     if RSA is None:
@@ -5,13 +5,13 @@ Barnes and Noble EPUB ebooks use a form of Social DRM which requires information
 For more info, see the author's blog:
 http://i-u2665-cabbages.blogspot.com/2009_12_01_archive.html
 
-The original scripts by IHeartCabbages are available here as well. These scripts have been modified to allow the use of OpenSSL in place of PyCrypto to make them easier to run on Linux and Mac OS X.
+The original scripts by IHeartCabbages are available here as well. These scripts have been modified to allow the use of OpenSSL in place of PyCrypto to make them easier to run on Linux and Mac OS X, as well as to fix some minor bugs/
 
 There are 2 scripts:
 
-The first is ignoblekeygen_v2.pyw. Double-click to launch it and provide the required information, and this program will generate a key file needed to remove the DRM from the books. This key file need only be generated once unless either you change your credit card number or your name on the credit card (or if you use a different credit card to purchase your book).
+The first is ignoblekeygen_vX.X.pyw. Double-click to launch it and provide the required information, and this program will generate a key file needed to remove the DRM from the books. This key file need only be generated once unless either you change your credit card number or your name on the credit card (or if you use a different credit card to purchase your book).
 
-The second is ignobleepub_v3.pyw. Double-click it and it will ask for your key file and the path to the book to remove the DRM from.
+The second is ignobleepub_vX.X.pyw. Double-click it and it will ask for your key file and the path to the book to remove the DRM from.
 
 All of these scripts are gui python programs. Python 2.X (32 bit) is already installed in Mac OSX. We recommend ActiveState's Active Python Version 2.X (32 bit) for Windows users.
 
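The ignoblekeygen diff further down keeps a module-level generate_keyfile(name, ccn, outpath) function, which is what the key-file step described here boils down to. Assuming the script is importable as a module (for example saved as ignoblekeygen.py), a programmatic call would look roughly like the sketch below; the name, card number and output file name are dummies.

```python
# Hypothetical import-based use of the B&N keyfile generator; module name,
# sample name, card number and output path are placeholders.
import ignoblekeygen

ignoblekeygen.generate_keyfile('My Name', '1234123412341234', 'bnepubkey.b64')
```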
ignobleepub.pyw

@@ -1,6 +1,8 @@
 #! /usr/bin/python
 
-# ignobleepub.pyw, version 3
+from __future__ import with_statement
+
+# ignobleepub.pyw, version 3.4
 
 # To run this program install Python 2.6 from <http://www.python.org/download/>
 # and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
@@ -12,8 +14,10 @@
 # 2 - Added OS X support by using OpenSSL when available
 # 3 - screen out improper key lengths to prevent segfaults on Linux
 # 3.1 - Allow Windows versions of libcrypto to be found
+# 3.2 - add support for encoding to 'utf-8' when building up list of files to cecrypt from encryption.xml
+# 3.3 - On Windows try PyCrypto first and OpenSSL next
+# 3.4 - Modify interace to allow use with import
 
-from __future__ import with_statement
 
 __license__ = 'GPL v3'
 
@@ -105,15 +109,18 @@ def _load_crypto_pycrypto():
 
 def _load_crypto():
     AES = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
+    if sys.platform.startswith('win'):
+        cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
+    for loader in cryptolist:
         try:
             AES = loader()
             break
         except (ImportError, IGNOBLEError):
             pass
     return AES
-AES = _load_crypto()
 
+AES = _load_crypto()
+
 
@@ -144,6 +151,7 @@ class Decryptor(object):
                              enc('CipherReference'))
         for elem in encryption.findall(expr):
             path = elem.get('URI', None)
+            path = path.encode('utf-8')
             if path is not None:
                 encrypted.add(path)
 
@@ -163,49 +171,6 @@ class Decryptor(object):
         return data
 
 
-def cli_main(argv=sys.argv):
-    progname = os.path.basename(argv[0])
-    if AES is None:
-        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
-              "separately. Read the top-of-script comment for details." % \
-              (progname,)
-        return 1
-    if len(argv) != 4:
-        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
-        return 1
-    keypath, inpath, outpath = argv[1:]
-    with open(keypath, 'rb') as f:
-        keyb64 = f.read()
-    key = keyb64.decode('base64')[:16]
-    # aes = AES.new(key, AES.MODE_CBC)
-    aes = AES(key)
-
-    with closing(ZipFile(open(inpath, 'rb'))) as inf:
-        namelist = set(inf.namelist())
-        if 'META-INF/rights.xml' not in namelist or \
-           'META-INF/encryption.xml' not in namelist:
-            raise IGNOBLEError('%s: not an B&N ADEPT EPUB' % (inpath,))
-        for name in META_NAMES:
-            namelist.remove(name)
-        rights = etree.fromstring(inf.read('META-INF/rights.xml'))
-        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
-        expr = './/%s' % (adept('encryptedKey'),)
-        bookkey = ''.join(rights.findtext(expr))
-        bookkey = aes.decrypt(bookkey.decode('base64'))
-        bookkey = bookkey[:-ord(bookkey[-1])]
-        encryption = inf.read('META-INF/encryption.xml')
-        decryptor = Decryptor(bookkey[-16:], encryption)
-        kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
-        with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
-            zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
-            outf.writestr(zi, inf.read('mimetype'))
-            for path in namelist:
-                data = inf.read(path)
-                outf.writestr(path, decryptor.decrypt(path, data))
-    return 0
-
 
 class DecryptionDialog(Tkinter.Frame):
     def __init__(self, root):
         Tkinter.Frame.__init__(self, root, border=5)
@@ -301,6 +266,53 @@ class DecryptionDialog(Tkinter.Frame):
             return
         self.status['text'] = 'File successfully decrypted'
 
+
+def decryptBook(keypath, inpath, outpath):
+    with open(keypath, 'rb') as f:
+        keyb64 = f.read()
+    key = keyb64.decode('base64')[:16]
+    # aes = AES.new(key, AES.MODE_CBC)
+    aes = AES(key)
+
+    with closing(ZipFile(open(inpath, 'rb'))) as inf:
+        namelist = set(inf.namelist())
+        if 'META-INF/rights.xml' not in namelist or \
+           'META-INF/encryption.xml' not in namelist:
+            raise IGNOBLEError('%s: not an B&N ADEPT EPUB' % (inpath,))
+        for name in META_NAMES:
+            namelist.remove(name)
+        rights = etree.fromstring(inf.read('META-INF/rights.xml'))
+        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
+        expr = './/%s' % (adept('encryptedKey'),)
+        bookkey = ''.join(rights.findtext(expr))
+        bookkey = aes.decrypt(bookkey.decode('base64'))
+        bookkey = bookkey[:-ord(bookkey[-1])]
+        encryption = inf.read('META-INF/encryption.xml')
+        decryptor = Decryptor(bookkey[-16:], encryption)
+        kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
+        with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
+            zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
+            outf.writestr(zi, inf.read('mimetype'))
+            for path in namelist:
+                data = inf.read(path)
+                outf.writestr(path, decryptor.decrypt(path, data))
+    return 0
+
+
+def cli_main(argv=sys.argv):
+    progname = os.path.basename(argv[0])
+    if AES is None:
+        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
+              "separately. Read the top-of-script comment for details." % \
+              (progname,)
+        return 1
+    if len(argv) != 4:
+        print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
+        return 1
+    keypath, inpath, outpath = argv[1:]
+    return decryptBook(keypath, inpath, outpath)
+
+
 def gui_main():
     root = Tkinter.Tk()
     if AES is None:
@@ -317,6 +329,7 @@ def gui_main():
     root.mainloop()
     return 0
 
+
 if __name__ == '__main__':
     if len(sys.argv) > 1:
         sys.exit(cli_main())
ignoblekeygen.pyw

@@ -1,6 +1,8 @@
 #! /usr/bin/python
 
-# ignoblekeygen.pyw, version 2
+from __future__ import with_statement
+
+# ignoblekeygen.pyw, version 2.3
 
 # To run this program install Python 2.6 from <http://www.python.org/download/>
 # and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
@@ -11,13 +13,13 @@
 # 1 - Initial release
 # 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
 # 2.1 - Allow Windows versions of libcrypto to be found
+# 2.2 - On Windows try PyCrypto first and then OpenSSL next
+# 2.3 - Modify interface to allow use of import
 
 """
 Generate Barnes & Noble EPUB user key from name and credit card number.
 """
 
-from __future__ import with_statement
-
 __license__ = 'GPL v3'
 
 import sys
@@ -102,11 +104,12 @@ def _load_crypto_pycrypto():
 
     return AES
 
 
 def _load_crypto():
     AES = None
-    for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+    cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
+    if sys.platform.startswith('win'):
+        cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
+    for loader in cryptolist:
         try:
             AES = loader()
             break
@@ -119,6 +122,7 @@ AES = _load_crypto()
 def normalize_name(name):
     return ''.join(x for x in name.lower() if x != ' ')
 
+
 def generate_keyfile(name, ccn, outpath):
     name = normalize_name(name) + '\x00'
     ccn = ccn + '\x00'
@@ -132,19 +136,6 @@ def generate_keyfile(name, ccn, outpath):
         f.write(userkey.encode('base64'))
     return userkey
 
-def cli_main(argv=sys.argv):
-    progname = os.path.basename(argv[0])
-    if AES is None:
-        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
-              "separately. Read the top-of-script comment for details." % \
-              (progname,)
-        return 1
-    if len(argv) != 4:
-        print "usage: %s NAME CC# OUTFILE" % (progname,)
-        return 1
-    name, ccn, outpath = argv[1:]
-    generate_keyfile(name, ccn, outpath)
-    return 0
-
 
 class DecryptionDialog(Tkinter.Frame):
     def __init__(self, root):
@@ -210,6 +201,22 @@ class DecryptionDialog(Tkinter.Frame):
             return
         self.status['text'] = 'Keyfile successfully generated'
 
+
+def cli_main(argv=sys.argv):
+    progname = os.path.basename(argv[0])
+    if AES is None:
+        print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
+              "separately. Read the top-of-script comment for details." % \
+              (progname,)
+        return 1
+    if len(argv) != 4:
+        print "usage: %s NAME CC# OUTFILE" % (progname,)
+        return 1
+    name, ccn, outpath = argv[1:]
+    generate_keyfile(name, ccn, outpath)
+    return 0
+
+
 def gui_main():
     root = Tkinter.Tk()
     if AES is None:
Calibre_Plugins/K4MobiDeDRM_plugin/__init__.py (new file, 119 lines)

@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+from __future__ import with_statement
+
+from calibre.customize import FileTypePlugin
+from calibre.gui2 import is_ok_to_use_qt
+# from calibre.ptempfile import PersistentTemporaryDirectory
+
+from calibre_plugins.k4mobidedrm import kgenpids
+from calibre_plugins.k4mobidedrm import topazextract
+from calibre_plugins.k4mobidedrm import mobidedrm
+
+import sys
+import os
+import re
+
+class K4DeDRM(FileTypePlugin):
+    name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
+    description = 'Removes DRM from Mobipocket, Kindle/Mobi, Kindle/Topaz and Kindle/Print Replica files. Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
+    supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
+    author = 'DiapDealer, SomeUpdates' # The author of this plugin
+    version = (0, 3, 8) # The version number of this plugin
+    file_types = set(['prc','mobi','azw','azw1','azw4','tpz']) # The file types that this plugin will be applied to
+    on_import = True # Run this plugin during the import
+    priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
+    minimum_calibre_version = (0, 7, 55)
+
+    def run(self, path_to_ebook):
+        plug_ver = '.'.join(str(self.version).strip('()').replace(' ', '').split(','))
+        k4 = True
+        if sys.platform.startswith('linux'):
+            k4 = False
+        pids = []
+        serials = []
+        kInfoFiles = []
+        # Get supplied list of PIDs to try from plugin customization.
+        customvalues = self.site_customization.split(',')
+        for customvalue in customvalues:
+            customvalue = str(customvalue)
+            customvalue = customvalue.strip()
+            if len(customvalue) == 10 or len(customvalue) == 8:
+                pids.append(customvalue)
+            else :
+                if len(customvalue) == 16 and customvalue[0] == 'B':
+                    serials.append(customvalue)
+                else:
+                    print "%s is not a valid Kindle serial number or PID." % str(customvalue)
+
+        # Load any kindle info files (*.info) included Calibre's config directory.
+        try:
+            # Find Calibre's configuration directory.
+            confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
+            print 'K4MobiDeDRM v%s: Calibre configuration directory = %s' % (plug_ver, confpath)
+            files = os.listdir(confpath)
+            filefilter = re.compile("\.info$|\.kinf$", re.IGNORECASE)
+            files = filter(filefilter.search, files)
+            if files:
+                for filename in files:
+                    fpath = os.path.join(confpath, filename)
+                    kInfoFiles.append(fpath)
+                    print 'K4MobiDeDRM v%s: Kindle info/kinf file %s found in config folder.' % (plug_ver, filename)
+        except IOError:
+            print 'K4MobiDeDRM v%s: Error reading kindle info/kinf files from config directory.' % plug_ver
+            pass
+
+        mobi = True
+        magic3 = file(path_to_ebook,'rb').read(3)
+        if magic3 == 'TPZ':
+            mobi = False
+
+        bookname = os.path.splitext(os.path.basename(path_to_ebook))[0]
+
+        if mobi:
+            mb = mobidedrm.MobiBook(path_to_ebook)
+        else:
+            mb = topazextract.TopazBook(path_to_ebook)
+
+        title = mb.getBookTitle()
+        md1, md2 = mb.getPIDMetaInfo()
+        pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
+
+        try:
+            mb.processBook(pidlst)
+
+        except mobidedrm.DrmException, e:
+            #if you reached here then no luck raise and exception
+            if is_ok_to_use_qt():
+                from PyQt4.Qt import QMessageBox
+                d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
+                d.show()
+                d.raise_()
+                d.exec_()
+            raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
+        except topazextract.TpzDRMError, e:
+            #if you reached here then no luck raise and exception
+            if is_ok_to_use_qt():
+                from PyQt4.Qt import QMessageBox
+                d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
+                d.show()
+                d.raise_()
+                d.exec_()
+            raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
+
+        print "Success!"
+        if mobi:
+            if mb.getPrintReplica():
+                of = self.temporary_file(bookname+'.azw4')
+                print 'K4MobiDeDRM v%s: Print Replica format detected.' % plug_ver
+            else:
+                of = self.temporary_file(bookname+'.mobi')
+            mb.getMobiFile(of.name)
+        else:
+            of = self.temporary_file(bookname+'.htmlz')
+            mb.getHTMLZip(of.name)
+        mb.cleanup()
+        return of.name
+
+    def customization_help(self, gui=False):
+        return 'Enter 10 character PIDs and/or Kindle serial numbers, use a comma (no spaces) to separate each PID or SerialNumber from the next.'
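The plugin's run() method shows how the Calibre "plugin customization" string is interpreted: comma-separated values, where 8- or 10-character entries are treated as PIDs and 16-character entries starting with 'B' as Kindle serial numbers. A standalone restatement of that parsing, just for illustration, could look like this:

```python
# Illustrative restatement of the customization parsing used in K4DeDRM.run().
def parse_customization(custom):
    """Split a comma-separated customization string into (pids, serials)."""
    pids, serials = [], []
    for value in custom.split(','):
        value = value.strip()
        if len(value) in (8, 10):
            pids.append(value)
        elif len(value) == 16 and value.startswith('B'):
            serials.append(value)
        else:
            print "%s is not a valid Kindle serial number or PID." % value
    return pids, serials

# Example with dummy values:
# pids, serials = parse_customization('A1B2C3D4E5,B0123456789ABCDE')
```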
@@ -20,6 +20,8 @@ import getopt
 from struct import pack
 from struct import unpack
 
+class TpzDRMError(Exception):
+    pass
+
 # Get a 7 bit encoded number from string. The most
 # significant byte comes first and has the high bit (8th) set
@@ -138,7 +140,8 @@ class Dictionary(object):
             return self.stable[self.pos]
         else:
             print "Error - %d outside of string table limits" % val
-            sys.exit(-1)
+            raise TpzDRMError('outside of string table limits')
+            # sys.exit(-1)
 
     def getSize(self):
         return self.size
@@ -235,6 +238,7 @@ class PageParser(object):
 
        'group' : (1, 'snippets', 1, 0),
        'group.type' : (1, 'scalar_text', 0, 0),
+       'group._tag' : (1, 'scalar_text', 0, 0),
 
        'region' : (1, 'snippets', 1, 0),
        'region.type' : (1, 'scalar_text', 0, 0),
@@ -257,6 +261,11 @@ class PageParser(object):
        'paragraph.class' : (1, 'scalar_text', 0, 0),
        'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
        'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
+       'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
+       'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
+       'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
+       'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
 
 
        'word_semantic' : (1, 'snippets', 1, 1),
        'word_semantic.type' : (1, 'scalar_text', 0, 0),
@@ -271,11 +280,17 @@ class PageParser(object):
 
        '_span' : (1, 'snippets', 1, 0),
        '_span.firstWord' : (1, 'scalar_number', 0, 0),
-       '-span.lastWord' : (1, 'scalar_number', 0, 0),
+       '_span.lastWord' : (1, 'scalar_number', 0, 0),
+       '_span.gridSize' : (1, 'scalar_number', 0, 0),
+       '_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
+       '_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
 
        'span' : (1, 'snippets', 1, 0),
        'span.firstWord' : (1, 'scalar_number', 0, 0),
        'span.lastWord' : (1, 'scalar_number', 0, 0),
+       'span.gridSize' : (1, 'scalar_number', 0, 0),
+       'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
+       'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
 
        'extratokens' : (1, 'snippets', 1, 0),
        'extratokens.type' : (1, 'scalar_text', 0, 0),
@@ -730,6 +745,19 @@ class PageParser(object):
        return xmlpage
 
 
+def fromData(dict, fname):
+    flat_xml = True
+    debug = False
+    pp = PageParser(fname, dict, debug, flat_xml)
+    xmlpage = pp.process()
+    return xmlpage
+
+def getXML(dict, fname):
+    flat_xml = False
+    debug = False
+    pp = PageParser(fname, dict, debug, flat_xml)
+    xmlpage = pp.process()
+    return xmlpage
+
 def usage():
     print 'Usage: '
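Replacing sys.exit(-1) with a raised TpzDRMError matters mainly for the Calibre plugin shown earlier, which wraps mb.processBook(...) in an except topazextract.TpzDRMError handler: an exception can be handled by the caller and reported in the GUI, whereas sys.exit() would abort the whole run. In sketch form (module and function names as used in the plugin code above; this only runs inside Calibre where calibre_plugins is importable):

```python
# Sketch of the caller-side benefit of raising TpzDRMError instead of exiting.
from calibre_plugins.k4mobidedrm import topazextract

def try_decrypt(book_path, pidlst):
    mb = topazextract.TopazBook(book_path)
    try:
        mb.processBook(pidlst)
    except topazextract.TpzDRMError, e:
        # Report and keep the host application (e.g. Calibre) running.
        print "Topaz DRM removal failed: %s" % str(e)
        return None
    return mb
```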
@@ -12,15 +12,14 @@ from struct import unpack
|
|||||||
|
|
||||||
|
|
||||||
class DocParser(object):
|
class DocParser(object):
|
||||||
def __init__(self, flatxml, classlst, fileid, bookDir, fixedimage):
|
def __init__(self, flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
self.id = os.path.basename(fileid).replace('.dat','')
|
self.id = os.path.basename(fileid).replace('.dat','')
|
||||||
self.svgcount = 0
|
self.svgcount = 0
|
||||||
self.docList = flatxml.split('\n')
|
self.docList = flatxml.split('\n')
|
||||||
self.docSize = len(self.docList)
|
self.docSize = len(self.docList)
|
||||||
self.classList = {}
|
self.classList = {}
|
||||||
self.bookDir = bookDir
|
self.bookDir = bookDir
|
||||||
self.glyphPaths = { }
|
self.gdict = gdict
|
||||||
self.numPaths = 0
|
|
||||||
tmpList = classlst.split('\n')
|
tmpList = classlst.split('\n')
|
||||||
for pclass in tmpList:
|
for pclass in tmpList:
|
||||||
if pclass != '':
|
if pclass != '':
|
||||||
@@ -41,9 +40,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
def getGlyph(self, gid):
|
def getGlyph(self, gid):
|
||||||
result = ''
|
result = ''
|
||||||
id='gl%d' % gid
|
id='id="gl%d"' % gid
|
||||||
return self.glyphPaths[id]
|
return self.gdict.lookup(id)
|
||||||
|
|
||||||
|
|
||||||
def glyphs_to_image(self, glyphList):
|
def glyphs_to_image(self, glyphList):
|
||||||
|
|
||||||
@@ -52,31 +50,12 @@ class DocParser(object):
|
|||||||
e = path.find(' ',b)
|
e = path.find(' ',b)
|
||||||
return int(path[b:e])
|
return int(path[b:e])
|
||||||
|
|
||||||
def extractID(path, key):
|
|
||||||
b = path.find(key) + len(key)
|
|
||||||
e = path.find('"',b)
|
|
||||||
return path[b:e]
|
|
||||||
|
|
||||||
|
|
||||||
svgDir = os.path.join(self.bookDir,'svg')
|
svgDir = os.path.join(self.bookDir,'svg')
|
||||||
glyfile = os.path.join(svgDir,'glyphs.svg')
|
|
||||||
|
|
||||||
imgDir = os.path.join(self.bookDir,'img')
|
imgDir = os.path.join(self.bookDir,'img')
|
||||||
imgname = self.id + '_%04d.svg' % self.svgcount
|
imgname = self.id + '_%04d.svg' % self.svgcount
|
||||||
imgfile = os.path.join(imgDir,imgname)
|
imgfile = os.path.join(imgDir,imgname)
|
||||||
|
|
||||||
# build hashtable of glyph paths keyed by glyph id
|
|
||||||
if self.numPaths == 0:
|
|
||||||
gfile = open(glyfile, 'r')
|
|
||||||
while True:
|
|
||||||
path = gfile.readline()
|
|
||||||
if (path == ''): break
|
|
||||||
glyphid = extractID(path,'id="')
|
|
||||||
self.glyphPaths[glyphid] = path
|
|
||||||
self.numPaths += 1
|
|
||||||
gfile.close()
|
|
||||||
|
|
||||||
|
|
||||||
# get glyph information
|
# get glyph information
|
||||||
gxList = self.getData('info.glyph.x',0,-1)
|
gxList = self.getData('info.glyph.x',0,-1)
|
||||||
gyList = self.getData('info.glyph.y',0,-1)
|
gyList = self.getData('info.glyph.y',0,-1)
|
||||||
@@ -89,7 +68,7 @@ class DocParser(object):
|
|||||||
ys = []
|
ys = []
|
||||||
gdefs = []
|
gdefs = []
|
||||||
|
|
||||||
# get path defintions, positions, dimensions for ecah glyph
|
# get path defintions, positions, dimensions for each glyph
|
||||||
# that makes up the image, and find min x and min y to reposition origin
|
# that makes up the image, and find min x and min y to reposition origin
|
||||||
minx = -1
|
minx = -1
|
||||||
miny = -1
|
miny = -1
|
||||||
@@ -292,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -301,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -326,6 +309,15 @@ class DocParser(object):
|
|||||||
lastGlyph = firstglyphList[last]
|
lastGlyph = firstglyphList[last]
|
||||||
else :
|
else :
|
||||||
lastGlyph = len(gidList)
|
lastGlyph = len(gidList)
|
||||||
|
|
||||||
|
# handle case of white sapce paragraphs with no actual glyphs in them
|
||||||
|
# by reverting to text based paragraph
|
||||||
|
if firstGlyph >= lastGlyph:
|
||||||
|
# revert to standard text based paragraph
|
||||||
|
for wordnum in xrange(first, last):
|
||||||
|
result.append(('ocr', wordnum))
|
||||||
|
return pclass, result
|
||||||
|
|
||||||
for glyphnum in xrange(firstGlyph, lastGlyph):
|
for glyphnum in xrange(firstGlyph, lastGlyph):
|
||||||
glyphList.append(glyphnum)
|
glyphList.append(glyphnum)
|
||||||
# include any extratokens if they exist
|
# include any extratokens if they exist
|
||||||
@@ -365,6 +357,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -524,6 +518,72 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
@@ -531,6 +591,7 @@ class DocParser(object):
    def process(self):

        htmlpage = ''
+        tocinfo = ''

        # get the ocr text
        (pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
@@ -656,9 +717,9 @@ class DocParser(object):
                    ptype = 'end'
                    first_para_continued = False
                (pclass, pdesc) = self.getParaDescription(start,end, regtype)
+                tocinfo += self.buildTOCEntry(pdesc)
                htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)


            elif (regtype == 'vertical') or (regtype == 'table') :
                ptype = 'full'
                if inGroup:
@@ -716,15 +777,11 @@ class DocParser(object):
                    htmlpage = htmlpage[0:-4]
                last_para_continued = False

-        return htmlpage
+        return htmlpage, tocinfo


-def convert2HTML(flatxml, classlst, fileid, bookDir, fixedimage):
+def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):

    # create a document parser
-    dp = DocParser(flatxml, classlst, fileid, bookDir, fixedimage)
-    htmlpage = dp.process()
-
-    return htmlpage
+    dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
+    htmlpage, tocinfo = dp.process()
+    return htmlpage, tocinfo
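The net effect of the flatxml2html.py changes above: convert2HTML() now takes the glyph dictionary (gdict) and returns a (htmlpage, tocinfo) pair instead of a bare HTML string. A minimal sketch of how a caller adapts, following the per-page loop that genbook.py uses further down in this diff; the surrounding variables (dict, classlst, fname, bookDir, gd, fixedimage, tocentries, htmlstr) are assumed to be set up exactly as in that loop.

# sketch only: per-page call with the new two-value return
flat_xml = convert2xml.fromData(dict, fname)
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
tocentries += tocinfo    # accumulated 'title|pagenum' lines, used later to build toc.xhtml
htmlstr += pagehtml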
250    Calibre_Plugins/K4MobiDeDRM_plugin/flatxml2svg.py    Normal file
@@ -0,0 +1,250 @@
#! /usr/bin/python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

import sys
import csv
import os
import getopt
from struct import pack
from struct import unpack


class PParser(object):
    def __init__(self, gd, flatxml, meta_array):
        self.gd = gd
        self.flatdoc = flatxml.split('\n')
        self.docSize = len(self.flatdoc)
        self.temp = []

        self.ph = -1
        self.pw = -1
        startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
        for p in startpos:
            (name, argres) = self.lineinDoc(p)
            self.ph = max(self.ph, int(argres))
        startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
        for p in startpos:
            (name, argres) = self.lineinDoc(p)
            self.pw = max(self.pw, int(argres))

        if self.ph <= 0:
            self.ph = int(meta_array.get('pageHeight', '11000'))
        if self.pw <= 0:
            self.pw = int(meta_array.get('pageWidth', '8500'))

        res = []
        startpos = self.posinDoc('info.glyph.x')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.x', p)
            res.extend(argres)
        self.gx = res

        res = []
        startpos = self.posinDoc('info.glyph.y')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.y', p)
            res.extend(argres)
        self.gy = res

        res = []
        startpos = self.posinDoc('info.glyph.glyphID')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.glyphID', p)
            res.extend(argres)
        self.gid = res


    # return tag at line pos in document
    def lineinDoc(self, pos) :
        if (pos >= 0) and (pos < self.docSize) :
            item = self.flatdoc[pos]
            if item.find('=') >= 0:
                (name, argres) = item.split('=',1)
            else :
                name = item
                argres = ''
        return name, argres

    # find tag in doc if within pos to end inclusive
    def findinDoc(self, tagpath, pos, end) :
        result = None
        if end == -1 :
            end = self.docSize
        else:
            end = min(self.docSize, end)
        foundat = -1
        for j in xrange(pos, end):
            item = self.flatdoc[j]
            if item.find('=') >= 0:
                (name, argres) = item.split('=',1)
            else :
                name = item
                argres = ''
            if name.endswith(tagpath) :
                result = argres
                foundat = j
                break
        return foundat, result

    # return list of start positions for the tagpath
    def posinDoc(self, tagpath):
        startpos = []
        pos = 0
        res = ""
        while res != None :
            (foundpos, res) = self.findinDoc(tagpath, pos, -1)
            if res != None :
                startpos.append(foundpos)
            pos = foundpos + 1
        return startpos

    def getData(self, path):
        result = None
        cnt = len(self.flatdoc)
        for j in xrange(cnt):
            item = self.flatdoc[j]
            if item.find('=') >= 0:
                (name, argt) = item.split('=')
                argres = argt.split('|')
            else:
                name = item
                argres = []
            if (name.endswith(path)):
                result = argres
                break
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        return result

    def getDataatPos(self, path, pos):
        result = None
        item = self.flatdoc[pos]
        if item.find('=') >= 0:
            (name, argt) = item.split('=')
            argres = argt.split('|')
        else:
            name = item
            argres = []
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        if (name.endswith(path)):
            result = argres
        return result

    def getDataTemp(self, path):
        result = None
        cnt = len(self.temp)
        for j in xrange(cnt):
            item = self.temp[j]
            if item.find('=') >= 0:
                (name, argt) = item.split('=')
                argres = argt.split('|')
            else:
                name = item
                argres = []
            if (name.endswith(path)):
                result = argres
                self.temp.pop(j)
                break
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        return result

    def getImages(self):
        result = []
        self.temp = self.flatdoc
        while (self.getDataTemp('img') != None):
            h = self.getDataTemp('img.h')[0]
            w = self.getDataTemp('img.w')[0]
            x = self.getDataTemp('img.x')[0]
            y = self.getDataTemp('img.y')[0]
            src = self.getDataTemp('img.src')[0]
            result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
        return result

    def getGlyphs(self):
        result = []
        if (self.gid != None) and (len(self.gid) > 0):
            glyphs = []
            for j in set(self.gid):
                glyphs.append(j)
            glyphs.sort()
            for gid in glyphs:
                id='id="gl%d"' % gid
                path = self.gd.lookup(id)
                if path:
                    result.append(id + ' ' + path)
        return result


def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
    ml = ''
    pp = PParser(gdict, flat_xml, meta_array)
    ml += '<?xml version="1.0" standalone="no"?>\n'
    if (raw):
        ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
        ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
        ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
    else:
        ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
        ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
        ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
        ml += '<script><![CDATA[\n'
        ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
        ml += 'var dpi=%d;\n' % scaledpi
        if (previd) :
            ml += 'var prevpage="page%04d.xhtml";\n' % (previd)
        if (nextid) :
            ml += 'var nextpage="page%04d.xhtml";\n' % (nextid)
        ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
        ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
        ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
        ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
        ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
        ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
        ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
        ml += 'window.onload=setsize;\n'
        ml += ']]></script>\n'
        ml += '</head>\n'
        ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
        ml += '<div style="white-space:nowrap;">\n'
        if previd == None:
            ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
        else:
            ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'

        ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
    if (pp.gid != None):
        ml += '<defs>\n'
        gdefs = pp.getGlyphs()
        for j in xrange(0,len(gdefs)):
            ml += gdefs[j]
        ml += '</defs>\n'
    img = pp.getImages()
    if (img != None):
        for j in xrange(0,len(img)):
            ml += img[j]
    if (pp.gid != None):
        for j in xrange(0,len(pp.gid)):
            ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
    if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
        xpos = "%d" % (pp.pw // 3)
        ypos = "%d" % (pp.ph // 3)
        ml += '<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n'
    if (raw) :
        ml += '</svg>'
    else :
        ml += '</svg></a>\n'
        if nextid == None:
            ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
        else :
            ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
        ml += '</div>\n'
        ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
        ml += '</body>\n'
        ml += '</html>\n'
    return ml
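flatxml2svg.py renders one page at a time: PParser pulls the page size, glyph ids and glyph positions out of the flat xml, and convert2SVG() wraps them either as a raw SVG document or as an XHTML page with prev/next navigation. A sketch of the per-page call as genbook.py uses it further down; gd is a GlyphDict and flat_svg the concatenated flat xml of all pageXXXX.dat files that share one page id.

# sketch only: write one page either as raw SVG or as navigable XHTML
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
if raw:
    open(os.path.join(svgDir, 'page%04d.svg' % pageid), 'w').write(svgxml)
else:
    open(os.path.join(svgDir, 'page%04d.xhtml' % pageid), 'w').write(svgxml)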
686    Calibre_Plugins/K4MobiDeDRM_plugin/genbook.py    Normal file
@@ -0,0 +1,686 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import os
|
||||||
|
import getopt
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# local support routines
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre :
|
||||||
|
from calibre_plugins.k4mobidedrm import convert2xml
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2html
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2svg
|
||||||
|
from calibre_plugins.k4mobidedrm import stylexml2css
|
||||||
|
else :
|
||||||
|
import convert2xml
|
||||||
|
import flatxml2html
|
||||||
|
import flatxml2svg
|
||||||
|
import stylexml2css
|
||||||
|
|
||||||
|
|
||||||
|
# Get a 7 bit encoded number from a file
|
||||||
|
def readEncodedNumber(file):
|
||||||
|
flag = False
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
if data == 0xFF:
|
||||||
|
flag = True
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
if data >= 0x80:
|
||||||
|
datax = (data & 0x7F)
|
||||||
|
while data >= 0x80 :
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
|
data = datax
|
||||||
|
if flag:
|
||||||
|
data = -data
|
||||||
|
return data
|
||||||
|
|
||||||
|
# Get a length prefixed string from the file
|
||||||
|
def lengthPrefixString(data):
|
||||||
|
return encodeNumber(len(data))+data
|
||||||
|
|
||||||
|
def readString(file):
|
||||||
|
stringLength = readEncodedNumber(file)
|
||||||
|
if (stringLength == None):
|
||||||
|
return None
|
||||||
|
sv = file.read(stringLength)
|
||||||
|
if (len(sv) != stringLength):
|
||||||
|
return ""
|
||||||
|
return unpack(str(stringLength)+"s",sv)[0]
|
||||||
|
|
||||||
|
def getMetaArray(metaFile):
|
||||||
|
# parse the meta file
|
||||||
|
result = {}
|
||||||
|
fo = file(metaFile,'rb')
|
||||||
|
size = readEncodedNumber(fo)
|
||||||
|
for i in xrange(size):
|
||||||
|
tag = readString(fo)
|
||||||
|
value = readString(fo)
|
||||||
|
result[tag] = value
|
||||||
|
# print tag, value
|
||||||
|
fo.close()
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# dictionary of all text strings by index value
|
||||||
|
class Dictionary(object):
|
||||||
|
def __init__(self, dictFile):
|
||||||
|
self.filename = dictFile
|
||||||
|
self.size = 0
|
||||||
|
self.fo = file(dictFile,'rb')
|
||||||
|
self.stable = []
|
||||||
|
self.size = readEncodedNumber(self.fo)
|
||||||
|
for i in xrange(self.size):
|
||||||
|
self.stable.append(self.escapestr(readString(self.fo)))
|
||||||
|
self.pos = 0
|
||||||
|
def escapestr(self, str):
|
||||||
|
str = str.replace('&','&')
|
||||||
|
str = str.replace('<','<')
|
||||||
|
str = str.replace('>','>')
|
||||||
|
str = str.replace('=','=')
|
||||||
|
return str
|
||||||
|
def lookup(self,val):
|
||||||
|
if ((val >= 0) and (val < self.size)) :
|
||||||
|
self.pos = val
|
||||||
|
return self.stable[self.pos]
|
||||||
|
else:
|
||||||
|
print "Error - %d outside of string table limits" % val
|
||||||
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
def getSize(self):
|
||||||
|
return self.size
|
||||||
|
def getPos(self):
|
||||||
|
return self.pos
|
||||||
|
|
||||||
|
|
||||||
|
class PageDimParser(object):
|
||||||
|
def __init__(self, flatxml):
|
||||||
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
# find tag if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
docList = self.flatdoc
|
||||||
|
cnt = len(docList)
|
||||||
|
if end == -1 :
|
||||||
|
end = cnt
|
||||||
|
else:
|
||||||
|
end = min(cnt,end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = docList[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=')
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
def process(self):
|
||||||
|
(pos, sph) = self.findinDoc('page.h',0,-1)
|
||||||
|
(pos, spw) = self.findinDoc('page.w',0,-1)
|
||||||
|
if (sph == None): sph = '-1'
|
||||||
|
if (spw == None): spw = '-1'
|
||||||
|
return sph, spw
|
||||||
|
|
||||||
|
def getPageDim(flatxml):
|
||||||
|
# create a document parser
|
||||||
|
dp = PageDimParser(flatxml)
|
||||||
|
(ph, pw) = dp.process()
|
||||||
|
return ph, pw
|
||||||
|
|
||||||
|
class GParser(object):
|
||||||
|
def __init__(self, flatxml):
|
||||||
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.dpi = 1440
|
||||||
|
self.gh = self.getData('info.glyph.h')
|
||||||
|
self.gw = self.getData('info.glyph.w')
|
||||||
|
self.guse = self.getData('info.glyph.use')
|
||||||
|
if self.guse :
|
||||||
|
self.count = len(self.guse)
|
||||||
|
else :
|
||||||
|
self.count = 0
|
||||||
|
self.gvtx = self.getData('info.glyph.vtx')
|
||||||
|
self.glen = self.getData('info.glyph.len')
|
||||||
|
self.gdpi = self.getData('info.glyph.dpi')
|
||||||
|
self.vx = self.getData('info.vtx.x')
|
||||||
|
self.vy = self.getData('info.vtx.y')
|
||||||
|
self.vlen = self.getData('info.len.n')
|
||||||
|
if self.vlen :
|
||||||
|
self.glen.append(len(self.vlen))
|
||||||
|
elif self.glen:
|
||||||
|
self.glen.append(0)
|
||||||
|
if self.vx :
|
||||||
|
self.gvtx.append(len(self.vx))
|
||||||
|
elif self.gvtx :
|
||||||
|
self.gvtx.append(0)
|
||||||
|
def getData(self, path):
|
||||||
|
result = None
|
||||||
|
cnt = len(self.flatdoc)
|
||||||
|
for j in xrange(cnt):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (name == path):
|
||||||
|
result = argres
|
||||||
|
break
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
return result
|
||||||
|
def getGlyphDim(self, gly):
|
||||||
|
if self.gdpi[gly] == 0:
|
||||||
|
return 0, 0
|
||||||
|
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
||||||
|
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
||||||
|
return maxh, maxw
|
||||||
|
def getPath(self, gly):
|
||||||
|
path = ''
|
||||||
|
if (gly < 0) or (gly >= self.count):
|
||||||
|
return path
|
||||||
|
tx = self.vx[self.gvtx[gly]:self.gvtx[gly+1]]
|
||||||
|
ty = self.vy[self.gvtx[gly]:self.gvtx[gly+1]]
|
||||||
|
p = 0
|
||||||
|
for k in xrange(self.glen[gly], self.glen[gly+1]):
|
||||||
|
if (p == 0):
|
||||||
|
zx = tx[0:self.vlen[k]+1]
|
||||||
|
zy = ty[0:self.vlen[k]+1]
|
||||||
|
else:
|
||||||
|
zx = tx[self.vlen[k-1]+1:self.vlen[k]+1]
|
||||||
|
zy = ty[self.vlen[k-1]+1:self.vlen[k]+1]
|
||||||
|
p += 1
|
||||||
|
j = 0
|
||||||
|
while ( j < len(zx) ):
|
||||||
|
if (j == 0):
|
||||||
|
# Start Position.
|
||||||
|
path += 'M %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly])
|
||||||
|
elif (j <= len(zx)-3):
|
||||||
|
# Cubic Bezier Curve
|
||||||
|
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[j+2] * self.dpi / self.gdpi[gly], zy[j+2] * self.dpi / self.gdpi[gly])
|
||||||
|
j += 2
|
||||||
|
elif (j == len(zx)-2):
|
||||||
|
# Cubic Bezier Curve to Start Position
|
||||||
|
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
|
||||||
|
j += 1
|
||||||
|
elif (j == len(zx)-1):
|
||||||
|
# Quadratic Bezier Curve to Start Position
|
||||||
|
path += 'Q %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
|
||||||
|
|
||||||
|
j += 1
|
||||||
|
path += 'z'
|
||||||
|
return path
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# dictionary of all text strings by index value
|
||||||
|
class GlyphDict(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.gdict = {}
|
||||||
|
def lookup(self, id):
|
||||||
|
# id='id="gl%d"' % val
|
||||||
|
if id in self.gdict:
|
||||||
|
return self.gdict[id]
|
||||||
|
return None
|
||||||
|
def addGlyph(self, val, path):
|
||||||
|
id='id="gl%d"' % val
|
||||||
|
self.gdict[id] = path
|
||||||
|
|
||||||
|
|
||||||
|
def generateBook(bookDir, raw, fixedimage):
|
||||||
|
# sanity check Topaz file extraction
|
||||||
|
if not os.path.exists(bookDir) :
|
||||||
|
print "Can not find directory with unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
dictFile = os.path.join(bookDir,'dict0000.dat')
|
||||||
|
if not os.path.exists(dictFile) :
|
||||||
|
print "Can not find dict0000.dat file"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
pageDir = os.path.join(bookDir,'page')
|
||||||
|
if not os.path.exists(pageDir) :
|
||||||
|
print "Can not find page directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
imgDir = os.path.join(bookDir,'img')
|
||||||
|
if not os.path.exists(imgDir) :
|
||||||
|
print "Can not find image directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
glyphsDir = os.path.join(bookDir,'glyphs')
|
||||||
|
if not os.path.exists(glyphsDir) :
|
||||||
|
print "Can not find glyphs directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
metaFile = os.path.join(bookDir,'metadata0000.dat')
|
||||||
|
if not os.path.exists(metaFile) :
|
||||||
|
print "Can not find metadata0000.dat in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
svgDir = os.path.join(bookDir,'svg')
|
||||||
|
if not os.path.exists(svgDir) :
|
||||||
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
|
if not os.path.exists(xmlDir) :
|
||||||
|
os.makedirs(xmlDir)
|
||||||
|
|
||||||
|
otherFile = os.path.join(bookDir,'other0000.dat')
|
||||||
|
if not os.path.exists(otherFile) :
|
||||||
|
print "Can not find other0000.dat in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
print "Updating to color images if available"
|
||||||
|
spath = os.path.join(bookDir,'color_img')
|
||||||
|
dpath = os.path.join(bookDir,'img')
|
||||||
|
filenames = os.listdir(spath)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
for filename in filenames:
|
||||||
|
imgname = filename.replace('color','img')
|
||||||
|
sfile = os.path.join(spath,filename)
|
||||||
|
dfile = os.path.join(dpath,imgname)
|
||||||
|
imgdata = file(sfile,'rb').read()
|
||||||
|
file(dfile,'wb').write(imgdata)
|
||||||
|
|
||||||
|
print "Creating cover.jpg"
|
||||||
|
isCover = False
|
||||||
|
cpath = os.path.join(bookDir,'img')
|
||||||
|
cpath = os.path.join(cpath,'img0000.jpg')
|
||||||
|
if os.path.isfile(cpath):
|
||||||
|
cover = file(cpath, 'rb').read()
|
||||||
|
cpath = os.path.join(bookDir,'cover.jpg')
|
||||||
|
file(cpath, 'wb').write(cover)
|
||||||
|
isCover = True
|
||||||
|
|
||||||
|
|
||||||
|
print 'Processing Dictionary'
|
||||||
|
dict = Dictionary(dictFile)
|
||||||
|
|
||||||
|
print 'Processing Meta Data and creating OPF'
|
||||||
|
meta_array = getMetaArray(metaFile)
|
||||||
|
|
||||||
|
# replace special chars in title and authors like & < >
|
||||||
|
title = meta_array.get('Title','No Title Provided')
|
||||||
|
title = title.replace('&','&')
|
||||||
|
title = title.replace('<','<')
|
||||||
|
title = title.replace('>','>')
|
||||||
|
meta_array['Title'] = title
|
||||||
|
authors = meta_array.get('Authors','No Authors Provided')
|
||||||
|
authors = authors.replace('&','&')
|
||||||
|
authors = authors.replace('<','<')
|
||||||
|
authors = authors.replace('>','>')
|
||||||
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
|
metastr = ''
|
||||||
|
for key in meta_array:
|
||||||
|
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n'
|
||||||
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
|
print 'Processing StyleSheet'
|
||||||
|
# get some scaling info from metadata to use while processing styles
|
||||||
|
fontsize = '135'
|
||||||
|
if 'fontSize' in meta_array:
|
||||||
|
fontsize = meta_array['fontSize']
|
||||||
|
|
||||||
|
# also get the size of a normal text page
|
||||||
|
spage = '1'
|
||||||
|
if 'firstTextPage' in meta_array:
|
||||||
|
spage = meta_array['firstTextPage']
|
||||||
|
pnum = int(spage)
|
||||||
|
|
||||||
|
# get page height and width from first text page for use in stylesheet scaling
|
||||||
|
pname = 'page%04d.dat' % (pnum + 1)
|
||||||
|
fname = os.path.join(pageDir,pname)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
|
file(xname, 'wb').write(cssstr)
|
||||||
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
|
print 'Processing Glyphs'
|
||||||
|
gd = GlyphDict()
|
||||||
|
filenames = os.listdir(glyphsDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
glyfname = os.path.join(svgDir,'glyphs.svg')
|
||||||
|
glyfile = open(glyfname, 'w')
|
||||||
|
glyfile.write('<?xml version="1.0" standalone="no"?>\n')
|
||||||
|
glyfile.write('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
|
glyfile.write('<svg width="512" height="512" viewBox="0 0 511 511" xmlns="http://www.w3.org/2000/svg" version="1.1">\n')
|
||||||
|
glyfile.write('<title>Glyphs for %s</title>\n' % meta_array['Title'])
|
||||||
|
glyfile.write('<defs>\n')
|
||||||
|
counter = 0
|
||||||
|
for filename in filenames:
|
||||||
|
# print ' ', filename
|
||||||
|
print '.',
|
||||||
|
fname = os.path.join(glyphsDir,filename)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
|
gp = GParser(flat_xml)
|
||||||
|
for i in xrange(0, gp.count):
|
||||||
|
path = gp.getPath(i)
|
||||||
|
maxh, maxw = gp.getGlyphDim(i)
|
||||||
|
fullpath = '<path id="gl%d" d="%s" fill="black" /><!-- width=%d height=%d -->\n' % (counter * 256 + i, path, maxw, maxh)
|
||||||
|
glyfile.write(fullpath)
|
||||||
|
gd.addGlyph(counter * 256 + i, fullpath)
|
||||||
|
counter += 1
|
||||||
|
glyfile.write('</defs>\n')
|
||||||
|
glyfile.write('</svg>\n')
|
||||||
|
glyfile.close()
|
||||||
|
print " "
|
||||||
|
|
||||||
|
# build up tocentries while processing html
|
||||||
|
tocentries = ''
|
||||||
|
|
||||||
|
# start up the html
|
||||||
|
htmlFileName = "book.html"
|
||||||
|
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
|
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n'
|
||||||
|
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
|
||||||
|
htmlstr += '<head>\n'
|
||||||
|
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
|
||||||
|
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
||||||
|
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
|
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
|
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
||||||
|
htmlstr += '</head>\n<body>\n'
|
||||||
|
|
||||||
|
print 'Processing Pages'
|
||||||
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
|
# readability when rendering to the screen.
|
||||||
|
scaledpi = 1440.0
|
||||||
|
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
|
xmllst = []
|
||||||
|
|
||||||
|
for filename in filenames:
|
||||||
|
# print ' ', filename
|
||||||
|
print ".",
|
||||||
|
fname = os.path.join(pageDir,filename)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
|
# first get the html
|
||||||
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
tocentries += tocinfo
|
||||||
|
htmlstr += pagehtml
|
||||||
|
|
||||||
|
# finish up the html string and output it
|
||||||
|
htmlstr += '</body>\n</html>\n'
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tochtml = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
|
tochtml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
||||||
|
tochtml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
||||||
|
tochtml += '<head>\n'
|
||||||
|
tochtml += '<title>' + meta_array['Title'] + '</title>\n'
|
||||||
|
tochtml += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
|
tochtml += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tochtml += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tochtml += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
|
tochtml += '</head>\n'
|
||||||
|
tochtml += '<body>\n'
|
||||||
|
|
||||||
|
tochtml += '<h2>Table of Contents</h2>\n'
|
||||||
|
start = pageidnums[0]
|
||||||
|
if (raw):
|
||||||
|
startname = 'page%04d.svg' % start
|
||||||
|
else:
|
||||||
|
startname = 'page%04d.xhtml' % start
|
||||||
|
|
||||||
|
tochtml += '<h3><a href="' + startname + '">Start of Book</a></h3>\n'
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tochtml += '<h3><a href="'+ fname + '">' + title + '</a></h3>\n'
|
||||||
|
tochtml += '</body>\n'
|
||||||
|
tochtml += '</html>\n'
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
|
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
||||||
|
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
||||||
|
svgindex += '<head>\n'
|
||||||
|
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
||||||
|
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
|
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
|
svgindex += '</head>\n'
|
||||||
|
svgindex += '<body>\n'
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
svgindex += '<h2>List of Pages</h2>\n'
|
||||||
|
svgindex += '<div>\n'
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flat_svg = ''
|
||||||
|
for page in pagelst:
|
||||||
|
flat_svg += xmllst[page]
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
|
if (raw) :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
|
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid)
|
||||||
|
else :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
|
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid)
|
||||||
|
previd = pageid
|
||||||
|
pfile.write(svgxml)
|
||||||
|
pfile.close()
|
||||||
|
counter += 1
|
||||||
|
svgindex += '</div>\n'
|
||||||
|
svgindex += '<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n'
|
||||||
|
svgindex += '</body>\n</html>\n'
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
|
||||||
|
# build the opf file
|
||||||
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
|
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
|
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
||||||
|
# adding metadata
|
||||||
|
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
||||||
|
if 'oASIN' in meta_array:
|
||||||
|
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
||||||
|
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
||||||
|
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
||||||
|
opfstr += ' <dc:language>en</dc:language>\n'
|
||||||
|
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n'
|
||||||
|
if isCover:
|
||||||
|
opfstr += ' <meta name="cover" content="bookcover"/>\n'
|
||||||
|
opfstr += ' </metadata>\n'
|
||||||
|
opfstr += '<manifest>\n'
|
||||||
|
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
||||||
|
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n'
|
||||||
|
# adding image files to manifest
|
||||||
|
filenames = os.listdir(imgDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
for filename in filenames:
|
||||||
|
imgname, imgext = os.path.splitext(filename)
|
||||||
|
if imgext == '.jpg':
|
||||||
|
imgext = 'jpeg'
|
||||||
|
if imgext == '.svg':
|
||||||
|
imgext = 'svg+xml'
|
||||||
|
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n'
|
||||||
|
if isCover:
|
||||||
|
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n'
|
||||||
|
opfstr += '</manifest>\n'
|
||||||
|
# adding spine
|
||||||
|
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n'
|
||||||
|
if isCover:
|
||||||
|
opfstr += ' <guide>\n'
|
||||||
|
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n'
|
||||||
|
opfstr += ' </guide>\n'
|
||||||
|
opfstr += '</package>\n'
|
||||||
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
|
print 'Processing Complete'
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print "genbook.py generates a book from the extract Topaz Files"
|
||||||
|
print "Usage:"
|
||||||
|
print " genbook.py [-r] [-h [--fixed-image] <bookDir> "
|
||||||
|
print " "
|
||||||
|
print "Options:"
|
||||||
|
print " -h : help - print this usage message"
|
||||||
|
print " -r : generate raw svg files (not wrapped in xhtml)"
|
||||||
|
print " --fixed-image : genearate any Fixed Area as an svg image in the html"
|
||||||
|
print " "
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv):
|
||||||
|
bookDir = ''
|
||||||
|
|
||||||
|
if len(argv) == 0:
|
||||||
|
argv = sys.argv
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(argv[1:], "rh:",["fixed-image"])
|
||||||
|
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
raw = 0
|
||||||
|
fixedimage = True
|
||||||
|
for o, a in opts:
|
||||||
|
if o =="-h":
|
||||||
|
usage()
|
||||||
|
return 0
|
||||||
|
if o =="-r":
|
||||||
|
raw = 1
|
||||||
|
if o =="--fixed-image":
|
||||||
|
fixedimage = True
|
||||||
|
|
||||||
|
bookDir = args[0]
|
||||||
|
|
||||||
|
rv = generateBook(bookDir, raw, fixedimage)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main(''))
|
||||||
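The .dat files genbook.py parses store values as 7-bit variable-length integers (readEncodedNumber) followed by length-prefixed strings (readString): a leading 0xFF byte marks a negative number, and every byte except the last carries its payload in the low seven bits with the high bit set. A small standalone sketch of the same decoding over an in-memory buffer, for illustration only (the end-of-file checks of the original are omitted):

from StringIO import StringIO

def read_encoded_number(f):
    # same scheme as readEncodedNumber above: optional 0xFF sign marker,
    # then 7 payload bits per byte while the high bit is set
    flag = False
    data = ord(f.read(1))
    if data == 0xFF:
        flag = True
        data = ord(f.read(1))
    if data >= 0x80:
        datax = data & 0x7F
        while data >= 0x80:
            data = ord(f.read(1))
            datax = (datax << 7) + (data & 0x7F)
        data = datax
    if flag:
        data = -data
    return data

print read_encoded_number(StringIO('\x81\x05'))   # (1 << 7) + 5 = 133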
208    Calibre_Plugins/K4MobiDeDRM_plugin/k4mobidedrm_orig.py    Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
||||||
|
# for personal use for archiving and converting your ebooks
|
||||||
|
|
||||||
|
# PLEASE DO NOT PIRATE EBOOKS!
|
||||||
|
|
||||||
|
# We want all authors and publishers, and eBook stores to live
|
||||||
|
# long and prosperous lives but at the same time we just want to
|
||||||
|
# be able to read OUR books on whatever device we want and to keep
|
||||||
|
# readable for a long, long time
|
||||||
|
|
||||||
|
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
||||||
|
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
||||||
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '3.9'
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os, csv, getopt
|
||||||
|
import string
|
||||||
|
import re
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
else:
|
||||||
|
import mobidedrm
|
||||||
|
import topazextract
|
||||||
|
import kgenpids
|
||||||
|
|
||||||
|
|
||||||
|
# cleanup bytestring filenames
|
||||||
|
# borrowed from calibre from calibre/src/calibre/__init__.py
|
||||||
|
# added in removal of non-printing chars
|
||||||
|
# and removal of . at start
|
||||||
|
# convert spaces to underscores
|
||||||
|
def cleanup_name(name):
|
||||||
|
_filename_sanitize = re.compile(r'[\xae\0\\|\?\*<":>\+/]')
|
||||||
|
substitute='_'
|
||||||
|
one = ''.join(char for char in name if char in string.printable)
|
||||||
|
one = _filename_sanitize.sub(substitute, one)
|
||||||
|
one = re.sub(r'\s', ' ', one).strip()
|
||||||
|
one = re.sub(r'^\.+$', '_', one)
|
||||||
|
one = one.replace('..', substitute)
|
||||||
|
# Windows doesn't like path components that end with a period
|
||||||
|
if one.endswith('.'):
|
||||||
|
one = one[:-1]+substitute
|
||||||
|
# Mac and Unix don't like file names that begin with a full stop
|
||||||
|
if len(one) > 0 and one[0] == '.':
|
||||||
|
one = substitute+one[1:]
|
||||||
|
one = one.replace(' ','_')
|
||||||
|
return one
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
# handle the obvious cases at the beginning
|
||||||
|
if not os.path.isfile(infile):
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
mobi = True
|
||||||
|
magic3 = file(infile,'rb').read(3)
|
||||||
|
if magic3 == 'TPZ':
|
||||||
|
mobi = False
|
||||||
|
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
mb = mobidedrm.MobiBook(infile)
|
||||||
|
else:
|
||||||
|
mb = topazextract.TopazBook(infile)
|
||||||
|
|
||||||
|
title = mb.getBookTitle()
|
||||||
|
print "Processing Book: ", title
|
||||||
|
filenametitle = cleanup_name(title)
|
||||||
|
outfilename = bookname
|
||||||
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
|
# build pid list
|
||||||
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
|
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
|
except mobidedrm.DrmException, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except topazextract.TpzDRMError, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except Exception, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
else:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
|
mb.getMobiFile(outfile)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# topaz:
|
||||||
|
print " Creating NoDRM HTMLZ Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
|
print " Creating SVG ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
|
mb.getXMLZip(zipname)
|
||||||
|
|
||||||
|
# remove internal temporary directory of Topaz pieces
|
||||||
|
mb.cleanup()
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def usage(progname):
|
||||||
|
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
||||||
|
print "Usage:"
|
||||||
|
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = []
|
||||||
|
serials = []
|
||||||
|
pids = []
|
||||||
|
|
||||||
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage(progname)
|
||||||
|
sys.exit(2)
|
||||||
|
if len(args)<2:
|
||||||
|
usage(progname)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-k":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -k")
|
||||||
|
kInfoFiles.append(a)
|
||||||
|
if o == "-p":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -p")
|
||||||
|
pids = a.split(',')
|
||||||
|
if o == "-s":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -s")
|
||||||
|
serials = a.split(',')
|
||||||
|
|
||||||
|
# try with built in Kindle Info files
|
||||||
|
k4 = True
|
||||||
|
if sys.platform.startswith('linux'):
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = None
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
sys.exit(main())
|
||||||
|
|
||||||
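Per the usage() text above, k4mobidedrm_orig.py is driven from the command line with the encrypted book and an output directory, plus any combination of kindle.info files (-k), PID numbers (-p) and Kindle serial numbers (-s). The file names, serial number and PID below are placeholders, not real values:

python k4mobidedrm_orig.py -k kindle.info encrypted_book.azw /path/to/outdir
python k4mobidedrm_orig.py -s B001XXXXXXXXXXXX encrypted_book.azw /path/to/outdir
python k4mobidedrm_orig.py -p 1234567890 encrypted_book.azw /path/to/outdir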
270    Calibre_Plugins/K4MobiDeDRM_plugin/kgenpids.py    Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
import sys
|
||||||
|
import os, csv
|
||||||
|
import binascii
|
||||||
|
import zlib
|
||||||
|
import re
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
global charMap1
|
||||||
|
global charMap3
|
||||||
|
global charMap4
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
else:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
||||||
|
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
#
|
||||||
|
# PID generation routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# Returns two bit at offset from a bit field
|
||||||
|
def getTwoBitsFromBitField(bitField,offset):
|
||||||
|
byteNumber = offset // 4
|
||||||
|
bitPosition = 6 - 2*(offset % 4)
|
||||||
|
return ord(bitField[byteNumber]) >> bitPosition & 3
|
||||||
|
|
||||||
|
# Returns the six bits at offset from a bit field
|
||||||
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
|
offset *= 3
|
||||||
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
|
return value
|
||||||
|
|
||||||
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
|
def encodePID(hash):
|
||||||
|
global charMap3
|
||||||
|
PID = ""
|
||||||
|
for position in range (0,8):
|
||||||
|
PID += charMap3[getSixBitsFromBitField(hash,position)]
|
||||||
|
return PID
|
||||||
|
|
||||||
|
# Encryption table used to generate the device PID
|
||||||
|
def generatePidEncryptionTable() :
|
||||||
|
table = []
|
||||||
|
for counter1 in range (0,0x100):
|
||||||
|
value = counter1
|
||||||
|
for counter2 in range (0,8):
|
||||||
|
if (value & 1 == 0) :
|
||||||
|
value = value >> 1
|
||||||
|
else :
|
||||||
|
value = value >> 1
|
||||||
|
value = value ^ 0xEDB88320
|
||||||
|
table.append(value)
|
||||||
|
return table
|
||||||
|
|
||||||
|
# Seed value used to generate the device PID
|
||||||
|
def generatePidSeed(table,dsn) :
|
||||||
|
value = 0
|
||||||
|
for counter in range (0,4) :
|
||||||
|
index = (ord(dsn[counter]) ^ value) &0xFF
|
||||||
|
value = (value >> 8) ^ table[index]
|
||||||
|
return value
|
||||||
|
|
||||||
|
# Generate the device PID
|
||||||
|
def generateDevicePID(table,dsn,nbRoll):
|
||||||
|
global charMap4
|
||||||
|
seed = generatePidSeed(table,dsn)
|
||||||
|
pidAscii = ""
|
||||||
|
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
|
||||||
|
index = 0
|
||||||
|
for counter in range (0,nbRoll):
|
||||||
|
pid[index] = pid[index] ^ ord(dsn[counter])
|
||||||
|
index = (index+1) %8
|
||||||
|
for counter in range (0,8):
|
||||||
|
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
|
||||||
|
pidAscii += charMap4[index]
|
||||||
|
return pidAscii
|
||||||
|
|
||||||
|
def crc32(s):
|
||||||
|
return (~binascii.crc32(s,-1))&0xFFFFFFFF
|
||||||
|
|
||||||
|
# convert from 8 digit PID to 10 digit PID with checksum
|
||||||
|
def checksumPid(s):
|
||||||
|
global charMap4
|
||||||
|
crc = crc32(s)
|
||||||
|
crc = crc ^ (crc >> 16)
|
||||||
|
res = s
|
||||||
|
l = len(charMap4)
|
||||||
|
for i in (0,1):
|
||||||
|
b = crc & 0xff
|
||||||
|
pos = (b // l) ^ (b % l)
|
||||||
|
res += charMap4[pos%l]
|
||||||
|
crc >>= 8
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
# old kindle serial number to fixed pid
def pidFromSerial(s, l):
    global charMap4
    crc = crc32(s)
    arr1 = [0]*l
    for i in xrange(len(s)):
        arr1[i%l] ^= ord(s[i])
    crc_bytes = [crc >> 24 & 0xff, crc >> 16 & 0xff, crc >> 8 & 0xff, crc & 0xff]
    for i in xrange(l):
        arr1[i] ^= crc_bytes[i&3]
    pid = ""
    for i in xrange(l):
        b = arr1[i] & 0xff
        pid += charMap4[(b >> 7) + ((b >> 5 & 3) ^ (b & 0x1f))]
    return pid


# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
def getKindlePid(pidlst, rec209, token, serialnum):
    # Compute book PID
    pidHash = SHA1(serialnum+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pidlst.append(bookPID)

    # compute fixed pid for old pre 2.5 firmware update pid as well
    bookPID = pidFromSerial(serialnum, 7) + "*"
    bookPID = checksumPid(bookPID)
    pidlst.append(bookPID)

    return pidlst


# parse the Kindleinfo file to calculate the book pid.

keynames = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]

def getK4Pids(pidlst, rec209, token, kInfoFile):
    global charMap1
    kindleDatabase = None
    try:
        kindleDatabase = getDBfromFile(kInfoFile)
    except Exception, message:
        print(message)
        kindleDatabase = None
        pass

    if kindleDatabase == None :
        return pidlst

    try:
        # Get the Mazama Random number
        MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"]

        # Get the kindle account token
        kindleAccountToken = kindleDatabase["kindle.account.tokens"]
    except KeyError:
        print "Keys not found in " + kInfoFile
        return pidlst

    # Get the ID string used
    encodedIDString = encodeHash(GetIDString(),charMap1)

    # Get the current user name
    encodedUsername = encodeHash(GetUserName(),charMap1)

    # concat, hash and encode to calculate the DSN
    DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)

    # Compute the device PID (which, as far as I can tell, is used for nothing).
    table = generatePidEncryptionTable()
    devicePID = generateDevicePID(table,DSN,4)
    devicePID = checksumPid(devicePID)
    pidlst.append(devicePID)

    # Compute book PIDs

    # book pid
    pidHash = SHA1(DSN+kindleAccountToken+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pidlst.append(bookPID)

    # variant 1
    pidHash = SHA1(kindleAccountToken+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pidlst.append(bookPID)

    # variant 2
    pidHash = SHA1(DSN+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pidlst.append(bookPID)

    return pidlst


def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
    pidlst = []
    if kInfoFiles is None:
        kInfoFiles = []
    if k4:
        kInfoFiles = getKindleInfoFiles(kInfoFiles)
    for infoFile in kInfoFiles:
        pidlst = getK4Pids(pidlst, md1, md2, infoFile)
    for serialnum in serials:
        pidlst = getKindlePid(pidlst, md1, md2, serialnum)
    for pid in pids:
        pidlst.append(pid)
    return pidlst
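
# A minimal usage sketch (hypothetical paths and PID): gather candidate PIDs
# from a kindle.info file plus one user-supplied 10 digit PID, then hand the
# list to the Mobi/Topaz decryption code.
#
#   pidlst = getPidList(rec209, token, True, ['A1B2C3D4E5'], [], ['/path/to/kindle.info'])
#   for pid in pidlst:
#       print pid
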
@@ -81,6 +81,14 @@ class DocParser(object):
            pos = foundpos + 1
        return startpos

    # returns a vector of integers for the tagpath
    def getData(self, tagpath, pos, end):
        argres=[]
        (foundat, argt) = self.findinDoc(tagpath, pos, end)
        if (argt != None) and (len(argt) > 0) :
            argList = argt.split('|')
            argres = [ int(strval) for strval in argList]
        return argres

    def process(self):

@@ -237,7 +245,11 @@ def convert2CSS(flatxml, fontsize, ph, pw):

    # create a document parser
    dp = DocParser(flatxml, fontsize, ph, pw)
    csspage = dp.process()
    return csspage


def getpageIDMap(flatxml):
    dp = DocParser(flatxml, 0, 0, 0)
    pageidnumbers = dp.getData('info.original.pid', 0, -1)
    return pageidnumbers
469
Calibre_Plugins/K4MobiDeDRM_plugin/topazextract.py
Normal file
@@ -0,0 +1,469 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
import os, csv, getopt
|
||||||
|
import zlib, zipfile, tempfile, shutil
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# local support routines
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
from calibre_plugins.k4mobidedrm import genbook
|
||||||
|
else:
|
||||||
|
import kgenpids
|
||||||
|
import genbook
|
||||||
|
|
||||||
|
|
||||||
|
# recursive zip creation support routine
|
||||||
|
def zipUpDir(myzip, tdir, localname):
|
||||||
|
currentdir = tdir
|
||||||
|
if localname != "":
|
||||||
|
currentdir = os.path.join(currentdir,localname)
|
||||||
|
list = os.listdir(currentdir)
|
||||||
|
for file in list:
|
||||||
|
afilename = file
|
||||||
|
localfilePath = os.path.join(localname, afilename)
|
||||||
|
realfilePath = os.path.join(currentdir,file)
|
||||||
|
if os.path.isfile(realfilePath):
|
||||||
|
myzip.write(realfilePath, localfilePath)
|
||||||
|
elif os.path.isdir(realfilePath):
|
||||||
|
zipUpDir(myzip, tdir, localfilePath)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Utility routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# Get a 7 bit encoded number from file
|
||||||
|
def bookReadEncodedNumber(fo):
|
||||||
|
flag = False
|
||||||
|
data = ord(fo.read(1))
|
||||||
|
if data == 0xFF:
|
||||||
|
flag = True
|
||||||
|
data = ord(fo.read(1))
|
||||||
|
if data >= 0x80:
|
||||||
|
datax = (data & 0x7F)
|
||||||
|
while data >= 0x80 :
|
||||||
|
data = ord(fo.read(1))
|
||||||
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
|
data = datax
|
||||||
|
if flag:
|
||||||
|
data = -data
|
||||||
|
return data
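
# Worked example: the bytes 0x81 0x05 decode to (0x01 << 7) + 0x05 = 133; a
# leading 0xFF byte flags the value as negative, so 0xFF 0x81 0x05 gives -133.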
|
||||||
|
|
||||||
|
# Get a length prefixed string from file
|
||||||
|
def bookReadString(fo):
|
||||||
|
stringLength = bookReadEncodedNumber(fo)
|
||||||
|
return unpack(str(stringLength)+"s",fo.read(stringLength))[0]
|
||||||
|
|
||||||
|
#
|
||||||
|
# crypto routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# Context initialisation for the Topaz Crypto
|
||||||
|
def topazCryptoInit(key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
|
def topazCryptoDecrypt(data, ctx):
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
# Decrypt data with the PID
|
||||||
|
def decryptRecord(data,PID):
|
||||||
|
ctx = topazCryptoInit(PID)
|
||||||
|
return topazCryptoDecrypt(data, ctx)
|
||||||
|
|
||||||
|
# Try to decrypt a dkey record (contains the bookPID)
|
||||||
|
def decryptDkeyRecord(data,PID):
|
||||||
|
record = decryptRecord(data,PID)
|
||||||
|
fields = unpack("3sB8sB8s3s",record)
|
||||||
|
if fields[0] != "PID" or fields[5] != "pid" :
|
||||||
|
raise TpzDRMError("Didn't find PID magic numbers in record")
|
||||||
|
elif fields[1] != 8 or fields[3] != 8 :
|
||||||
|
raise TpzDRMError("Record didn't contain correct length fields")
|
||||||
|
elif fields[2] != PID :
|
||||||
|
raise TpzDRMError("Record didn't contain PID")
|
||||||
|
return fields[4]
|
||||||
|
|
||||||
|
# Decrypt all dkey records (contain the book PID)
|
||||||
|
def decryptDkeyRecords(data,PID):
|
||||||
|
nbKeyRecords = ord(data[0])
|
||||||
|
records = []
|
||||||
|
data = data[1:]
|
||||||
|
for i in range (0,nbKeyRecords):
|
||||||
|
length = ord(data[0])
|
||||||
|
try:
|
||||||
|
key = decryptDkeyRecord(data[1:length+1],PID)
|
||||||
|
records.append(key)
|
||||||
|
except TpzDRMError:
|
||||||
|
pass
|
||||||
|
data = data[1+length:]
|
||||||
|
if len(records) == 0:
|
||||||
|
raise TpzDRMError("BookKey Not Found")
|
||||||
|
return records
|
||||||
|
|
||||||
|
|
||||||
|
class TopazBook:
|
||||||
|
def __init__(self, filename):
|
||||||
|
self.fo = file(filename, 'rb')
|
||||||
|
self.outdir = tempfile.mkdtemp()
|
||||||
|
# self.outdir = 'rawdat'
|
||||||
|
self.bookPayloadOffset = 0
|
||||||
|
self.bookHeaderRecords = {}
|
||||||
|
self.bookMetadata = {}
|
||||||
|
self.bookKey = None
|
||||||
|
magic = unpack("4s",self.fo.read(4))[0]
|
||||||
|
if magic != 'TPZ0':
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Header, not a Topaz file")
|
||||||
|
self.parseTopazHeaders()
|
||||||
|
self.parseMetadata()
|
||||||
|
|
||||||
|
def parseTopazHeaders(self):
|
||||||
|
def bookReadHeaderRecordData():
|
||||||
|
# Read and return the data of one header record at the current book file position
|
||||||
|
# [[offset,decompressedLength,compressedLength],...]
|
||||||
|
nbValues = bookReadEncodedNumber(self.fo)
|
||||||
|
values = []
|
||||||
|
for i in range (0,nbValues):
|
||||||
|
values.append([bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo)])
|
||||||
|
return values
|
||||||
|
def parseTopazHeaderRecord():
|
||||||
|
# Read and parse one header record at the current book file position and return the associated data
|
||||||
|
# [[offset,decompressedLength,compressedLength],...]
|
||||||
|
if ord(self.fo.read(1)) != 0x63:
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Header")
|
||||||
|
tag = bookReadString(self.fo)
|
||||||
|
record = bookReadHeaderRecordData()
|
||||||
|
return [tag,record]
|
||||||
|
nbRecords = bookReadEncodedNumber(self.fo)
|
||||||
|
for i in range (0,nbRecords):
|
||||||
|
result = parseTopazHeaderRecord()
|
||||||
|
# print result[0], result[1]
|
||||||
|
self.bookHeaderRecords[result[0]] = result[1]
|
||||||
|
if ord(self.fo.read(1)) != 0x64 :
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Header")
|
||||||
|
self.bookPayloadOffset = self.fo.tell()
|
||||||
|
|
||||||
|
def parseMetadata(self):
|
||||||
|
# Parse the metadata record from the book payload and return a list of [key,values]
|
||||||
|
self.fo.seek(self.bookPayloadOffset + self.bookHeaderRecords["metadata"][0][0])
|
||||||
|
tag = bookReadString(self.fo)
|
||||||
|
if tag != "metadata" :
|
||||||
|
raise TpzDRMError("Parse Error : Record Names Don't Match")
|
||||||
|
flags = ord(self.fo.read(1))
|
||||||
|
nbRecords = ord(self.fo.read(1))
|
||||||
|
# print nbRecords
|
||||||
|
for i in range (0,nbRecords) :
|
||||||
|
keyval = bookReadString(self.fo)
|
||||||
|
content = bookReadString(self.fo)
|
||||||
|
# print keyval
|
||||||
|
# print content
|
||||||
|
self.bookMetadata[keyval] = content
|
||||||
|
return self.bookMetadata
|
||||||
|
|
||||||
|
def getPIDMetaInfo(self):
|
||||||
|
keysRecord = self.bookMetadata.get('keys','')
|
||||||
|
keysRecordRecord = ''
|
||||||
|
if keysRecord != '':
|
||||||
|
keylst = keysRecord.split(',')
|
||||||
|
for keyval in keylst:
|
||||||
|
keysRecordRecord += self.bookMetadata.get(keyval,'')
|
||||||
|
return keysRecord, keysRecordRecord
|
||||||
|
|
||||||
|
def getBookTitle(self):
|
||||||
|
title = ''
|
||||||
|
if 'Title' in self.bookMetadata:
|
||||||
|
title = self.bookMetadata['Title']
|
||||||
|
return title
|
||||||
|
|
||||||
|
def setBookKey(self, key):
|
||||||
|
self.bookKey = key
|
||||||
|
|
||||||
|
def getBookPayloadRecord(self, name, index):
|
||||||
|
# Get a record in the book payload, given its name and index.
|
||||||
|
# decrypted and decompressed if necessary
|
||||||
|
encrypted = False
|
||||||
|
compressed = False
|
||||||
|
try:
|
||||||
|
recordOffset = self.bookHeaderRecords[name][index][0]
|
||||||
|
except:
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Record, record not found")
|
||||||
|
|
||||||
|
self.fo.seek(self.bookPayloadOffset + recordOffset)
|
||||||
|
|
||||||
|
tag = bookReadString(self.fo)
|
||||||
|
if tag != name :
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Record, record name doesn't match")
|
||||||
|
|
||||||
|
recordIndex = bookReadEncodedNumber(self.fo)
|
||||||
|
if recordIndex < 0 :
|
||||||
|
encrypted = True
|
||||||
|
recordIndex = -recordIndex -1
|
||||||
|
|
||||||
|
if recordIndex != index :
|
||||||
|
raise TpzDRMError("Parse Error : Invalid Record, index doesn't match")
|
||||||
|
|
||||||
|
if (self.bookHeaderRecords[name][index][2] > 0):
|
||||||
|
compressed = True
|
||||||
|
record = self.fo.read(self.bookHeaderRecords[name][index][2])
|
||||||
|
else:
|
||||||
|
record = self.fo.read(self.bookHeaderRecords[name][index][1])
|
||||||
|
|
||||||
|
if encrypted:
|
||||||
|
if self.bookKey:
|
||||||
|
ctx = topazCryptoInit(self.bookKey)
|
||||||
|
record = topazCryptoDecrypt(record,ctx)
|
||||||
|
else :
|
||||||
|
raise TpzDRMError("Error: Attempt to decrypt without bookKey")
|
||||||
|
|
||||||
|
if compressed:
|
||||||
|
record = zlib.decompress(record)
|
||||||
|
|
||||||
|
return record
|
||||||
|
|
||||||
|
def processBook(self, pidlst):
|
||||||
|
raw = 0
|
||||||
|
fixedimage=True
|
||||||
|
try:
|
||||||
|
keydata = self.getBookPayloadRecord('dkey', 0)
|
||||||
|
except TpzDRMError, e:
|
||||||
|
print "no dkey record found, book may not be encrypted"
|
||||||
|
print "attempting to extrct files without a book key"
|
||||||
|
self.createBookDirectory()
|
||||||
|
self.extractFiles()
|
||||||
|
print "Successfully Extracted Topaz contents"
|
||||||
|
rv = genbook.generateBook(self.outdir, raw, fixedimage)
|
||||||
|
if rv == 0:
|
||||||
|
print "\nBook Successfully generated"
|
||||||
|
return rv
|
||||||
|
|
||||||
|
# try each pid to decode the file
|
||||||
|
bookKey = None
|
||||||
|
for pid in pidlst:
|
||||||
|
# use 8 digit pids here
|
||||||
|
pid = pid[0:8]
|
||||||
|
print "\nTrying: ", pid
|
||||||
|
bookKeys = []
|
||||||
|
data = keydata
|
||||||
|
try:
|
||||||
|
bookKeys+=decryptDkeyRecords(data,pid)
|
||||||
|
except TpzDRMError, e:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
bookKey = bookKeys[0]
|
||||||
|
print "Book Key Found!"
|
||||||
|
break
|
||||||
|
|
||||||
|
if not bookKey:
|
||||||
|
raise TpzDRMError('Decryption Unsuccessful; No valid pid found')
|
||||||
|
|
||||||
|
self.setBookKey(bookKey)
|
||||||
|
self.createBookDirectory()
|
||||||
|
self.extractFiles()
|
||||||
|
print "Successfully Extracted Topaz contents"
|
||||||
|
rv = genbook.generateBook(self.outdir, raw, fixedimage)
|
||||||
|
if rv == 0:
|
||||||
|
print "\nBook Successfully generated"
|
||||||
|
return rv
|
||||||
|
|
||||||
|
def createBookDirectory(self):
|
||||||
|
outdir = self.outdir
|
||||||
|
# create output directory structure
|
||||||
|
if not os.path.exists(outdir):
|
||||||
|
os.makedirs(outdir)
|
||||||
|
destdir = os.path.join(outdir,'img')
|
||||||
|
if not os.path.exists(destdir):
|
||||||
|
os.makedirs(destdir)
|
||||||
|
destdir = os.path.join(outdir,'color_img')
|
||||||
|
if not os.path.exists(destdir):
|
||||||
|
os.makedirs(destdir)
|
||||||
|
destdir = os.path.join(outdir,'page')
|
||||||
|
if not os.path.exists(destdir):
|
||||||
|
os.makedirs(destdir)
|
||||||
|
destdir = os.path.join(outdir,'glyphs')
|
||||||
|
if not os.path.exists(destdir):
|
||||||
|
os.makedirs(destdir)
|
||||||
|
|
||||||
|
def extractFiles(self):
|
||||||
|
outdir = self.outdir
|
||||||
|
for headerRecord in self.bookHeaderRecords:
|
||||||
|
name = headerRecord
|
||||||
|
if name != "dkey" :
|
||||||
|
ext = '.dat'
|
||||||
|
if name == 'img' : ext = '.jpg'
|
||||||
|
if name == 'color' : ext = '.jpg'
|
||||||
|
print "\nProcessing Section: %s " % name
|
||||||
|
for index in range (0,len(self.bookHeaderRecords[name])) :
|
||||||
|
fnum = "%04d" % index
|
||||||
|
fname = name + fnum + ext
|
||||||
|
destdir = outdir
|
||||||
|
if name == 'img':
|
||||||
|
destdir = os.path.join(outdir,'img')
|
||||||
|
if name == 'color':
|
||||||
|
destdir = os.path.join(outdir,'color_img')
|
||||||
|
if name == 'page':
|
||||||
|
destdir = os.path.join(outdir,'page')
|
||||||
|
if name == 'glyphs':
|
||||||
|
destdir = os.path.join(outdir,'glyphs')
|
||||||
|
outputFile = os.path.join(destdir,fname)
|
||||||
|
print ".",
|
||||||
|
record = self.getBookPayloadRecord(name,index)
|
||||||
|
if record != '':
|
||||||
|
file(outputFile, 'wb').write(record)
|
||||||
|
print " "
|
||||||
|
|
||||||
|
def getHTMLZip(self, zipname):
|
||||||
|
htmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'book.html'),'book.html')
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'book.opf'),'book.opf')
|
||||||
|
if os.path.isfile(os.path.join(self.outdir,'cover.jpg')):
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'cover.jpg'),'cover.jpg')
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'style.css'),'style.css')
|
||||||
|
zipUpDir(htmlzip, self.outdir, 'img')
|
||||||
|
htmlzip.close()
|
||||||
|
|
||||||
|
def getSVGZip(self, zipname):
|
||||||
|
svgzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
|
svgzip.write(os.path.join(self.outdir,'index_svg.xhtml'),'index_svg.xhtml')
|
||||||
|
zipUpDir(svgzip, self.outdir, 'svg')
|
||||||
|
zipUpDir(svgzip, self.outdir, 'img')
|
||||||
|
svgzip.close()
|
||||||
|
|
||||||
|
def getXMLZip(self, zipname):
|
||||||
|
xmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
|
targetdir = os.path.join(self.outdir,'xml')
|
||||||
|
zipUpDir(xmlzip, targetdir, '')
|
||||||
|
zipUpDir(xmlzip, self.outdir, 'img')
|
||||||
|
xmlzip.close()
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
if os.path.isdir(self.outdir):
|
||||||
|
pass
|
||||||
|
# shutil.rmtree(self.outdir, True)
|
||||||
|
|
||||||
|
def usage(progname):
|
||||||
|
print "Removes DRM protection from Topaz ebooks and extract the contents"
|
||||||
|
print "Usage:"
|
||||||
|
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
||||||
|
|
||||||
|
|
||||||
|
# Main
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
k4 = False
|
||||||
|
pids = []
|
||||||
|
serials = []
|
||||||
|
kInfoFiles = []
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage(progname)
|
||||||
|
return 1
|
||||||
|
if len(args)<2:
|
||||||
|
usage(progname)
|
||||||
|
return 1
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-k":
|
||||||
|
if a == None :
|
||||||
|
print "Invalid parameter for -k"
|
||||||
|
return 1
|
||||||
|
kInfoFiles.append(a)
|
||||||
|
if o == "-p":
|
||||||
|
if a == None :
|
||||||
|
print "Invalid parameter for -p"
|
||||||
|
return 1
|
||||||
|
pids = a.split(',')
|
||||||
|
if o == "-s":
|
||||||
|
if a == None :
|
||||||
|
print "Invalid parameter for -s"
|
||||||
|
return 1
|
||||||
|
serials = a.split(',')
|
||||||
|
k4 = True
|
||||||
|
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
|
||||||
|
if not os.path.isfile(infile):
|
||||||
|
print "Input File Does Not Exist"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
|
|
||||||
|
tb = TopazBook(infile)
|
||||||
|
title = tb.getBookTitle()
|
||||||
|
print "Processing Book: ", title
|
||||||
|
keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
|
||||||
|
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
print "Decrypting Book"
|
||||||
|
tb.processBook(pidlst)
|
||||||
|
|
||||||
|
print " Creating HTML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_nodrm' + '.htmlz')
|
||||||
|
tb.getHTMLZip(zipname)
|
||||||
|
|
||||||
|
print " Creating SVG ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
||||||
|
tb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
||||||
|
tb.getXMLZip(zipname)
|
||||||
|
|
||||||
|
# removing internal temporary directory of pieces
|
||||||
|
tb.cleanup()
|
||||||
|
|
||||||
|
except TpzDRMError, e:
|
||||||
|
print str(e)
|
||||||
|
# tb.cleanup()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
except Exception, e:
|
||||||
|
print str(e)
|
||||||
|
# tb.cleanup
|
||||||
|
return 1
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
sys.exit(main())
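
# Example invocation (hypothetical file names; see usage() above):
#   python topazextract.py -k kindle.info MyBook.azw1 ./outdir
# or, supplying a PID directly:
#   python topazextract.py -p A1B2C3D4E5 MyBook.azw1 ./outdir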
|
||||||
|
|
||||||
39
Calibre_Plugins/README-Ineptpdf-plugin.txt
Normal file
@@ -0,0 +1,39 @@

Inept PDF Plugin - ineptpdf_vXX_plugin.zip

Requires Calibre version 0.6.44 or higher.

All credit given to IHeartCabbages for the original standalone scripts.
I had the much easier job of converting them to a Calibre plugin.

This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python, PyCrypto and/or OpenSSL already installed, but they aren't necessary.

Installation:

Go to Calibre's Preferences page. Do **NOT** select "Get plugins to enhance calibre" as this is reserved for "official" plugins; instead select "Change calibre behavior". Under "Advanced" click on the Plugins button. Use the "Load plugin from file" button to select the plugin's zip file (ineptpdf_vXX_plugin.zip) and click the 'Add' button. You're done.

Please note: Calibre does not provide any immediate feedback to indicate that adding the plugin was a success. You can always click on the File-Type plugins to see if the plugin was added.

Configuration:

When first run, the plugin will attempt to find your Adobe Digital Editions installation (on Windows and Mac OS's). If successful, it will create an 'adeptkey.der' file and save it in Calibre's configuration directory. It will use that file on subsequent runs. If there are already '*.der' files in the directory, the plugin won't attempt to find the Adobe Digital Editions installation.

So if you have Adobe Digital Editions installed on the same machine as Calibre... you are ready to go. If not... keep reading.
@@ -1,20 +1,19 @@
Plugin for K4PC, K4Mac and Mobi Books
Plugin for K4PC, K4Mac, standalone Kindles, Mobi Books, and for Devices with Fixed PIDs.

Will work on Linux (standard DRM Mobi books only), Mac OS X (standard DRM Mobi books and "Kindle for Mac" books), and Windows (standard DRM Mobi books and "Kindle for PC" books).
This plugin supersedes the MobiDeDRM, K4DeDRM, K4PCDeDRM and K4X plugins. If you install this plugin, those plugins can be safely removed.

This plugin supersedes MobiDeDRM, K4DeDRM, and K4PCDeDRM plugins. If you install this plugin, those plugins can be safely removed.

This plugin is meant to convert "Kindle for PC", "Kindle for Mac" and "Mobi" ebooks with DRM to unlocked Mobi files. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.

This plugin is meant to remove the DRM from .prc, .azw, .azw1, and .tpz ebooks. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.

Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (k4mobidedrm_vXX_plugin.zip) and click the 'Add' button. You're done.

Installation:

Highlight the plugin (K4MobiDeDRM under the "File type plugins" category) and click the "Customize Plugin" button on Calibre's Preferences->Plugins page. Enter a comma separated list of your 10 digit PIDs. This is not needed if you only want to decode "Kindle for PC" or "Kindle for Mac" books.

Go to Calibre's Preferences page. Do **NOT** select "Get Plugins to enhance calibre" as this is reserved for official calibre plugins; instead select "Change calibre behavior". Under "Advanced" click on the Plugins button. Click on the "Load plugin from file" button at the bottom of the screen. Use the file dialog button to select the plugin's zip file (K4MobiDeDRM_vXX_plugin.zip) and click the "Add" (or it may say "Open") button. Then click on the "Yes" button in the warning dialog that appears. A confirmation dialog appears that says the plugin has been installed.

If you find that it's not working for you (imported azw's are not converted to mobi format), you can save a lot of time and trouble by trying to add the azw file to Calibre with the command line tools. This will print out a lot of helpful debugging info that can be copied into any online help requests. I'm going to ask you to do it first, anyway, so you might

Configuration:

Highlight the plugin (K4MobiDeDRM under the "File type plugins" category) and click the "Customize Plugin" button on Calibre's Preferences->Plugins page. Enter your 10 digit PID. If you have more than one PID, separate them with a comma (no spaces). If you have a standalone Kindle, include the 16 digit serial number (these typically begin "B0...") in this list, again separated from the PIDs or other serial numbers with a comma (no spaces). This configuration is not needed if you only want to decode "Kindle for PC" or "Kindle for Mac" books.
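
For example (hypothetical values only), an entry listing one 10 digit PID and one standalone Kindle serial number would look like: A1B2C3D4E5,B001A2B3C4D5E6F7
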
@@ -5,7 +5,9 @@ All credit given to The Dark Reverser for the original standalone script. I had
All credit given to The Dark Reverser for the original standalone script. I had the much easier job of converting it to a Calibre plugin.

Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (eReaderPDB2PML_vXX_plugin.zip) and click the 'Add' button. You're done.

This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files. Calibre can then convert it to whatever format you desire. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. I've included the psyco libraries (compiled for each platform) for speed. If your system can use them, great! Otherwise, they won't be used and things will just work slower.

Installation:
@@ -9,9 +9,11 @@ with Adobe's Adept encryption. It is meant to function without having to install
I had the much easier job of converting them to a Calibre plugin.

Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (ignobleepub_vXX_plugin.zip) and

This plugin is meant to decrypt Barnes & Noble Epubs that are protected
with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.

Installation:
@@ -8,7 +8,10 @@ This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected w
I had the much easier job of converting them to a Calibre plugin.

Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (ineptepub_vXX_plugin.zip) and click the 'Add' button. You're done.

This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.

Installation:
@@ -1,26 +0,0 @@
Installing openssl on Windows 64-bit (Windows 2000 and higher)

Win64 OpenSSL v0.9.8o (8Mb)
http://www.slproweb.com/download/Win64OpenSSL-0_9_8o.exe
(if you get an error message about missing Visual C++ redistributables... cancel the install and install the below support program from Microsoft, THEN install OpenSSL)

Visual C++ 2008 Redistributables (x64) (1.7Mb)
http://www.microsoft.com/downloads/details.aspx?familyid=bd2a6171-e2d6-4230-b809-9a8d7548c1b6

Installing openssl on Windows 32-bit (Windows 2000 and higher)

Win32 OpenSSL v0.9.8o (8Mb)
http://www.slproweb.com/download/Win32OpenSSL-0_9_8o.exe
(if you get an error message about missing Visual C++ redistributables... cancel the install and install the below support program from Microsoft, THEN install OpenSSL)

Visual C++ 2008 Redistributables (1.7Mb)
http://www.microsoft.com/downloads/details.aspx?familyid=9B2DA534-3E03-4391-8A4D-074B9F2BC1BF

Other versions of OpenSSL (and versions for Windows older than Windows 2000) can be found on the following website.

Shining Light Productions
http://www.slproweb.com/products/Win32OpenSSL.html
Binary file not shown.
@@ -10,9 +10,7 @@
|
|||||||
# This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files.
|
# This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files.
|
||||||
# Calibre can then convert it to whatever format you desire.
|
# Calibre can then convert it to whatever format you desire.
|
||||||
# It is meant to function without having to install any dependencies...
|
# It is meant to function without having to install any dependencies...
|
||||||
# other than having Calibre installed, of course. I've included the psyco libraries
|
# other than having Calibre installed, of course.
|
||||||
# (compiled for each platform) for speed. If your system can use them, great!
|
|
||||||
# Otherwise, they won't be used and things will just work slower.
|
|
||||||
#
|
#
|
||||||
# Installation:
|
# Installation:
|
||||||
# Go to Calibre's Preferences page... click on the Plugins button. Use the file
|
# Go to Calibre's Preferences page... click on the Plugins button. Use the file
|
||||||
@@ -32,10 +30,16 @@
|
|||||||
# Revision history:
|
# Revision history:
|
||||||
# 0.0.1 - Initial release
|
# 0.0.1 - Initial release
|
||||||
# 0.0.2 - updated to distinguish it from earlier non-openssl version
|
# 0.0.2 - updated to distinguish it from earlier non-openssl version
|
||||||
|
# 0.0.3 - removed the previously added psyco code as it is not supported under Calibre's Python 2.7
|
||||||
|
# 0.0.4 - minor typos fixed
|
||||||
|
# 0.0.5 - updated to the new calibre plugin interface
|
||||||
|
|
||||||
import sys, os
|
import sys, os
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.ptempfile import PersistentTemporaryDirectory
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
from calibre_plugins.erdrpdb2pml import erdr2pml
|
||||||
|
|
||||||
class eRdrDeDRM(FileTypePlugin):
|
class eRdrDeDRM(FileTypePlugin):
|
||||||
name = 'eReader PDB 2 PML' # Name of the plugin
|
name = 'eReader PDB 2 PML' # Name of the plugin
|
||||||
@@ -43,24 +47,14 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
Credit given to The Dark Reverser for the original standalone script.'
|
Credit given to The Dark Reverser for the original standalone script.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
||||||
author = 'DiapDealer' # The author of this plugin
|
author = 'DiapDealer' # The author of this plugin
|
||||||
version = (0, 0, 2) # The version number of this plugin
|
version = (0, 0, 6) # The version number of this plugin
|
||||||
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
||||||
on_import = True # Run this plugin during the import
|
on_import = True # Run this plugin during the import
|
||||||
|
minimum_calibre_version = (0, 7, 55)
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
def run(self, path_to_ebook):
|
||||||
from calibre.ptempfile import PersistentTemporaryDirectory
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
pdir = 'windows' if iswindows else 'osx' if isosx else 'linux'
|
|
||||||
ppath = os.path.join(self.sys_insertion_path, pdir)
|
|
||||||
sys.path.insert(0, ppath)
|
|
||||||
|
|
||||||
global bookname, erdr2pml
|
global bookname, erdr2pml
|
||||||
import erdr2pml
|
|
||||||
|
|
||||||
if 'psyco' in sys.modules:
|
|
||||||
print 'Using psyco acceleration for %s.' % pdir
|
|
||||||
else:
|
|
||||||
print 'NOT using psyco acceleration for %s. Conversion may be slow.' % pdir
|
|
||||||
|
|
||||||
infile = path_to_ebook
|
infile = path_to_ebook
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
@@ -74,7 +68,6 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
try:
|
try:
|
||||||
name, cc = i.split(',')
|
name, cc = i.split(',')
|
||||||
except ValueError:
|
except ValueError:
|
||||||
sys.path.remove(ppath)
|
|
||||||
print ' Error parsing user supplied data.'
|
print ' Error parsing user supplied data.'
|
||||||
return path_to_ebook
|
return path_to_ebook
|
||||||
|
|
||||||
@@ -86,7 +79,6 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
|
|
||||||
if pmlfilepath and pmlfilepath != 1:
|
if pmlfilepath and pmlfilepath != 1:
|
||||||
import zipfile
|
import zipfile
|
||||||
import shutil
|
|
||||||
print " Creating PMLZ file"
|
print " Creating PMLZ file"
|
||||||
myZipFile = zipfile.ZipFile(pmlzfile.name,'w',zipfile.ZIP_STORED, False)
|
myZipFile = zipfile.ZipFile(pmlzfile.name,'w',zipfile.ZIP_STORED, False)
|
||||||
list = os.listdir(outdir)
|
list = os.listdir(outdir)
|
||||||
@@ -122,7 +114,7 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
|
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = erdr2pml.EreaderProcessor(sect.loadSection, name, cc)
|
er = erdr2pml.EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -55,29 +55,15 @@
|
|||||||
# 0.14 - contributed enhancement to support --make-pmlz switch
|
# 0.14 - contributed enhancement to support --make-pmlz switch
|
||||||
# 0.15 - enabled high-ascii to pml character encoding. DropBook now works on Mac.
|
# 0.15 - enabled high-ascii to pml character encoding. DropBook now works on Mac.
|
||||||
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
||||||
|
# 0.17 - added support for pycrypto's DES as well
|
||||||
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 0.19 - Modify the interface to allow use of import
|
||||||
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
Des = None
|
__version__='0.21'
|
||||||
|
|
||||||
import openssl_des
|
|
||||||
Des = openssl_des.load_libcrypto()
|
|
||||||
|
|
||||||
# if that did not work then use pure python implementation
|
|
||||||
# of DES and try to speed it up with Psycho
|
|
||||||
if Des == None:
|
|
||||||
import python_des
|
|
||||||
Des = python_des.Des
|
|
||||||
# Import Psyco if available
|
|
||||||
try:
|
|
||||||
# Dumb speed hack 1
|
|
||||||
# http://psyco.sourceforge.net
|
|
||||||
import psyco
|
|
||||||
psyco.full()
|
|
||||||
pass
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
__version__='0.16'
|
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -89,28 +75,85 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
|
||||||
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
Des = None
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
# first try with pycrypto
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
|
Des = pycrypto_des.load_pycrypto()
|
||||||
|
if Des == None:
|
||||||
|
# then try with openssl
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
|
Des = openssl_des.load_libcrypto()
|
||||||
|
else:
|
||||||
|
# first try with openssl
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
|
Des = openssl_des.load_libcrypto()
|
||||||
|
if Des == None:
|
||||||
|
# then try with pycrypto
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
|
Des = pycrypto_des.load_pycrypto()
|
||||||
|
|
||||||
|
# if that did not work then use pure python implementation
|
||||||
|
# of DES and try to speed it up with Psyco
|
||||||
|
if Des == None:
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import python_des
|
||||||
|
else:
|
||||||
|
import python_des
|
||||||
|
Des = python_des.Des
|
||||||
|
# Import Psyco if available
|
||||||
|
try:
|
||||||
|
# http://psyco.sourceforge.net
|
||||||
|
import psyco
|
||||||
|
psyco.full()
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
except ImportError:
|
except ImportError:
|
||||||
# older Python release
|
# older Python release
|
||||||
import sha
|
import sha
|
||||||
sha1 = lambda s: sha.new(s)
|
sha1 = lambda s: sha.new(s)
|
||||||
|
|
||||||
import cgi
|
import cgi
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logging.basicConfig()
|
logging.basicConfig()
|
||||||
#logging.basicConfig(level=logging.DEBUG)
|
#logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
raise ValueError('Invalid file format')
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
@@ -148,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -185,9 +228,15 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
|
if (sect.bkType == "Book"):
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
@@ -205,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -233,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
|
if drm_sub_version == 13:
|
||||||
encrypted_key = r[44:44+8]
|
encrypted_key = r[44:44+8]
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -322,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -334,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -352,10 +409,10 @@ def cleanPML(pml):
|
|||||||
def convertEreaderToPml(infile, name, cc, outdir):
|
def convertEreaderToPml(infile, name, cc, outdir):
|
||||||
if not os.path.exists(outdir):
|
if not os.path.exists(outdir):
|
||||||
os.makedirs(outdir)
|
os.makedirs(outdir)
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -378,62 +435,14 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
|
||||||
print "Converts DRMed eReader books to PML Source"
|
|
||||||
print "Usage:"
|
|
||||||
print " erdr2pml [options] infile.pdb [outdir] \"your name\" credit_card_number "
|
|
||||||
print " "
|
|
||||||
print "Options: "
|
|
||||||
print " -h prints this message"
|
|
||||||
print " --make-pmlz create PMLZ instead of using output directory"
|
|
||||||
print " "
|
|
||||||
print "Note:"
|
|
||||||
print " if ommitted, outdir defaults based on 'infile.pdb'"
|
|
||||||
print " It's enough to enter the last 8 digits of the credit card number"
|
|
||||||
return
|
|
||||||
|
|
||||||
def main(argv=None):
|
|
||||||
global bookname
|
|
||||||
try:
|
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
|
||||||
except getopt.GetoptError, err:
|
|
||||||
print str(err)
|
|
||||||
usage()
|
|
||||||
return 1
|
|
||||||
make_pmlz = False
|
|
||||||
zipname = None
|
|
||||||
for o, a in opts:
|
|
||||||
if o == "-h":
|
|
||||||
usage()
|
|
||||||
return 0
|
|
||||||
elif o == "--make-pmlz":
|
|
||||||
make_pmlz = True
|
|
||||||
zipname = ''
|
|
||||||
|
|
||||||
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
|
||||||
|
|
||||||
if len(args)!=3 and len(args)!=4:
|
|
||||||
usage()
|
|
||||||
return 1
|
|
||||||
else:
|
|
||||||
if len(args)==3:
|
|
||||||
infile, name, cc = args[0], args[1], args[2]
|
|
||||||
outdir = infile[:-4] + '_Source'
|
|
||||||
elif len(args)==4:
|
|
||||||
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, name, cc, make_pmlz):
|
||||||
if make_pmlz :
|
if make_pmlz :
|
||||||
# ignore specified outdir, use tempdir instead
|
# ignore specified outdir, use tempdir instead
|
||||||
outdir = tempfile.mkdtemp()
|
outdir = tempfile.mkdtemp()
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
print "Processing..."
|
print "Processing..."
|
||||||
import time
|
|
||||||
start_time = time.time()
|
|
||||||
convertEreaderToPml(infile, name, cc, outdir)
|
convertEreaderToPml(infile, name, cc, outdir)
|
||||||
|
|
||||||
if make_pmlz :
|
if make_pmlz :
|
||||||
import zipfile
|
import zipfile
|
||||||
import shutil
|
import shutil
|
||||||
@@ -456,12 +465,7 @@ def main(argv=None):
|
|||||||
myZipFile.write(imagePath, localname)
|
myZipFile.write(imagePath, localname)
|
||||||
myZipFile.close()
|
myZipFile.close()
|
||||||
# remove temporary directory
|
# remove temporary directory
|
||||||
shutil.rmtree(outdir)
|
shutil.rmtree(outdir, True)
|
||||||
|
|
||||||
end_time = time.time()
|
|
||||||
search_time = end_time - start_time
|
|
||||||
print 'elapsed time: %.2f seconds' % (search_time, )
|
|
||||||
if make_pmlz :
|
|
||||||
print 'output is %s' % zipname
|
print 'output is %s' % zipname
|
||||||
else :
|
else :
|
||||||
print 'output in %s' % outdir
|
print 'output in %s' % outdir
|
||||||
@@ -471,6 +475,53 @@ def main(argv=None):
|
|||||||
return 1
|
return 1
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print "Converts DRMed eReader books to PML Source"
|
||||||
|
print "Usage:"
|
||||||
|
print " erdr2pml [options] infile.pdb [outdir] \"your name\" credit_card_number "
|
||||||
|
print " "
|
||||||
|
print "Options: "
|
||||||
|
print " -h prints this message"
|
||||||
|
print " --make-pmlz create PMLZ instead of using output directory"
|
||||||
|
print " "
|
||||||
|
print "Note:"
|
||||||
|
print " if ommitted, outdir defaults based on 'infile.pdb'"
|
||||||
|
print " It's enough to enter the last 8 digits of the credit card number"
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=None):
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
make_pmlz = False
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-h":
|
||||||
|
usage()
|
||||||
|
return 0
|
||||||
|
elif o == "--make-pmlz":
|
||||||
|
make_pmlz = True
|
||||||
|
|
||||||
|
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
||||||
|
|
||||||
|
if len(args)!=3 and len(args)!=4:
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if len(args)==3:
|
||||||
|
infile, name, cc = args[0], args[1], args[2]
|
||||||
|
outdir = infile[:-4] + '_Source'
|
||||||
|
elif len(args)==4:
|
||||||
|
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
||||||
|
|
||||||
|
return decryptBook(infile, outdir, name, cc, make_pmlz)
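
# Example invocation (hypothetical values; per usage(), the last 8 digits of
# the credit card number are enough):
#   python erdr2pml.py --make-pmlz MyBook.pdb "Your Name" 12345678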
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
31
Calibre_Plugins/eReaderPDB2PML_plugin/pycrypto_des.py
Normal file
@@ -0,0 +1,31 @@
#!/usr/bin/env python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

def load_pycrypto():
    try :
        from Crypto.Cipher import DES as _DES
    except:
        return None

    class DES(object):
        def __init__(self, key):
            if len(key) != 8 :
                raise Error('DES improper key used')
            self.key = key
            self._des = _DES.new(key,_DES.MODE_ECB)
        def desdecrypt(self, data):
            return self._des.decrypt(data)
        def decrypt(self, data):
            if not data:
                return ''
            i = 0
            result = []
            while i < len(data):
                block = data[i:i+8]
                processed_block = self.desdecrypt(block)
                result.append(processed_block)
                i += 8
            return ''.join(result)
    return DES
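
# A minimal usage sketch (assumes PyCrypto is installed; key and data are
# hypothetical). load_pycrypto() returns the DES wrapper class, or None if the
# Crypto.Cipher import fails.
#
#   DES = load_pycrypto()
#   if DES is not None:
#       cipher = DES('8bytekey')          # key must be exactly 8 bytes
#       plaintext = cipher.decrypt(data)  # data length a multiple of 8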
@@ -1,3 +1,5 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
ECB = 0
|
ECB = 0
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -4,7 +4,7 @@
|
|||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
# later. <http://www.gnu.org/licenses/>
|
# later. <http://www.gnu.org/licenses/>
|
||||||
#
|
#
|
||||||
# Requires Calibre version 0.6.44 or higher.
|
# Requires Calibre version 0.7.55 or higher.
|
||||||
#
|
#
|
||||||
# All credit given to I <3 Cabbages for the original standalone scripts.
|
# All credit given to I <3 Cabbages for the original standalone scripts.
|
||||||
# I had the much easier job of converting them to a Calibre plugin.
|
||||||
@@ -44,7 +44,11 @@
|
|||||||
# 0.1.0 - Initial release
|
# 0.1.0 - Initial release
|
||||||
# 0.1.1 - Allow Windows users to make use of openssl if they have it installed.
|
# 0.1.1 - Allow Windows users to make use of openssl if they have it installed.
|
||||||
# - Incorporated SomeUpdates zipfix routine.
|
# - Incorporated SomeUpdates zipfix routine.
|
||||||
|
# 0.1.2 - bug fix for non-ascii file names in encryption.xml
|
||||||
|
# 0.1.3 - Try PyCrypto on Windows first
|
||||||
|
# 0.1.4 - update zipfix to deal with mimetype not in correct place
|
||||||
|
# 0.1.5 - update zipfix to deal with completely missing mimetype files
|
||||||
|
# 0.1.6 - update ot the new calibre plugin interface
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypt Barnes & Noble ADEPT encrypted EPUB books.
|
Decrypt Barnes & Noble ADEPT encrypted EPUB books.
|
||||||
@@ -169,7 +173,10 @@ def _load_crypto_pycrypto():
|
|||||||
|
|
||||||
def _load_crypto():
|
def _load_crypto():
|
||||||
_aes = _aes2 = None
|
_aes = _aes2 = None
|
||||||
for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
|
cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
try:
|
try:
|
||||||
_aes, _aes2 = loader()
|
_aes, _aes2 = loader()
|
||||||
break
|
break
|
||||||
@@ -209,6 +216,7 @@ class Decryptor(object):
|
|||||||
enc('CipherReference'))
|
enc('CipherReference'))
|
||||||
for elem in encryption.findall(expr):
|
for elem in encryption.findall(expr):
|
||||||
path = elem.get('URI', None)
|
path = elem.get('URI', None)
|
||||||
|
path = path.encode('utf-8')
|
||||||
if path is not None:
|
if path is not None:
|
||||||
encrypted.add(path)
|
encrypted.add(path)
|
||||||
|
|
||||||
@@ -259,6 +267,7 @@ def plugin_main(userkey, inpath, outpath):
|
|||||||
return 0
|
return 0
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
|
||||||
class IgnobleDeDRM(FileTypePlugin):
|
class IgnobleDeDRM(FileTypePlugin):
|
||||||
name = 'Ignoble Epub DeDRM'
|
name = 'Ignoble Epub DeDRM'
|
||||||
@@ -266,8 +275,8 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows']
|
supported_platforms = ['linux', 'osx', 'windows']
|
||||||
author = 'DiapDealer'
|
author = 'DiapDealer'
|
||||||
version = (0, 1, 1)
|
version = (0, 1, 6)
|
||||||
minimum_calibre_version = (0, 6, 44) # Compiled python libraries cannot be imported in earlier versions.
|
minimum_calibre_version = (0, 7, 55) # Compiled python libraries cannot be imported in earlier versions.
|
||||||
file_types = set(['epub'])
|
file_types = set(['epub'])
|
||||||
on_import = True
|
on_import = True
|
||||||
|
|
||||||
@@ -275,20 +284,10 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
global AES
|
global AES
|
||||||
global AES2
|
global AES2
|
||||||
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
|
|
||||||
# Add the included pycrypto import directory for Windows users.
|
|
||||||
pdir = 'windows' if iswindows else 'osx' if isosx else 'linux'
|
|
||||||
ppath = os.path.join(self.sys_insertion_path, pdir)
|
|
||||||
sys.path.append(ppath)
|
|
||||||
|
|
||||||
AES, AES2 = _load_crypto()
|
AES, AES2 = _load_crypto()
|
||||||
|
|
||||||
if AES == None or AES2 == None:
|
if AES == None or AES2 == None:
|
||||||
# Failed to load libcrypto or PyCrypto... Adobe Epubs can't be decrypted.'
|
# Failed to load libcrypto or PyCrypto... Adobe Epubs can't be decrypted.'
|
||||||
sys.path.remove(ppath)
|
|
||||||
raise IGNOBLEError('IgnobleEpub - Failed to load crypto libs.')
|
raise IGNOBLEError('IgnobleEpub - Failed to load crypto libs.')
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -317,7 +316,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
# Get name and credit card number from Plugin Customization
|
# Get name and credit card number from Plugin Customization
|
||||||
if not userkeys and not self.site_customization:
|
if not userkeys and not self.site_customization:
|
||||||
# Plugin hasn't been configured... do nothing.
|
# Plugin hasn't been configured... do nothing.
|
||||||
sys.path.remove(ppath)
|
|
||||||
raise IGNOBLEError('IgnobleEpub - No keys found. Plugin not configured.')
|
raise IGNOBLEError('IgnobleEpub - No keys found. Plugin not configured.')
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -330,7 +328,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
name, ccn = i.split(',')
|
name, ccn = i.split(',')
|
||||||
keycount += 1
|
keycount += 1
|
||||||
except ValueError:
|
except ValueError:
|
||||||
sys.path.remove(ppath)
|
|
||||||
raise IGNOBLEError('IgnobleEpub - Error parsing user supplied data.')
|
raise IGNOBLEError('IgnobleEpub - Error parsing user supplied data.')
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -342,7 +339,7 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
for userkey in userkeys:
|
for userkey in userkeys:
|
||||||
# Create a TemporaryPersistent file to work with.
|
# Create a TemporaryPersistent file to work with.
|
||||||
# Check original epub archive for zip errors.
|
# Check original epub archive for zip errors.
|
||||||
import zipfix
|
from calibre_plugins.ignobleepub import zipfix
|
||||||
inf = self.temporary_file('.epub')
|
inf = self.temporary_file('.epub')
|
||||||
try:
|
try:
|
||||||
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
||||||
@@ -360,7 +357,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
if result == 1:
|
if result == 1:
|
||||||
print 'IgnobleEpub: Not a B&N Adept Epub... punting.'
|
print 'IgnobleEpub: Not a B&N Adept Epub... punting.'
|
||||||
of.close()
|
of.close()
|
||||||
sys.path.remove(ppath)
|
|
||||||
return path_to_ebook
|
return path_to_ebook
|
||||||
break
|
break
|
||||||
|
|
||||||
@@ -369,7 +365,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
if result == 0:
|
if result == 0:
|
||||||
print 'IgnobleEpub: Encryption successfully removed.'
|
print 'IgnobleEpub: Encryption successfully removed.'
|
||||||
of.close()
|
of.close()
|
||||||
sys.path.remove(ppath)
|
|
||||||
return of.name
|
return of.name
|
||||||
break
|
break
|
||||||
|
|
||||||
@@ -379,7 +374,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
# Something went wrong with decryption.
|
# Something went wrong with decryption.
|
||||||
# Import the original unmolested epub.
|
# Import the original unmolested epub.
|
||||||
of.close
|
of.close
|
||||||
sys.path.remove(ppath)
|
|
||||||
raise IGNOBLEError('IgnobleEpub - Ultimately failed to decrypt.')
|
raise IGNOBLEError('IgnobleEpub - Ultimately failed to decrypt.')
|
||||||
return
|
return
|
||||||
|
|
||||||
|
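The _load_crypto() hunk above only changes the order in which the crypto back-ends are tried, so that Windows users now reach the bundled PyCrypto before OpenSSL's libcrypto. A self-contained sketch of that loader-ordering pattern (the loader bodies below are placeholders, not the plugin's real loaders):

import sys

def _load_libcrypto():
    raise ImportError('stand-in: pretend libcrypto is unavailable')

def _load_pycrypto():
    return 'AES implementation backed by PyCrypto'   # stand-in return value

def load_crypto():
    loaders = (_load_libcrypto, _load_pycrypto)
    if sys.platform.startswith('win'):
        # prefer PyCrypto on Windows, as the plugin now does
        loaders = (_load_pycrypto, _load_libcrypto)
    for loader in loaders:
        try:
            return loader()
        except Exception:
            continue
    return None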
Binary file not shown.
@@ -1,51 +0,0 @@
(entire 51-line file removed: the bundled PyCrypto Crypto.Cipher package __init__ — public-domain licence header, the "Secret-key encryption algorithms" docstring listing AES/ARC2/ARC4/Blowfish/CAST/DES/DES3/XOR, and the __all__ and __revision__ definitions)
@@ -1,46 +0,0 @@
(entire 46-line file removed: the bundled PyCrypto top-level package __init__ — public-domain licence header, the "Python Cryptography Toolkit" docstring describing the Cipher/Hash/Protocol/PublicKey/Util subpackages, __all__, __version__ = '2.3' and version_info)
@@ -1,57 +0,0 @@
(entire 57-line file removed: the bundled PyCrypto pct_warnings.py — public-domain licence header plus the CryptoWarning, CryptoDeprecationWarning, CryptoRuntimeWarning, RandomPool_DeprecationWarning, ClockRewindWarning and GetRandomNumber_DeprecationWarning classes and the 'always' filter for ClockRewindWarning)
@@ -13,9 +13,20 @@ _FILENAME_LEN_OFFSET = 26
_EXTRA_LEN_OFFSET = 28
_FILENAME_OFFSET = 30
_MAX_SIZE = 64 * 1024
+_MIMETYPE = 'application/epub+zip'
+
+class ZipInfo(zipfile.ZipInfo):
+    def __init__(self, *args, **kwargs):
+        if 'compress_type' in kwargs:
+            compress_type = kwargs.pop('compress_type')
+        super(ZipInfo, self).__init__(*args, **kwargs)
+        self.compress_type = compress_type
+
class fixZip:
    def __init__(self, zinput, zoutput):
+        self.ztype = 'zip'
+        if zinput.lower().find('.epub') >= 0 :
+            self.ztype = 'epub'
        self.inzip = zipfile.ZipFile(zinput,'r')
        self.outzip = zipfile.ZipFile(zoutput,'w')
        # open the input zip for reading only as a raw file
@@ -82,12 +93,18 @@ class fixZip:
        # and copy member over to output archive
        # if problems exist with local vs central filename, fix them

-        for i, zinfo in enumerate(self.inzip.infolist()):
+        # if epub write mimetype file first, with no compression
+        if self.ztype == 'epub':
+            nzinfo = ZipInfo('mimetype', compress_type=zipfile.ZIP_STORED)
+            self.outzip.writestr(nzinfo, _MIMETYPE)
+
+        # write the rest of the files
+        for zinfo in self.inzip.infolist():
+            if zinfo.filename != "mimetype" or self.ztype == '.zip':
                data = None
                nzinfo = zinfo

                try:
-                    data = self.inzip.read(zinfo)
+                    data = self.inzip.read(zinfo.filename)
                except zipfile.BadZipfile or zipfile.error:
                    local_name = self.getlocalname(zinfo)
                    data = self.getfiledata(zinfo)
@@ -111,14 +128,7 @@ def usage():
    """

-def main(argv=sys.argv):
-    if len(argv)!=3:
-        usage()
-        return 1
-    infile = None
-    outfile = None
-    infile = argv[1]
-    outfile = argv[2]
+def repairBook(infile, outfile):
    if not os.path.exists(infile):
        print "Error: Input Zip File does not exist"
        return 1
@@ -130,6 +140,16 @@ def main(argv=sys.argv):
        print "Error Occurred ", e
        return 2

+
+def main(argv=sys.argv):
+    if len(argv)!=3:
+        usage()
+        return 1
+    infile = argv[1]
+    outfile = argv[2]
+    return repairBook(infile, outfile)
+
+
if __name__ == '__main__' :
    sys.exit(main())
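The zipfix changes above exist because an EPUB's mimetype entry must be the archive's first member and must be stored uncompressed; out-of-place or missing mimetype files are exactly what the new ZipInfo/_MIMETYPE code repairs. A minimal illustration of writing an EPUB container that satisfies that rule with the standard library (illustrative only, not part of the tools):

import zipfile

def write_epub_shell(path):
    # deflate everything by default ...
    z = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
    # ... except mimetype, which must come first and be stored uncompressed
    info = zipfile.ZipInfo('mimetype')
    info.compress_type = zipfile.ZIP_STORED
    z.writestr(info, 'application/epub+zip')
    z.writestr('META-INF/container.xml', '<?xml version="1.0"?>\n<container/>')
    z.close()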
Binary file not shown.
@@ -4,7 +4,7 @@
# Released under the terms of the GNU General Public Licence, version 3 or
# later. <http://www.gnu.org/licenses/>
#
-# Requires Calibre version 0.6.44 or higher.
+# Requires Calibre version 0.7.55 or higher.
#
# All credit given to I <3 Cabbages for the original standalone scripts.
# I had the much easier job of converting them to a Calibre plugin.
@@ -43,7 +43,13 @@
# 0.1 - Initial release
# 0.1.1 - Allow Windows users to make use of openssl if they have it installed.
#       - Incorporated SomeUpdates zipfix routine.
+# 0.1.2 - Removed Carbon dependency for Mac users. Fixes an issue that was a
+#         result of Calibre changing to python 2.7.
+# 0.1.3 - bug fix for epubs with non-ascii chars in file names
+# 0.1.4 - default to try PyCrypto first on Windows
+# 0.1.5 - update zipfix to handle out of position mimetypes
+# 0.1.6 - update zipfix to handle completely missing mimetype files
+# 0.1.7 - update to new calibre plugin interface
"""
Decrypt Adobe ADEPT-encrypted EPUB books.
@@ -281,7 +287,10 @@ def _load_crypto_pycrypto():

def _load_crypto():
    _aes = _rsa = None
-   for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+   cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
+   if sys.platform.startswith('win'):
+       cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
+   for loader in cryptolist:
        try:
            _aes, _rsa = loader()
            break
@@ -306,6 +315,7 @@ class Decryptor(object):
                     enc('CipherReference'))
        for elem in encryption.findall(expr):
            path = elem.get('URI', None)
+           path = path.encode('utf-8')
            if path is not None:
                encrypted.add(path)
@@ -356,6 +366,7 @@ def plugin_main(userkey, inpath, outpath):
    return 0

from calibre.customize import FileTypePlugin
+from calibre.constants import iswindows, isosx

class IneptDeDRM(FileTypePlugin):
    name = 'Inept Epub DeDRM'
@@ -363,8 +374,8 @@ class IneptDeDRM(FileTypePlugin):
            Credit given to I <3 Cabbages for the original stand-alone scripts.'
    supported_platforms = ['linux', 'osx', 'windows']
    author = 'DiapDealer'
-   version = (0, 1, 1)
+   version = (0, 1, 7)
-   minimum_calibre_version = (0, 6, 44) # Compiled python libraries cannot be imported in earlier versions.
+   minimum_calibre_version = (0, 7, 55) # Compiled python libraries cannot be imported in earlier versions.
    file_types = set(['epub'])
    on_import = True
    priority = 100
@@ -373,21 +384,10 @@ class IneptDeDRM(FileTypePlugin):
        global AES
        global RSA

-       from calibre.gui2 import is_ok_to_use_qt
-       from PyQt4.Qt import QMessageBox
-       from calibre.constants import iswindows, isosx
-
-       # Add the included pycrypto import directory for Windows users.
-       # Add the included Carbon import directory for Mac users.
-       pdir = 'windows' if iswindows else 'osx' if isosx else 'linux'
-       ppath = os.path.join(self.sys_insertion_path, pdir)
-       sys.path.append(ppath)
-
        AES, RSA = _load_crypto()

        if AES == None or RSA == None:
            # Failed to load libcrypto or PyCrypto... Adobe Epubs can\'t be decrypted.'
-           sys.path.remove(ppath)
            raise ADEPTError('IneptEpub: Failed to load crypto libs... Adobe Epubs can\'t be decrypted.')
            return
@@ -416,11 +416,11 @@ class IneptDeDRM(FileTypePlugin):
        # Calibre's configuration directory for future use.
        if iswindows or isosx:
            # ADE key retrieval script included in respective OS folder.
-           from ade_key import retrieve_key
+           from calibre_plugins.ineptepub.ade_key import retrieve_key
            try:
                keydata = retrieve_key()
                userkeys.append(keydata)
-               keypath = os.path.join(confpath, 'adeptkey.der')
+               keypath = os.path.join(confpath, 'calibre-adeptkey.der')
                with open(keypath, 'wb') as f:
                    f.write(keydata)
                print 'IneptEpub: Created keyfile from ADE install.'
@@ -430,7 +430,6 @@ class IneptDeDRM(FileTypePlugin):

        if not userkeys:
            # No user keys found... bail out.
-           sys.path.remove(ppath)
            raise ADEPTError('IneptEpub - No keys found. Check keyfile(s)/ADE install')
            return
@@ -438,7 +437,7 @@ class IneptDeDRM(FileTypePlugin):
        for userkey in userkeys:
            # Create a TemporaryPersistent file to work with.
            # Check original epub archive for zip errors.
-           import zipfix
+           from calibre_plugins.ineptepub import zipfix
            inf = self.temporary_file('.epub')
            try:
                fr = zipfix.fixZip(path_to_ebook, inf.name)
@@ -456,7 +455,6 @@ class IneptDeDRM(FileTypePlugin):
            if result == 1:
                print 'IneptEpub: Not an Adobe Adept Epub... punting.'
                of.close()
-               sys.path.remove(ppath)
                return path_to_ebook
                break
@@ -465,7 +463,6 @@ class IneptDeDRM(FileTypePlugin):
            if result == 0:
                print 'IneptEpub: Encryption successfully removed.'
                of.close
-               sys.path.remove(ppath)
                return of.name
                break
@@ -475,7 +472,6 @@ class IneptDeDRM(FileTypePlugin):
            # Something went wrong with decryption.
            # Import the original unmolested epub.
            of.close
-           sys.path.remove(ppath)
            raise ADEPTError('IneptEpub - Ultimately failed to decrypt')
            return
@@ -79,7 +79,7 @@ if iswindows:

def _load_crypto():
    AES = None
-   for loader in (_load_crypto_libcrypto, _load_crypto_pycrypto):
+   for loader in (_load_crypto_pycrypto, _load_crypto_libcrypto):
        try:
            AES = loader()
            break
@@ -310,35 +310,31 @@ if iswindows:
else:

    import xml.etree.ElementTree as etree
-   import Carbon.File
-   import Carbon.Folder
-   import Carbon.Folders
-   import MacOS
+   import subprocess

-   ACTIVATION_PATH = 'Adobe/Digital Editions/activation.dat'
    NSMAP = {'adept': 'http://ns.adobe.com/adept',
             'enc': 'http://www.w3.org/2001/04/xmlenc#'}

-   def find_folder(domain, dtype):
-       try:
-           fsref = Carbon.Folder.FSFindFolder(domain, dtype, False)
-           return Carbon.File.pathname(fsref)
-       except MacOS.Error:
-           return None
-
-   def find_app_support_file(subpath):
-       dtype = Carbon.Folders.kApplicationSupportFolderType
-       for domain in Carbon.Folders.kUserDomain, Carbon.Folders.kLocalDomain:
-           path = find_folder(domain, dtype)
-           if path is None:
-               continue
-           path = os.path.join(path, subpath)
-           if os.path.isfile(path):
-               return path
-       return None
+   def findActivationDat():
+       home = os.getenv('HOME')
+       cmdline = 'find "' + home + '/Library/Application Support/Adobe/Digital Editions" -name "activation.dat"'
+       cmdline = cmdline.encode(sys.getfilesystemencoding())
+       p2 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
+       out1, out2 = p2.communicate()
+       reslst = out1.split('\n')
+       cnt = len(reslst)
+       for j in xrange(cnt):
+           resline = reslst[j]
+           pp = resline.find('activation.dat')
+           if pp >= 0:
+               ActDatPath = resline
+               break
+       if os.path.exists(ActDatPath):
+           return ActDatPath
+       return None

    def retrieve_key():
-       actpath = find_app_support_file(ACTIVATION_PATH)
+       actpath = findActivationDat()
        if actpath is None:
            raise ADEPTError("Could not locate ADE activation")
        tree = etree.parse(actpath)
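The Mac branch of ade_key.py above drops the Carbon modules (which the changelog notes stopped working once Calibre moved to Python 2.7) and instead shells out to find to locate activation.dat. For comparison, a pure-Python equivalent of that search could look like this (an illustrative sketch, not code from the commit):

import os

def find_activation_dat():
    home = os.getenv('HOME')
    base = os.path.join(home, 'Library', 'Application Support',
                        'Adobe', 'Digital Editions')
    # walk the ADE support folder looking for the activation record
    for root, dirs, files in os.walk(base):
        if 'activation.dat' in files:
            return os.path.join(root, 'activation.dat')
    return None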
Binary files not shown (5 files).
@@ -1,51 +0,0 @@
(entire 51-line file removed: a second bundled copy of the PyCrypto Crypto.Cipher package __init__, identical to the one removed above)
Binary file not shown.
@@ -1,44 +0,0 @@
(entire 44-line file removed: the bundled PyCrypto Crypto.Hash package __init__ — public-domain licence header, the "Hashing algorithms" docstring covering HMAC/MD2/MD4/MD5/RIPEMD160/SHA, and __all__/__revision__)
@@ -1,184 +0,0 @@
(entire 184-line file removed: the bundled PyCrypto Crypto/PublicKey/RSA.py — the _RSAobj and RSAImplementation classes plus the module-level generate/construct/error helpers)
@@ -1,95 +0,0 @@
(entire 95-line file removed: the bundled PyCrypto _RSA.py — the pure-Python generate_py() RSA key-generation routine and the legacy RSAobj class)
@@ -1,37 +0,0 @@
(entire 37-line file removed: the bundled PyCrypto Crypto.PublicKey package __init__ — public-domain licence header, the "Public-key encryption and signature algorithms" docstring and __all__/__revision__)
@@ -1,134 +0,0 @@
(entire 134-line file removed: the bundled PyCrypto _slowmath.py — the pure-Python _RSAKey/_DSAKey classes and the rsa_construct()/dsa_construct() helpers)
@@ -1,192 +0,0 @@
|
|||||||
#
|
|
||||||
# pubkey.py : Internal functions for public key operations
|
|
||||||
#
|
|
||||||
# Part of the Python Cryptography Toolkit
|
|
||||||
#
|
|
||||||
# Written by Andrew Kuchling, Paul Swartz, and others
|
|
||||||
#
|
|
||||||
# ===================================================================
|
|
||||||
# The contents of this file are dedicated to the public domain. To
|
|
||||||
# the extent that dedication to the public domain is not available,
|
|
||||||
# everyone is granted a worldwide, perpetual, royalty-free,
|
|
||||||
# non-exclusive license to exercise all rights associated with the
|
|
||||||
# contents of this file for any purpose whatsoever.
|
|
||||||
# No rights are reserved.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
||||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#

__revision__ = "$Id$"

import types, warnings
from Crypto.Util.number import *

# Basic public key class
class pubkey:
    def __init__(self):
        pass

    def __getstate__(self):
        """To keep key objects platform-independent, the key data is
        converted to standard Python long integers before being
        written out. It will then be reconverted as necessary on
        restoration."""
        d=self.__dict__
        for key in self.keydata:
            if d.has_key(key): d[key]=long(d[key])
        return d

    def __setstate__(self, d):
        """On unpickling a key object, the key data is converted to the big
        number representation being used, whether that is Python long
        integers, MPZ objects, or whatever."""
        for key in self.keydata:
            if d.has_key(key): self.__dict__[key]=bignum(d[key])

    def encrypt(self, plaintext, K):
        """encrypt(plaintext:string|long, K:string|long) : tuple
        Encrypt the string or integer plaintext. K is a random
        parameter required by some algorithms.
        """
        wasString=0
        if isinstance(plaintext, types.StringType):
            plaintext=bytes_to_long(plaintext) ; wasString=1
        if isinstance(K, types.StringType):
            K=bytes_to_long(K)
        ciphertext=self._encrypt(plaintext, K)
        if wasString: return tuple(map(long_to_bytes, ciphertext))
        else: return ciphertext

    def decrypt(self, ciphertext):
        """decrypt(ciphertext:tuple|string|long): string
        Decrypt 'ciphertext' using this key.
        """
        wasString=0
        if not isinstance(ciphertext, types.TupleType):
            ciphertext=(ciphertext,)
        if isinstance(ciphertext[0], types.StringType):
            ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
        plaintext=self._decrypt(ciphertext)
        if wasString: return long_to_bytes(plaintext)
        else: return plaintext

    def sign(self, M, K):
        """sign(M : string|long, K:string|long) : tuple
        Return a tuple containing the signature for the message M.
        K is a random parameter required by some algorithms.
        """
        if (not self.has_private()):
            raise TypeError('Private key not available in this object')
        if isinstance(M, types.StringType): M=bytes_to_long(M)
        if isinstance(K, types.StringType): K=bytes_to_long(K)
        return self._sign(M, K)

    def verify (self, M, signature):
        """verify(M:string|long, signature:tuple) : bool
        Verify that the signature is valid for the message M;
        returns true if the signature checks out.
        """
        if isinstance(M, types.StringType): M=bytes_to_long(M)
        return self._verify(M, signature)

    # alias to compensate for the old validate() name
    def validate (self, M, signature):
        warnings.warn("validate() method name is obsolete; use verify()",
                      DeprecationWarning)

    def blind(self, M, B):
        """blind(M : string|long, B : string|long) : string|long
        Blind message M using blinding factor B.
        """
        wasString=0
        if isinstance(M, types.StringType):
            M=bytes_to_long(M) ; wasString=1
        if isinstance(B, types.StringType): B=bytes_to_long(B)
        blindedmessage=self._blind(M, B)
        if wasString: return long_to_bytes(blindedmessage)
        else: return blindedmessage

    def unblind(self, M, B):
        """unblind(M : string|long, B : string|long) : string|long
        Unblind message M using blinding factor B.
        """
        wasString=0
        if isinstance(M, types.StringType):
            M=bytes_to_long(M) ; wasString=1
        if isinstance(B, types.StringType): B=bytes_to_long(B)
        unblindedmessage=self._unblind(M, B)
        if wasString: return long_to_bytes(unblindedmessage)
        else: return unblindedmessage


    # The following methods will usually be left alone, except for
    # signature-only algorithms. They both return Boolean values
    # recording whether this key's algorithm can sign and encrypt.
    def can_sign (self):
        """can_sign() : bool
        Return a Boolean value recording whether this algorithm can
        generate signatures. (This does not imply that this
        particular key object has the private information required
        to generate a signature.)
        """
        return 1

    def can_encrypt (self):
        """can_encrypt() : bool
        Return a Boolean value recording whether this algorithm can
        encrypt data. (This does not imply that this
        particular key object has the private information required
        to decrypt a message.)
        """
        return 1

    def can_blind (self):
        """can_blind() : bool
        Return a Boolean value recording whether this algorithm can
        blind data. (This does not imply that this
        particular key object has the private information required
        to blind a message.)
        """
        return 0

    # The following methods will certainly be overridden by
    # subclasses.

    def size (self):
        """size() : int
        Return the maximum number of bits that can be handled by this key.
        """
        return 0

    def has_private (self):
        """has_private() : bool
        Return a Boolean denoting whether the object contains
        private components.
        """
        return 0

    def publickey (self):
        """publickey(): object
        Return a new key object containing only the public information.
        """
        return self

    def __eq__ (self, other):
        """__eq__(other): 0, 1
        Compare us to other for equality.
        """
        return self.__getstate__() == other.__getstate__()

    def __ne__ (self, other):
        """__ne__(other): 0, 1
        Compare us to other for inequality.
        """
        return not self.__eq__(other)
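The blind()/unblind() docstrings above only describe the interface; for readers unfamiliar with blinding, the following is a minimal sketch of how the pattern works for an RSA-style key. It is not part of the diff, and the toy parameters and the _blind/_unblind formulas are assumptions for illustration only, not PyCrypto internals.

def _toy_rsa_blind_demo():
    # Textbook toy RSA parameters (n = 61 * 53), small enough to follow by hand.
    n, e, d = 3233, 17, 2753
    M, B = 65, 7                                  # message and blinding factor, both < n
    B_inv = next(x for x in range(n) if (x * B) % n == 1)   # modular inverse of B
    blinded = (M * pow(B, e, n)) % n              # what a _blind() step would compute
    signed = pow(blinded, d, n)                   # "sign" the blinded value
    unblinded = (signed * B_inv) % n              # what _unblind() would undo
    assert unblinded == pow(M, d, n)              # same result as signing M directly
    return unblinded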
@@ -1,139 +0,0 @@
# -*- coding: ascii -*-
#
# FortunaAccumulator.py : Fortuna's internal accumulator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

__revision__ = "$Id$"

from Crypto.Util.python_compat import *

from binascii import b2a_hex
import time
import warnings

from Crypto.pct_warnings import ClockRewindWarning
import SHAd256

import FortunaGenerator

class FortunaPool(object):
    """Fortuna pool type

    This object acts like a hash object, with the following differences:

        - It keeps a count (the .length attribute) of the number of bytes that
          have been added to the pool
        - It supports a .reset() method for in-place reinitialization
        - The method to add bytes to the pool is .append(), not .update().
    """

    digest_size = SHAd256.digest_size

    def __init__(self):
        self.reset()

    def append(self, data):
        self._h.update(data)
        self.length += len(data)

    def digest(self):
        return self._h.digest()

    def hexdigest(self):
        return b2a_hex(self.digest())

    def reset(self):
        self._h = SHAd256.new()
        self.length = 0

def which_pools(r):
    """Return a list of pools indexes (in range(32)) that are to be included during reseed number r.

    According to _Practical Cryptography_, chapter 10.5.2 "Pools":

        "Pool P_i is included if 2**i is a divisor of r.  Thus P_0 is used
        every reseed, P_1 every other reseed, P_2 every fourth reseed, etc."
    """
    # This is a separate function so that it can be unit-tested.
    assert r >= 1
    retval = []
    mask = 0
    for i in range(32):
        # "Pool P_i is included if 2**i is a divisor of [reseed_count]"
        if (r & mask) == 0:
            retval.append(i)
        else:
            break   # optimization.  once this fails, it always fails
        mask = (mask << 1) | 1L
    return retval

class FortunaAccumulator(object):

    min_pool_size = 64      # TODO: explain why
    reseed_interval = 0.100   # 100 ms    TODO: explain why

    def __init__(self):
        self.reseed_count = 0
        self.generator = FortunaGenerator.AESGenerator()
        self.last_reseed = None

        # Initialize 32 FortunaPool instances.
        # NB: This is _not_ equivalent to [FortunaPool()]*32, which would give
        # us 32 references to the _same_ FortunaPool instance (and cause the
        # assertion below to fail).
        self.pools = [FortunaPool() for i in range(32)]     # 32 pools
        assert(self.pools[0] is not self.pools[1])

    def random_data(self, bytes):
        current_time = time.time()
        if self.last_reseed > current_time:
            warnings.warn("Clock rewind detected. Resetting last_reseed.", ClockRewindWarning)
            self.last_reseed = None
        if (self.pools[0].length >= self.min_pool_size and
            (self.last_reseed is None or
             current_time > self.last_reseed + self.reseed_interval)):
            self._reseed(current_time)
        # The following should fail if we haven't seeded the pool yet.
        return self.generator.pseudo_random_data(bytes)

    def _reseed(self, current_time=None):
        if current_time is None:
            current_time = time.time()
        seed = []
        self.reseed_count += 1
        self.last_reseed = current_time
        for i in which_pools(self.reseed_count):
            seed.append(self.pools[i].digest())
            self.pools[i].reset()

        seed = "".join(seed)
        self.generator.reseed(seed)

    def add_random_event(self, source_number, pool_number, data):
        assert 1 <= len(data) <= 32
        assert 0 <= source_number <= 255
        assert 0 <= pool_number <= 31
        self.pools[pool_number].append(chr(source_number))
        self.pools[pool_number].append(chr(len(data)))
        self.pools[pool_number].append(data)

# vim:set ts=4 sw=4 sts=4 expandtab:
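The which_pools() docstring above quotes the rule from _Practical Cryptography_: pool P_i is used during reseed r exactly when 2**i divides r. This is not part of the diff, but a direct, self-contained restatement of that rule (which the function above computes with a running bit mask) may help:

def pools_for_reseed(r):
    # Include pool i whenever 2**i divides the reseed counter r.
    assert r >= 1
    return [i for i in range(32) if r % (2 ** i) == 0]

# Reseed 1 uses only P_0, reseed 2 uses P_0 and P_1, reseed 4 adds P_2, and so on.
assert pools_for_reseed(1) == [0]
assert pools_for_reseed(2) == [0, 1]
assert pools_for_reseed(4) == [0, 1, 2]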
@@ -1,128 +0,0 @@
# -*- coding: ascii -*-
#
# FortunaGenerator.py : Fortuna's internal PRNG
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

__revision__ = "$Id$"

from Crypto.Util.python_compat import *

import struct

from Crypto.Util.number import ceil_shift, exact_log2, exact_div
from Crypto.Util import Counter
from Crypto.Cipher import AES

import SHAd256

class AESGenerator(object):
    """The Fortuna "generator"

    This is used internally by the Fortuna PRNG to generate arbitrary amounts
    of pseudorandom data from a smaller amount of seed data.

    The output is generated by running AES-256 in counter mode and re-keying
    after every mebibyte (2**16 blocks) of output.
    """

    block_size = AES.block_size     # output block size in octets (128 bits)
    key_size = 32                   # key size in octets (256 bits)

    # Because of the birthday paradox, we expect to find approximately one
    # collision for every 2**64 blocks of output from a real random source.
    # However, this code generates pseudorandom data by running AES in
    # counter mode, so there will be no collisions until the counter
    # (theoretically) wraps around at 2**128 blocks. Thus, in order to prevent
    # Fortuna's pseudorandom output from deviating perceptibly from a true
    # random source, Ferguson and Schneier specify a limit of 2**16 blocks
    # without rekeying.
    max_blocks_per_request = 2**16  # Allow no more than this number of blocks per _pseudo_random_data request

    _four_kiblocks_of_zeros = "\0" * block_size * 4096

    def __init__(self):
        self.counter = Counter.new(nbits=self.block_size*8, initial_value=0, little_endian=True)
        self.key = None

        # Set some helper constants
        self.block_size_shift = exact_log2(self.block_size)
        assert (1 << self.block_size_shift) == self.block_size

        self.blocks_per_key = exact_div(self.key_size, self.block_size)
        assert self.key_size == self.blocks_per_key * self.block_size

        self.max_bytes_per_request = self.max_blocks_per_request * self.block_size

    def reseed(self, seed):
        if self.key is None:
            self.key = "\0" * self.key_size
        self._set_key(SHAd256.new(self.key + seed).digest())
        self.counter()  # increment counter
        assert len(self.key) == self.key_size

    def pseudo_random_data(self, bytes):
        assert bytes >= 0

        num_full_blocks = bytes >> 20
        remainder = bytes & ((1<<20)-1)

        retval = []
        for i in xrange(num_full_blocks):
            retval.append(self._pseudo_random_data(1<<20))
        retval.append(self._pseudo_random_data(remainder))

        return "".join(retval)

    def _set_key(self, key):
        self.key = key
        self._cipher = AES.new(key, AES.MODE_CTR, counter=self.counter)

    def _pseudo_random_data(self, bytes):
        if not (0 <= bytes <= self.max_bytes_per_request):
            raise AssertionError("You cannot ask for more than 1 MiB of data per request")

        num_blocks = ceil_shift(bytes, self.block_size_shift)  # num_blocks = ceil(bytes / self.block_size)

        # Compute the output
        retval = self._generate_blocks(num_blocks)[:bytes]

        # Switch to a new key to avoid later compromises of this output (i.e.
        # state compromise extension attacks)
        self._set_key(self._generate_blocks(self.blocks_per_key))

        assert len(retval) == bytes
        assert len(self.key) == self.key_size

        return retval

    def _generate_blocks(self, num_blocks):
        if self.key is None:
            raise AssertionError("generator must be seeded before use")
        assert 0 <= num_blocks <= self.max_blocks_per_request
        retval = []
        for i in xrange(num_blocks >> 12):      # xrange(num_blocks / 4096)
            retval.append(self._cipher.encrypt(self._four_kiblocks_of_zeros))
        remaining_bytes = (num_blocks & 4095) << self.block_size_shift   # (num_blocks % 4096) * self.block_size
        retval.append(self._cipher.encrypt(self._four_kiblocks_of_zeros[:remaining_bytes]))
        return "".join(retval)

# vim:set ts=4 sw=4 sts=4 expandtab:
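The comments in _pseudo_random_data() above describe the key idea: after every request, the generator re-keys itself from its own output so that a later compromise of the key cannot reveal data that was already handed out. The following toy sketch (not part of the diff) illustrates that "generate, then rekey" pattern; hashlib stands in for the real AES-256 counter-mode keystream purely to keep the example self-contained, which is an assumption of the sketch, not how the module above works.

import hashlib
import struct

class ToyForwardSecureGenerator(object):
    def __init__(self, seed):
        self.key = hashlib.sha256(seed).digest()
        self.counter = 0

    def _block(self):
        # Stand-in keystream block: hash of (key, counter). The real generator
        # encrypts zero blocks with AES-CTR instead.
        self.counter += 1
        return hashlib.sha256(self.key + struct.pack(">Q", self.counter)).digest()

    def read(self, nbytes):
        out = b""
        while len(out) < nbytes:
            out += self._block()
        # Rekey immediately so a future key compromise cannot reveal this
        # output (the "state compromise extension" problem noted above).
        self.key = self._block()
        return out[:nbytes]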
@@ -1,88 +0,0 @@
# -*- coding: ascii -*-
#
# Random/Fortuna/SHAd256.py : SHA_d-256 hash function implementation
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""\
SHA_d-256 hash function implementation.

This module should comply with PEP 247.
"""

__revision__ = "$Id$"
__all__ = ['new', 'digest_size']

from Crypto.Util.python_compat import *

from binascii import b2a_hex

from Crypto.Hash import SHA256

assert SHA256.digest_size == 32

class _SHAd256(object):
    """SHA-256, doubled.

    Returns SHA-256(SHA-256(data)).
    """

    digest_size = SHA256.digest_size

    _internal = object()

    def __init__(self, internal_api_check, sha256_hash_obj):
        if internal_api_check is not self._internal:
            raise AssertionError("Do not instantiate this class directly. Use %s.new()" % (__name__,))
        self._h = sha256_hash_obj

    # PEP 247 "copy" method
    def copy(self):
        """Return a copy of this hashing object"""
        return _SHAd256(SHAd256._internal, self._h.copy())

    # PEP 247 "digest" method
    def digest(self):
        """Return the hash value of this object as a binary string"""
        retval = SHA256.new(self._h.digest()).digest()
        assert len(retval) == 32
        return retval

    # PEP 247 "hexdigest" method
    def hexdigest(self):
        """Return the hash value of this object as a (lowercase) hexadecimal string"""
        retval = b2a_hex(self.digest())
        assert len(retval) == 64
        return retval

    # PEP 247 "update" method
    def update(self, data):
        self._h.update(data)

# PEP 247 module-level "digest_size" variable
digest_size = _SHAd256.digest_size

# PEP 247 module-level "new" function
def new(data=""):
    """Return a new SHAd256 hashing object"""
    return _SHAd256(_SHAd256._internal, SHA256.new(data))

# vim:set ts=4 sw=4 sts=4 expandtab:
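As the docstring above says, SHA_d-256 is simply SHA-256 applied twice. A self-contained cross-check using only the standard library (not part of the diff):

import hashlib

def shad256(data):
    # SHA-256 of the SHA-256 digest, matching _SHAd256.digest() above.
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

assert len(shad256(b"")) == 32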
@@ -1,40 +0,0 @@
#
# Random/OSRNG/__init__.py : Platform-independent OS RNG API
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Provides a platform-independent interface to the random number generators
supplied by various operating systems."""

__revision__ = "$Id$"

import os

if os.name == 'posix':
    from Crypto.Random.OSRNG.posix import new
elif os.name == 'nt':
    from Crypto.Random.OSRNG.nt import new
elif hasattr(os, 'urandom'):
    from Crypto.Random.OSRNG.fallback import new
else:
    raise ImportError("Not implemented")

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,46 +0,0 @@
#
# Random/OSRNG/fallback.py : Fallback entropy source for systems with os.urandom
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================


__revision__ = "$Id$"
__all__ = ['PythonOSURandomRNG']

import os

from rng_base import BaseRNG

class PythonOSURandomRNG(BaseRNG):

    name = "<os.urandom>"

    def __init__(self):
        self._read = os.urandom
        BaseRNG.__init__(self)

    def _close(self):
        self._read = None

def new(*args, **kwargs):
    return PythonOSURandomRNG(*args, **kwargs)

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,74 +0,0 @@
#
# Random/OSRNG/nt.py : OS entropy source for MS Windows
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================


__revision__ = "$Id$"
__all__ = ['WindowsRNG']

import winrandom
from rng_base import BaseRNG

class WindowsRNG(BaseRNG):

    name = "<CryptGenRandom>"

    def __init__(self):
        self.__winrand = winrandom.new()
        BaseRNG.__init__(self)

    def flush(self):
        """Work around weakness in Windows RNG.

        The CryptGenRandom mechanism in some versions of Windows allows an
        attacker to learn 128 KiB of past and future output. As a workaround,
        this function reads 128 KiB of 'random' data from Windows and discards
        it.

        For more information about the weaknesses in CryptGenRandom, see
        _Cryptanalysis of the Random Number Generator of the Windows Operating
        System_, by Leo Dorrendorf and Zvi Gutterman and Benny Pinkas
        http://eprint.iacr.org/2007/419
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        data = self.__winrand.get_bytes(128*1024)
        assert (len(data) == 128*1024)
        BaseRNG.flush(self)

    def _close(self):
        self.__winrand = None

    def _read(self, N):
        # Unfortunately, research shows that CryptGenRandom doesn't provide
        # forward secrecy and fails the next-bit test unless we apply a
        # workaround, which we do here. See http://eprint.iacr.org/2007/419
        # for information on the vulnerability.
        self.flush()
        data = self.__winrand.get_bytes(N)
        self.flush()
        return data

def new(*args, **kwargs):
    return WindowsRNG(*args, **kwargs)

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,86 +0,0 @@
#
# Random/OSRNG/rng_base.py : Base class for OSRNG
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

__revision__ = "$Id$"

from Crypto.Util.python_compat import *

class BaseRNG(object):

    def __init__(self):
        self.closed = False
        self._selftest()

    def __del__(self):
        self.close()

    def _selftest(self):
        # Test that urandom can return data
        data = self.read(16)
        if len(data) != 16:
            raise AssertionError("read truncated")

        # Test that we get different data every time (if we don't, the RNG is
        # probably malfunctioning)
        data2 = self.read(16)
        if data == data2:
            raise AssertionError("OS RNG returned duplicate data")

    # PEP 343: Support for the "with" statement
    def __enter__(self):
        pass
    def __exit__(self):
        """PEP 343 support"""
        self.close()

    def close(self):
        if not self.closed:
            self._close()
        self.closed = True

    def flush(self):
        pass

    def read(self, N=-1):
        """Return N bytes from the RNG."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not isinstance(N, (long, int)):
            raise TypeError("an integer is required")
        if N < 0:
            raise ValueError("cannot read to end of infinite stream")
        elif N == 0:
            return ""
        data = self._read(N)
        if len(data) != N:
            raise AssertionError("%s produced truncated output (requested %d, got %d)" % (self.name, N, len(data)))
        return data

    def _close(self):
        raise NotImplementedError("child class must implement this")

    def _read(self, N):
        raise NotImplementedError("child class must implement this")


# vim:set ts=4 sw=4 sts=4 expandtab:
Binary file not shown.
@@ -1,213 +0,0 @@
# -*- coding: utf-8 -*-
#
# Random/_UserFriendlyRNG.py : A user-friendly random number generator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

__revision__ = "$Id$"

from Crypto.Util.python_compat import *

import os
import threading
import struct
import time
from math import floor

from Crypto.Random import OSRNG
from Crypto.Random.Fortuna import FortunaAccumulator

class _EntropySource(object):
    def __init__(self, accumulator, src_num):
        self._fortuna = accumulator
        self._src_num = src_num
        self._pool_num = 0

    def feed(self, data):
        self._fortuna.add_random_event(self._src_num, self._pool_num, data)
        self._pool_num = (self._pool_num + 1) & 31

class _EntropyCollector(object):

    def __init__(self, accumulator):
        self._osrng = OSRNG.new()
        self._osrng_es = _EntropySource(accumulator, 255)
        self._time_es = _EntropySource(accumulator, 254)
        self._clock_es = _EntropySource(accumulator, 253)

    def reinit(self):
        # Add 256 bits to each of the 32 pools, twice. (For a total of 16384
        # bits collected from the operating system.)
        for i in range(2):
            block = self._osrng.read(32*32)
            for p in range(32):
                self._osrng_es.feed(block[p*32:(p+1)*32])
            block = None
        self._osrng.flush()

    def collect(self):
        # Collect 64 bits of entropy from the operating system and feed it to Fortuna.
        self._osrng_es.feed(self._osrng.read(8))

        # Add the fractional part of time.time()
        t = time.time()
        self._time_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))

        # Add the fractional part of time.clock()
        t = time.clock()
        self._clock_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))


class _UserFriendlyRNG(object):

    def __init__(self):
        self.closed = False
        self._fa = FortunaAccumulator.FortunaAccumulator()
        self._ec = _EntropyCollector(self._fa)
        self.reinit()

    def reinit(self):
        """Initialize the random number generator and seed it with entropy from
        the operating system.
        """
        self._pid = os.getpid()
        self._ec.reinit()

    def close(self):
        self.closed = True
        self._osrng = None
        self._fa = None

    def flush(self):
        pass

    def read(self, N):
        """Return N bytes from the RNG."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not isinstance(N, (long, int)):
            raise TypeError("an integer is required")
        if N < 0:
            raise ValueError("cannot read to end of infinite stream")

        # Collect some entropy and feed it to Fortuna
        self._ec.collect()

        # Ask Fortuna to generate some bytes
        retval = self._fa.random_data(N)

        # Check that we haven't forked in the meantime. (If we have, we don't
        # want to use the data, because it might have been duplicated in the
        # parent process.)
        self._check_pid()

        # Return the random data.
        return retval

    def _check_pid(self):
        # Lame fork detection to remind developers to invoke Random.atfork()
        # after every call to os.fork(). Note that this check is not reliable,
        # since process IDs can be reused on most operating systems.
        #
        # You need to do Random.atfork() in the child process after every call
        # to os.fork() to avoid reusing PRNG state. If you want to avoid
        # leaking PRNG state to child processes (for example, if you are using
        # os.setuid()) then you should also invoke Random.atfork() in the
        # *parent* process.
        if os.getpid() != self._pid:
            raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()")


class _LockingUserFriendlyRNG(_UserFriendlyRNG):
    def __init__(self):
        self._lock = threading.Lock()
        _UserFriendlyRNG.__init__(self)

    def close(self):
        self._lock.acquire()
        try:
            return _UserFriendlyRNG.close(self)
        finally:
            self._lock.release()

    def reinit(self):
        self._lock.acquire()
        try:
            return _UserFriendlyRNG.reinit(self)
        finally:
            self._lock.release()

    def read(self, bytes):
        self._lock.acquire()
        try:
            return _UserFriendlyRNG.read(self, bytes)
        finally:
            self._lock.release()

class RNGFile(object):
    def __init__(self, singleton):
        self.closed = False
        self._singleton = singleton

    # PEP 343: Support for the "with" statement
    def __enter__(self):
        """PEP 343 support"""
    def __exit__(self):
        """PEP 343 support"""
        self.close()

    def close(self):
        # Don't actually close the singleton, just close this RNGFile instance.
        self.closed = True
        self._singleton = None

    def read(self, bytes):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self._singleton.read(bytes)

    def flush(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

_singleton_lock = threading.Lock()
_singleton = None
def _get_singleton():
    global _singleton
    _singleton_lock.acquire()
    try:
        if _singleton is None:
            _singleton = _LockingUserFriendlyRNG()
        return _singleton
    finally:
        _singleton_lock.release()

def new():
    return RNGFile(_get_singleton())

def reinit():
    _get_singleton().reinit()

def get_random_bytes(n):
    """Return the specified number of cryptographically-strong random bytes."""
    return _get_singleton().read(n)

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
#
# Random/__init__.py : PyCrypto random number generation
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

__revision__ = "$Id$"
__all__ = ['new']

import OSRNG
import _UserFriendlyRNG

def new(*args, **kwargs):
    """Return a file-like object that outputs cryptographically random bytes."""
    return _UserFriendlyRNG.new(*args, **kwargs)

def atfork():
    """Call this whenever you call os.fork()"""
    _UserFriendlyRNG.reinit()

def get_random_bytes(n):
    """Return the specified number of cryptographically-strong random bytes."""
    return _UserFriendlyRNG.get_random_bytes(n)

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,143 +0,0 @@
# -*- coding: utf-8 -*-
#
# Random/random.py : Strong alternative for the standard 'random' module
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""A cryptographically strong version of Python's standard "random" module."""

__revision__ = "$Id$"
__all__ = ['StrongRandom', 'getrandbits', 'randrange', 'randint', 'choice', 'shuffle', 'sample']

from Crypto import Random

from Crypto.Util.python_compat import *

class StrongRandom(object):
    def __init__(self, rng=None, randfunc=None):
        if randfunc is None and rng is None:
            self._randfunc = None
        elif randfunc is not None and rng is None:
            self._randfunc = randfunc
        elif randfunc is None and rng is not None:
            self._randfunc = rng.read
        else:
            raise ValueError("Cannot specify both 'rng' and 'randfunc'")

    def getrandbits(self, k):
        """Return a python long integer with k random bits."""
        if self._randfunc is None:
            self._randfunc = Random.new().read
        mask = (1L << k) - 1
        return mask & bytes_to_long(self._randfunc(ceil_div(k, 8)))

    def randrange(self, *args):
        """randrange([start,] stop[, step]):
        Return a randomly-selected element from range(start, stop, step)."""
        if len(args) == 3:
            (start, stop, step) = args
        elif len(args) == 2:
            (start, stop) = args
            step = 1
        elif len(args) == 1:
            (stop,) = args
            start = 0
            step = 1
        else:
            raise TypeError("randrange expected at most 3 arguments, got %d" % (len(args),))
        if (not isinstance(start, (int, long))
            or not isinstance(stop, (int, long))
            or not isinstance(step, (int, long))):
            raise TypeError("randrange requires integer arguments")
        if step == 0:
            raise ValueError("randrange step argument must not be zero")

        num_choices = ceil_div(stop - start, step)
        if num_choices < 0:
            num_choices = 0
        if num_choices < 1:
            raise ValueError("empty range for randrange(%r, %r, %r)" % (start, stop, step))

        # Pick a random number in the range of possible numbers
        r = num_choices
        while r >= num_choices:
            r = self.getrandbits(size(num_choices))

        return start + (step * r)

    def randint(self, a, b):
        """Return a random integer N such that a <= N <= b."""
        if not isinstance(a, (int, long)) or not isinstance(b, (int, long)):
            raise TypeError("randint requires integer arguments")
        N = self.randrange(a, b+1)
        assert a <= N <= b
        return N

    def choice(self, seq):
        """Return a random element from a (non-empty) sequence.

        If the sequence is empty, raises IndexError.
        """
        if len(seq) == 0:
            raise IndexError("empty sequence")
        return seq[self.randrange(len(seq))]

    def shuffle(self, x):
        """Shuffle the sequence in place."""
        # Make a copy of the list of objects we want to shuffle
        items = list(x)

        # Choose a random item (without replacement) until all the items have been
        # chosen.
        for i in xrange(len(x)):
            p = self.randrange(len(items))
            x[i] = items[p]
            del items[p]

    def sample(self, population, k):
        """Return a k-length list of unique elements chosen from the population sequence."""

        num_choices = len(population)
        if k > num_choices:
            raise ValueError("sample larger than population")

        retval = []
        selected = {}  # we emulate a set using a dict here
        for i in xrange(k):
            r = None
            while r is None or r in selected:
                r = self.randrange(num_choices)
            retval.append(population[r])
            selected[r] = 1
        return retval

_r = StrongRandom()
getrandbits = _r.getrandbits
randrange = _r.randrange
randint = _r.randint
choice = _r.choice
shuffle = _r.shuffle
sample = _r.sample

# These are at the bottom to avoid problems with recursive imports
from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes, size

# vim:set ts=4 sw=4 sts=4 expandtab:
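StrongRandom.getrandbits() and randrange() above show the standard pattern: read just enough random bytes, mask down to k bits, and reject values outside the target range so the result stays uniform. The sketch below (not part of the diff) restates that pattern with only the standard library; os.urandom stands in for the PyCrypto RNG, which is an assumption of the sketch.

import binascii, os

def getrandbits(k):
    # Read ceil(k/8) random bytes and mask down to exactly k bits,
    # mirroring StrongRandom.getrandbits() above.
    nbytes = (k + 7) // 8
    mask = (1 << k) - 1
    return mask & int(binascii.hexlify(os.urandom(nbytes)), 16)

def randrange_below(n):
    # Rejection sampling, as in StrongRandom.randrange(): draw values with just
    # enough bits and retry until one falls below n, keeping the result uniform.
    k = n.bit_length()
    r = n
    while r >= n:
        r = getrandbits(k)
    return r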
@@ -1,61 +0,0 @@
# -*- coding: ascii -*-
#
# Util/Counter.py : Fast counter for use with CTR-mode ciphers
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

from Crypto.Util.python_compat import *

from Crypto.Util import _counter
import struct

# Factory function
def new(nbits, prefix="", suffix="", initial_value=1, overflow=0, little_endian=False, allow_wraparound=False, disable_shortcut=False):
    # TODO: Document this

    # Sanity-check the message size
    (nbytes, remainder) = divmod(nbits, 8)
    if remainder != 0:
        # In the future, we might support arbitrary bit lengths, but for now we don't.
        raise ValueError("nbits must be a multiple of 8; got %d" % (nbits,))
    if nbytes < 1:
        raise ValueError("nbits too small")
    elif nbytes > 0xffff:
        raise ValueError("nbits too large")

    initval = _encode(initial_value, nbytes, little_endian)
    if little_endian:
        return _counter._newLE(str(prefix), str(suffix), initval, allow_wraparound=allow_wraparound, disable_shortcut=disable_shortcut)
    else:
        return _counter._newBE(str(prefix), str(suffix), initval, allow_wraparound=allow_wraparound, disable_shortcut=disable_shortcut)

def _encode(n, nbytes, little_endian=False):
    retval = []
    n = long(n)
    for i in range(nbytes):
        if little_endian:
            retval.append(chr(n & 0xff))
        else:
            retval.insert(0, chr(n & 0xff))
        n >>= 8
    return "".join(retval)

# vim:set ts=4 sw=4 sts=4 expandtab:
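_encode() above serializes the initial counter value one byte at a time, appending for little-endian output and prepending for big-endian output. A small self-contained cross-check of that byte ordering (not part of the diff; struct is used only to verify the result):

import struct

def encode_be(n, nbytes):
    # Big-endian variant of _encode(): most significant byte first.
    out = b""
    while nbytes:
        out = bytes(bytearray([n & 0xff])) + out
        n >>= 8
        nbytes -= 1
    return out

assert encode_be(1, 4) == struct.pack(">I", 1)        # b"\x00\x00\x00\x01"
assert encode_be(1, 4)[::-1] == struct.pack("<I", 1)  # reversed = little-endian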
@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Miscellaneous modules

Contains useful modules that don't belong into any of the
other Crypto.* subpackages.

Crypto.Util.number      Number-theoretic functions (primality testing, etc.)
Crypto.Util.randpool    Random number generation
Crypto.Util.RFC1751     Converts between 128-bit keys and human-readable
                        strings of words.

"""

__all__ = ['randpool', 'RFC1751', 'number', 'strxor']

__revision__ = "$Id$"

Binary file not shown.
@@ -1,117 +0,0 @@
# -*- coding: ascii -*-
#
# Util/_number_new.py : utility functions
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

## NOTE: Do not import this module directly.  Import these functions from Crypto.Util.number.

__revision__ = "$Id$"
__all__ = ['ceil_shift', 'ceil_div', 'floor_div', 'exact_log2', 'exact_div']

from Crypto.Util.python_compat import *

def ceil_shift(n, b):
    """Return ceil(n / 2**b) without performing any floating-point or division operations.

    This is done by right-shifting n by b bits and incrementing the result by 1
    if any '1' bits were shifted out.
    """
    if not isinstance(n, (int, long)) or not isinstance(b, (int, long)):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(n).__name__, type(b).__name__))

    assert n >= 0 and b >= 0    # I haven't tested or even thought about negative values
    mask = (1L << b) - 1
    if n & mask:
        return (n >> b) + 1
    else:
        return n >> b

def ceil_div(a, b):
    """Return ceil(a / b) without performing any floating-point operations."""

    if not isinstance(a, (int, long)) or not isinstance(b, (int, long)):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(a).__name__, type(b).__name__))

    (q, r) = divmod(a, b)
    if r:
        return q + 1
    else:
        return q

def floor_div(a, b):
    if not isinstance(a, (int, long)) or not isinstance(b, (int, long)):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(a).__name__, type(b).__name__))

    (q, r) = divmod(a, b)
    return q

def exact_log2(num):
    """Find and return an integer i >= 0 such that num == 2**i.

    If no such integer exists, this function raises ValueError.
    """

    if not isinstance(num, (int, long)):
        raise TypeError("unsupported operand type: %r" % (type(num).__name__,))

    n = long(num)
    if n <= 0:
        raise ValueError("cannot compute logarithm of non-positive number")

    i = 0
    while n != 0:
        if (n & 1) and n != 1:
            raise ValueError("No solution could be found")
        i += 1
        n >>= 1
    i -= 1

    assert num == (1L << i)
    return i

def exact_div(p, d, allow_divzero=False):
    """Find and return an integer n such that p == n * d

    If no such integer exists, this function raises ValueError.

    Both operands must be integers.

    If the second operand is zero, this function will raise ZeroDivisionError
    unless allow_divzero is true (default: False).
    """

    if not isinstance(p, (int, long)) or not isinstance(d, (int, long)):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(p).__name__, type(d).__name__))

    if d == 0 and allow_divzero:
        n = 0
        if p != n * d:
            raise ValueError("No solution could be found")
    else:
        (n, r) = divmod(p, d)
        if r != 0:
            raise ValueError("No solution could be found")

    assert p == n * d
    return n

# vim:set ts=4 sw=4 sts=4 expandtab:
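As the ceil_shift() docstring above explains, the function computes ceil(n / 2**b) using only shifts. A one-line cross-check (not part of the diff), relying on Python's floor-division semantics for negative shifts:

def ceil_shift_reference(n, b):
    # Flooring the negation gives the ceiling for n >= 0: ceil(n/2**b) == -((-n) >> b)
    return -((-n) >> b)

assert ceil_shift_reference(5, 1) == 3 and ceil_shift_reference(4, 1) == 2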
@@ -1,250 +0,0 @@
#
# number.py : Number-theoretic functions
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew M. Kuchling, Barry A. Warsaw, and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#

__revision__ = "$Id$"

bignum = long
try:
    from Crypto.PublicKey import _fastmath
except ImportError:
    _fastmath = None

# New functions
from _number_new import *

# Commented out and replaced with faster versions below
## def long2str(n):
##     s=''
##     while n>0:
##         s=chr(n & 255)+s
##         n=n>>8
##     return s

## import types
## def str2long(s):
##     if type(s)!=types.StringType: return s   # Integers will be left alone
##     return reduce(lambda x,y : x*256+ord(y), s, 0L)

def size (N):
    """size(N:long) : int
    Returns the size of the number N in bits.
    """
    bits, power = 0,1L
    while N >= power:
        bits += 1
        power = power << 1
    return bits

def getRandomNumber(N, randfunc=None):
    """getRandomNumber(N:int, randfunc:callable):long
    Return a random N-bit number.

    If randfunc is omitted, then Random.new().read is used.

    NOTE: Confusingly, this function does NOT return N random bits; It returns
    a random N-bit number, i.e. a random number between 2**(N-1) and (2**N)-1.

    This function is for internal use only and may be renamed or removed in
    the future.
    """
    if randfunc is None:
        _import_Random()
        randfunc = Random.new().read

    S = randfunc(N/8)
    odd_bits = N % 8
    if odd_bits != 0:
        char = ord(randfunc(1)) >> (8-odd_bits)
        S = chr(char) + S
    value = bytes_to_long(S)
    value |= 2L ** (N-1)                # Ensure high bit is set
    assert size(value) >= N
    return value

def GCD(x,y):
    """GCD(x:long, y:long): long
    Return the GCD of x and y.
    """
    x = abs(x) ; y = abs(y)
    while x > 0:
        x, y = y % x, x
    return y

def inverse(u, v):
    """inverse(u:long, u:long):long
    Return the inverse of u mod v.
    """
    u3, v3 = long(u), long(v)
    u1, v1 = 1L, 0L
    while v3 > 0:
        q=u3 / v3
        u1, v1 = v1, u1 - v1*q
        u3, v3 = v3, u3 - v3*q
    while u1<0:
        u1 = u1 + v
    return u1

# Given a number of bits to generate and a random generation function,
# find a prime number of the appropriate size.

def getPrime(N, randfunc=None):
    """getPrime(N:int, randfunc:callable):long
    Return a random N-bit prime number.

    If randfunc is omitted, then Random.new().read is used.
    """
    if randfunc is None:
        _import_Random()
        randfunc = Random.new().read

    number=getRandomNumber(N, randfunc) | 1
    while (not isPrime(number, randfunc=randfunc)):
        number=number+2
    return number

def isPrime(N, randfunc=None):
    """isPrime(N:long, randfunc:callable):bool
    Return true if N is prime.

    If randfunc is omitted, then Random.new().read is used.
    """
    _import_Random()
    if randfunc is None:
        randfunc = Random.new().read

    randint = StrongRandom(randfunc=randfunc).randint

    if N == 1:
        return 0
    if N in sieve:
        return 1
    for i in sieve:
        if (N % i)==0:
            return 0

    # Use the accelerator if available
    if _fastmath is not None:
        return _fastmath.isPrime(N)

    # Compute the highest bit that's set in N
    N1 = N - 1L
    n = 1L
    while (n<N):
        n=n<<1L
    n = n >> 1L

    # Rabin-Miller test
    for c in sieve[:7]:
        a=long(c) ; d=1L ; t=n
        while (t):  # Iterate over the bits in N1
            x=(d*d) % N
            if x==1L and d!=1L and d!=N1:
                return 0  # Square root of 1 found
            if N1 & t:
                d=(x*a) % N
            else:
                d=x
            t = t >> 1L
        if d!=1L:
            return 0
    return 1

# Small primes used for checking primality; these are all the primes
# less than 256.  This should be enough to eliminate most of the odd
# numbers before needing to do a Rabin-Miller test at all.

sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
       61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
       131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
       197, 199, 211, 223, 227, 229, 233, 239, 241, 251]

# Improved conversion functions contributed by Barry Warsaw, after
# careful benchmarking

import struct

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = ''
    n = long(n)
    pack = struct.pack
    while n > 0:
        s = pack('>I', n & 0xffffffffL) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != '\000':
            break
    else:
        # only happens when n == 0
        s = '\000'
        i = 0
    s = s[i:]
    # add back some pad bytes.  this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * '\000' + s
    return s

def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0L
    unpack = struct.unpack
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = '\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + unpack('>I', s[i:i+4])[0]
    return acc

# For backwards compatibility...
import warnings
def long2str(n, blocksize=0):
    warnings.warn("long2str() has been replaced by long_to_bytes()")
    return long_to_bytes(n, blocksize)
def str2long(s):
    warnings.warn("str2long() has been replaced by bytes_to_long()")
    return bytes_to_long(s)

def _import_Random():
    # This is called in a function instead of at the module level in order to avoid problems with recursive imports
    global Random, StrongRandom
    from Crypto import Random
    from Crypto.Random.random import StrongRandom
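For reference, the conversion and primality helpers in the number.py dropped above behave as follows (a minimal sketch against a stock PyCrypto install, not part of the diff):

from Crypto.Util.number import bytes_to_long, long_to_bytes, isPrime

n = bytes_to_long('\x01\x00')                     # 256
assert long_to_bytes(n) == '\x01\x00'             # leading zeros stripped on the way back
assert long_to_bytes(n, 4) == '\x00\x00\x01\x00'  # padded to a multiple of 4 bytes
assert isPrime(65537)                             # survives the sieve and the Rabin-Miller rounds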
@@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
#
# Util/python_compat.py : Compatibility code for old versions of Python
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Compatibility code for old versions of Python

Currently, this just defines:
    - True and False
    - object
    - isinstance
"""

__revision__ = "$Id$"
__all__ = []

import sys
import __builtin__

# 'True' and 'False' aren't defined in Python 2.1.  Define them.
try:
    True, False
except NameError:
    (True, False) = (1, 0)
    __all__ += ['True', 'False']

# New-style classes were introduced in Python 2.2.  Defining "object" in Python
# 2.1 lets us use new-style classes in versions of Python that support them,
# while still maintaining backward compatibility with old-style classes
try:
    object
except NameError:
    class object: pass
    __all__ += ['object']

# Starting with Python 2.2, isinstance allows a tuple for the second argument.
# Also, builtins like "tuple", "list", "str", "unicode", "int", and "long"
# became first-class types, rather than functions.  We want to support
# constructs like:
#     isinstance(x, (int, long))
# So we hack it for Python 2.1.
try:
    isinstance(5, (int, long))
except TypeError:
    __all__ += ['isinstance']
    _builtin_type_map = {
        tuple: type(()),
        list: type([]),
        str: type(""),
        unicode: type(u""),
        int: type(0),
        long: type(0L),
    }
    def isinstance(obj, t):
        if not __builtin__.isinstance(t, type(())):
            # t is not a tuple
            return __builtin__.isinstance(obj, _builtin_type_map.get(t, t))
        else:
            # t is a tuple
            for typ in t:
                if __builtin__.isinstance(obj, _builtin_type_map.get(typ, typ)):
                    return True
            return False

# vim:set ts=4 sw=4 sts=4 expandtab:
@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Python Cryptography Toolkit

A collection of cryptographic modules implementing various algorithms
and protocols.

Subpackages:
Crypto.Cipher             Secret-key encryption algorithms (AES, DES, ARC4)
Crypto.Hash               Hashing algorithms (MD5, SHA, HMAC)
Crypto.Protocol           Cryptographic protocols (Chaffing, all-or-nothing
                          transform).   This package does not contain any
                          network protocols.
Crypto.PublicKey          Public-key encryption and signature algorithms
                          (RSA, DSA)
Crypto.Util               Various useful modules and functions (long-to-string
                          conversion, random number generation, number
                          theoretic functions)
"""

__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util']

__version__ = '2.3'     # See also below and setup.py
__revision__ = "$Id$"

# New software should look at this instead of at __version__ above.
version_info = (2, 1, 0, 'final', 0)    # See also above and setup.py
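The version constants in this __init__.py are what a caller would see through the usual attributes (a trivial sketch, assuming the PyCrypto build above is importable):

import Crypto
print Crypto.__version__     # '2.3', as defined above
print Crypto.version_info    # (2, 1, 0, 'final', 0), as defined above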
@@ -1,57 +0,0 @@
# -*- coding: ascii -*-
#
# pct_warnings.py : PyCrypto warnings file
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

#
# Base classes.  All our warnings inherit from one of these in order to allow
# the user to specifically filter them.
#

class CryptoWarning(Warning):
    """Base class for PyCrypto warnings"""

class CryptoDeprecationWarning(DeprecationWarning, CryptoWarning):
    """Base PyCrypto DeprecationWarning class"""

class CryptoRuntimeWarning(RuntimeWarning, CryptoWarning):
    """Base PyCrypto RuntimeWarning class"""

#
# Warnings that we might actually use
#

class RandomPool_DeprecationWarning(CryptoDeprecationWarning):
    """Issued when Crypto.Util.randpool.RandomPool is instantiated."""

class ClockRewindWarning(CryptoRuntimeWarning):
    """Warning for when the system clock moves backwards."""

class GetRandomNumber_DeprecationWarning(CryptoDeprecationWarning):
    """Issued when Crypto.Util.number.getRandomNumber is invoked."""

# By default, we want this warning to be shown every time we compensate for
# clock rewinding.
import warnings as _warnings
_warnings.filterwarnings('always', category=ClockRewindWarning, append=1)

# vim:set ts=4 sw=4 sts=4 expandtab:
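Because every class above derives from CryptoWarning, callers can filter the whole family at once (a minimal sketch, not part of the diff):

import warnings
from Crypto.pct_warnings import CryptoWarning, RandomPool_DeprecationWarning

warnings.simplefilter('ignore', CryptoWarning)                  # hide all PyCrypto-specific warnings
warnings.simplefilter('error', RandomPool_DeprecationWarning)   # or promote just one of them to an error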
@@ -13,9 +13,20 @@ _FILENAME_LEN_OFFSET = 26
 _EXTRA_LEN_OFFSET = 28
 _FILENAME_OFFSET = 30
 _MAX_SIZE = 64 * 1024
+_MIMETYPE = 'application/epub+zip'
+
+class ZipInfo(zipfile.ZipInfo):
+    def __init__(self, *args, **kwargs):
+        if 'compress_type' in kwargs:
+            compress_type = kwargs.pop('compress_type')
+        super(ZipInfo, self).__init__(*args, **kwargs)
+        self.compress_type = compress_type
+
 class fixZip:
     def __init__(self, zinput, zoutput):
+        self.ztype = 'zip'
+        if zinput.lower().find('.epub') >= 0 :
+            self.ztype = 'epub'
         self.inzip = zipfile.ZipFile(zinput,'r')
         self.outzip = zipfile.ZipFile(zoutput,'w')
         # open the input zip for reading only as a raw file
@@ -82,12 +93,18 @@ class fixZip:
         # and copy member over to output archive
         # if problems exist with local vs central filename, fix them

-        for i, zinfo in enumerate(self.inzip.infolist()):
+        # if epub write mimetype file first, with no compression
+        if self.ztype == 'epub':
+            nzinfo = ZipInfo('mimetype', compress_type=zipfile.ZIP_STORED)
+            self.outzip.writestr(nzinfo, _MIMETYPE)
+
+        # write the rest of the files
+        for zinfo in self.inzip.infolist():
+            if zinfo.filename != "mimetype" or self.ztype == '.zip':
                 data = None
                 nzinfo = zinfo

                 try:
-                    data = self.inzip.read(zinfo)
+                    data = self.inzip.read(zinfo.filename)
                 except zipfile.BadZipfile or zipfile.error:
                     local_name = self.getlocalname(zinfo)
                     data = self.getfiledata(zinfo)
@@ -111,14 +128,7 @@ def usage():
     """


-def main(argv=sys.argv):
-    if len(argv)!=3:
-        usage()
-        return 1
-    infile = None
-    outfile = None
-    infile = argv[1]
-    outfile = argv[2]
+def repairBook(infile, outfile):
     if not os.path.exists(infile):
         print "Error: Input Zip File does not exist"
         return 1
@@ -130,6 +140,16 @@ def main(argv=sys.argv):
         print "Error Occurred ", e
         return 2


+def main(argv=sys.argv):
+    if len(argv)!=3:
+        usage()
+        return 1
+    infile = argv[1]
+    outfile = argv[2]
+    return repairBook(infile, outfile)
+
+
 if __name__ == '__main__' :
     sys.exit(main())
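The ZipInfo subclass and the mimetype special case above exist because of the EPUB packaging rule: the archive's first entry must be named mimetype and must be stored uncompressed. A minimal sketch of that convention with plain zipfile (illustrative only; the function name is made up here):

import zipfile

def start_epub(path):
    # first entry: 'mimetype', stored without compression, per the EPUB container spec
    z = zipfile.ZipFile(path, 'w')
    z.writestr(zipfile.ZipInfo('mimetype'), 'application/epub+zip')  # ZipInfo defaults to ZIP_STORED
    return z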
BIN    Calibre_Plugins/ineptpdf_plugin.zip   (new file, binary file not shown)
2224   Calibre_Plugins/ineptpdf_plugin/__init__.py   (new file, file diff suppressed because it is too large)
346    Calibre_Plugins/ineptpdf_plugin/ade_key.py   (new file)
@@ -0,0 +1,346 @@
#!/usr/bin/env python

"""
Retrieve Adobe ADEPT user key.
"""

from __future__ import with_statement

__license__ = 'GPL v3'

import sys
import os
import struct
from calibre.constants import iswindows, isosx

class ADEPTError(Exception):
    pass

if iswindows:
    from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
        create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
        string_at, Structure, c_void_p, cast, c_size_t, memmove, CDLL, c_int, \
        c_long, c_ulong

    from ctypes.wintypes import LPVOID, DWORD, BOOL
    import _winreg as winreg

    def _load_crypto_libcrypto():
        from ctypes.util import find_library
        libcrypto = find_library('libeay32')
        if libcrypto is None:
            raise ADEPTError('libcrypto not found')
        libcrypto = CDLL(libcrypto)
        AES_MAXNR = 14
        c_char_pp = POINTER(c_char_p)
        c_int_p = POINTER(c_int)
        class AES_KEY(Structure):
            _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
                        ('rounds', c_int)]
        AES_KEY_p = POINTER(AES_KEY)

        def F(restype, name, argtypes):
            func = getattr(libcrypto, name)
            func.restype = restype
            func.argtypes = argtypes
            return func

        AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
                                [c_char_p, c_int, AES_KEY_p])
        AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
                            [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
                             c_int])
        class AES(object):
            def __init__(self, userkey):
                self._blocksize = len(userkey)
                if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
                    raise ADEPTError('AES improper key used')
                key = self._key = AES_KEY()
                rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
                if rv < 0:
                    raise ADEPTError('Failed to initialize AES key')
            def decrypt(self, data):
                out = create_string_buffer(len(data))
                iv = ("\x00" * self._blocksize)
                rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
                if rv == 0:
                    raise ADEPTError('AES decryption failed')
                return out.raw
        return AES

    def _load_crypto_pycrypto():
        from Crypto.Cipher import AES as _AES
        class AES(object):
            def __init__(self, key):
                self._aes = _AES.new(key, _AES.MODE_CBC)
            def decrypt(self, data):
                return self._aes.decrypt(data)
        return AES

    def _load_crypto():
        AES = None
        for loader in (_load_crypto_pycrypto, _load_crypto_libcrypto):
            try:
                AES = loader()
                break
            except (ImportError, ADEPTError):
                pass
        return AES

    AES = _load_crypto()


    DEVICE_KEY_PATH = r'Software\Adobe\Adept\Device'
    PRIVATE_LICENCE_KEY_PATH = r'Software\Adobe\Adept\Activation'

    MAX_PATH = 255

    kernel32 = windll.kernel32
    advapi32 = windll.advapi32
    crypt32 = windll.crypt32

    def GetSystemDirectory():
        GetSystemDirectoryW = kernel32.GetSystemDirectoryW
        GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
        GetSystemDirectoryW.restype = c_uint
        def GetSystemDirectory():
            buffer = create_unicode_buffer(MAX_PATH + 1)
            GetSystemDirectoryW(buffer, len(buffer))
            return buffer.value
        return GetSystemDirectory
    GetSystemDirectory = GetSystemDirectory()

    def GetVolumeSerialNumber():
        GetVolumeInformationW = kernel32.GetVolumeInformationW
        GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
                                          POINTER(c_uint), POINTER(c_uint),
                                          POINTER(c_uint), c_wchar_p, c_uint]
        GetVolumeInformationW.restype = c_uint
        def GetVolumeSerialNumber(path):
            vsn = c_uint(0)
            GetVolumeInformationW(
                path, None, 0, byref(vsn), None, None, None, 0)
            return vsn.value
        return GetVolumeSerialNumber
    GetVolumeSerialNumber = GetVolumeSerialNumber()

    def GetUserName():
        GetUserNameW = advapi32.GetUserNameW
        GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
        GetUserNameW.restype = c_uint
        def GetUserName():
            buffer = create_unicode_buffer(32)
            size = c_uint(len(buffer))
            while not GetUserNameW(buffer, byref(size)):
                buffer = create_unicode_buffer(len(buffer) * 2)
                size.value = len(buffer)
            return buffer.value.encode('utf-16-le')[::2]
        return GetUserName
    GetUserName = GetUserName()

    PAGE_EXECUTE_READWRITE = 0x40
    MEM_COMMIT = 0x1000
    MEM_RESERVE = 0x2000

    def VirtualAlloc():
        _VirtualAlloc = kernel32.VirtualAlloc
        _VirtualAlloc.argtypes = [LPVOID, c_size_t, DWORD, DWORD]
        _VirtualAlloc.restype = LPVOID
        def VirtualAlloc(addr, size, alloctype=(MEM_COMMIT | MEM_RESERVE),
                         protect=PAGE_EXECUTE_READWRITE):
            return _VirtualAlloc(addr, size, alloctype, protect)
        return VirtualAlloc
    VirtualAlloc = VirtualAlloc()

    MEM_RELEASE = 0x8000

    def VirtualFree():
        _VirtualFree = kernel32.VirtualFree
        _VirtualFree.argtypes = [LPVOID, c_size_t, DWORD]
        _VirtualFree.restype = BOOL
        def VirtualFree(addr, size=0, freetype=MEM_RELEASE):
            return _VirtualFree(addr, size, freetype)
        return VirtualFree
    VirtualFree = VirtualFree()

    class NativeFunction(object):
        def __init__(self, restype, argtypes, insns):
            self._buf = buf = VirtualAlloc(None, len(insns))
            memmove(buf, insns, len(insns))
            ftype = CFUNCTYPE(restype, *argtypes)
            self._native = ftype(buf)

        def __call__(self, *args):
            return self._native(*args)

        def __del__(self):
            if self._buf is not None:
                VirtualFree(self._buf)
                self._buf = None

    if struct.calcsize("P") == 4:
        CPUID0_INSNS = (
            "\x53"             # push   %ebx
            "\x31\xc0"         # xor    %eax,%eax
            "\x0f\xa2"         # cpuid
            "\x8b\x44\x24\x08" # mov    0x8(%esp),%eax
            "\x89\x18"         # mov    %ebx,0x0(%eax)
            "\x89\x50\x04"     # mov    %edx,0x4(%eax)
            "\x89\x48\x08"     # mov    %ecx,0x8(%eax)
            "\x5b"             # pop    %ebx
            "\xc3"             # ret
        )
        CPUID1_INSNS = (
            "\x53"             # push   %ebx
            "\x31\xc0"         # xor    %eax,%eax
            "\x40"             # inc    %eax
            "\x0f\xa2"         # cpuid
            "\x5b"             # pop    %ebx
            "\xc3"             # ret
        )
    else:
        CPUID0_INSNS = (
            "\x49\x89\xd8"     # mov    %rbx,%r8
            "\x49\x89\xc9"     # mov    %rcx,%r9
            "\x48\x31\xc0"     # xor    %rax,%rax
            "\x0f\xa2"         # cpuid
            "\x4c\x89\xc8"     # mov    %r9,%rax
            "\x89\x18"         # mov    %ebx,0x0(%rax)
            "\x89\x50\x04"     # mov    %edx,0x4(%rax)
            "\x89\x48\x08"     # mov    %ecx,0x8(%rax)
            "\x4c\x89\xc3"     # mov    %r8,%rbx
            "\xc3"             # retq
        )
        CPUID1_INSNS = (
            "\x53"             # push   %rbx
            "\x48\x31\xc0"     # xor    %rax,%rax
            "\x48\xff\xc0"     # inc    %rax
            "\x0f\xa2"         # cpuid
            "\x5b"             # pop    %rbx
            "\xc3"             # retq
        )

    def cpuid0():
        _cpuid0 = NativeFunction(None, [c_char_p], CPUID0_INSNS)
        buf = create_string_buffer(12)
        def cpuid0():
            _cpuid0(buf)
            return buf.raw
        return cpuid0
    cpuid0 = cpuid0()

    cpuid1 = NativeFunction(c_uint, [], CPUID1_INSNS)

    class DataBlob(Structure):
        _fields_ = [('cbData', c_uint),
                    ('pbData', c_void_p)]
    DataBlob_p = POINTER(DataBlob)

    def CryptUnprotectData():
        _CryptUnprotectData = crypt32.CryptUnprotectData
        _CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
                                        c_void_p, c_void_p, c_uint, DataBlob_p]
        _CryptUnprotectData.restype = c_uint
        def CryptUnprotectData(indata, entropy):
            indatab = create_string_buffer(indata)
            indata = DataBlob(len(indata), cast(indatab, c_void_p))
            entropyb = create_string_buffer(entropy)
            entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
            outdata = DataBlob()
            if not _CryptUnprotectData(byref(indata), None, byref(entropy),
                                       None, None, 0, byref(outdata)):
                raise ADEPTError("Failed to decrypt user key key (sic)")
            return string_at(outdata.pbData, outdata.cbData)
        return CryptUnprotectData
    CryptUnprotectData = CryptUnprotectData()

    def retrieve_key():
        if AES is None:
            tkMessageBox.showerror(
                "ADEPT Key",
                "This script requires PyCrypto or OpenSSL which must be installed "
                "separately.  Read the top-of-script comment for details.")
            return False
        root = GetSystemDirectory().split('\\')[0] + '\\'
        serial = GetVolumeSerialNumber(root)
        vendor = cpuid0()
        signature = struct.pack('>I', cpuid1())[1:]
        user = GetUserName()
        entropy = struct.pack('>I12s3s13s', serial, vendor, signature, user)
        cuser = winreg.HKEY_CURRENT_USER
        try:
            regkey = winreg.OpenKey(cuser, DEVICE_KEY_PATH)
        except WindowsError:
            raise ADEPTError("Adobe Digital Editions not activated")
        device = winreg.QueryValueEx(regkey, 'key')[0]
        keykey = CryptUnprotectData(device, entropy)
        userkey = None
        try:
            plkroot = winreg.OpenKey(cuser, PRIVATE_LICENCE_KEY_PATH)
        except WindowsError:
            raise ADEPTError("Could not locate ADE activation")
        for i in xrange(0, 16):
            try:
                plkparent = winreg.OpenKey(plkroot, "%04d" % (i,))
            except WindowsError:
                break
            ktype = winreg.QueryValueEx(plkparent, None)[0]
            if ktype != 'credentials':
                continue
            for j in xrange(0, 16):
                try:
                    plkkey = winreg.OpenKey(plkparent, "%04d" % (j,))
                except WindowsError:
                    break
                ktype = winreg.QueryValueEx(plkkey, None)[0]
                if ktype != 'privateLicenseKey':
                    continue
                userkey = winreg.QueryValueEx(plkkey, 'value')[0]
                break
            if userkey is not None:
                break
        if userkey is None:
            raise ADEPTError('Could not locate privateLicenseKey')
        userkey = userkey.decode('base64')
        aes = AES(keykey)
        userkey = aes.decrypt(userkey)
        userkey = userkey[26:-ord(userkey[-1])]
        return userkey

else:

    import xml.etree.ElementTree as etree
    import subprocess

    NSMAP = {'adept': 'http://ns.adobe.com/adept',
             'enc': 'http://www.w3.org/2001/04/xmlenc#'}

    def findActivationDat():
        home = os.getenv('HOME')
        cmdline = 'find "' + home + '/Library/Application Support/Adobe/Digital Editions" -name "activation.dat"'
        cmdline = cmdline.encode(sys.getfilesystemencoding())
        p2 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
        out1, out2 = p2.communicate()
        reslst = out1.split('\n')
        cnt = len(reslst)
        for j in xrange(cnt):
            resline = reslst[j]
            pp = resline.find('activation.dat')
            if pp >= 0:
                ActDatPath = resline
                break
        if os.path.exists(ActDatPath):
            return ActDatPath
        return None

    def retrieve_key():
        actpath = findActivationDat()
        if actpath is None:
            raise ADEPTError("Could not locate ADE activation")
        tree = etree.parse(actpath)
        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
        expr = '//%s/%s' % (adept('credentials'), adept('privateLicenseKey'))
        userkey = tree.findtext(expr)
        userkey = userkey.decode('base64')
        userkey = userkey[26:]
        return userkey
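A minimal sketch of how a caller might consume retrieve_key() from the ade_key module above; the output filename is illustrative only, and the module expects to run somewhere calibre.constants is importable:

from ade_key import retrieve_key, ADEPTError

try:
    keydata = retrieve_key()                   # raw decrypted ADEPT user key bytes
    open('adeptkey.der', 'wb').write(keydata)  # hypothetical output path
    print 'saved %d key bytes' % len(keydata)
except ADEPTError, e:
    print 'key retrieval failed:', e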
Binary file not shown.
@@ -1,582 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
|
||||||
# for personal use for archiving and converting your ebooks
|
|
||||||
|
|
||||||
# PLEASE DO NOT PIRATE EBOOKS!
|
|
||||||
|
|
||||||
# We want all authors and publishers, and eBook stores to live
|
|
||||||
# long and prosperous lives but at the same time we just want to
|
|
||||||
# be able to read OUR books on whatever device we want and to keep
|
|
||||||
# readable for a long, long time
|
|
||||||
|
|
||||||
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
|
||||||
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
|
||||||
# and many many others
|
|
||||||
|
|
||||||
# It can run standalone to convert K4M/K4PC/Mobi files, or it can be installed as a
|
|
||||||
# plugin for Calibre (http://calibre-ebook.com/about) so that importing
|
|
||||||
# K4 or Mobi with DRM is no londer a multi-step process.
|
|
||||||
#
|
|
||||||
# ***NOTE*** If you are using this script as a calibre plugin for a K4M or K4PC ebook
|
|
||||||
# then calibre must be installed on the same machine and in the same account as K4PC or K4M
|
|
||||||
# for the plugin version to function properly.
|
|
||||||
#
|
|
||||||
# To create a Calibre plugin, rename this file so that the filename
|
|
||||||
# ends in '_plugin.py', put it into a ZIP file with all its supporting python routines
|
|
||||||
# and import that ZIP into Calibre using its plugin configuration GUI.
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__version__ = '1.2'
|
|
||||||
|
|
||||||
class Unbuffered:
|
|
||||||
def __init__(self, stream):
|
|
||||||
self.stream = stream
|
|
||||||
def write(self, data):
|
|
||||||
self.stream.write(data)
|
|
||||||
self.stream.flush()
|
|
||||||
def __getattr__(self, attr):
|
|
||||||
return getattr(self.stream, attr)
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os, csv, getopt
|
|
||||||
import binascii
|
|
||||||
import zlib
|
|
||||||
import re
|
|
||||||
from struct import pack, unpack, unpack_from
|
|
||||||
|
|
||||||
|
|
||||||
#Exception Handling
|
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
#
|
|
||||||
# crypto digestroutines
|
|
||||||
#
|
|
||||||
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
def MD5(message):
|
|
||||||
ctx = hashlib.md5()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
def SHA1(message):
|
|
||||||
ctx = hashlib.sha1()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
# determine if we are running as a calibre plugin
|
|
||||||
if 'calibre' in sys.modules:
|
|
||||||
inCalibre = True
|
|
||||||
global openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
else:
|
|
||||||
inCalibre = False
|
|
||||||
|
|
||||||
#
|
|
||||||
# start of Kindle specific routines
|
|
||||||
#
|
|
||||||
|
|
||||||
if not inCalibre:
|
|
||||||
import mobidedrm
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
from k4pcutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
if sys.platform.startswith('darwin'):
|
|
||||||
from k4mutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
|
|
||||||
global kindleDatabase
|
|
||||||
|
|
||||||
# Encode the bytes in data with the characters in map
|
|
||||||
def encode(data, map):
|
|
||||||
result = ""
|
|
||||||
for char in data:
|
|
||||||
value = ord(char)
|
|
||||||
Q = (value ^ 0x80) // len(map)
|
|
||||||
R = value % len(map)
|
|
||||||
result += map[Q]
|
|
||||||
result += map[R]
|
|
||||||
return result
|
|
||||||
|
|
||||||
# Hash the bytes in data and then encode the digest with the characters in map
|
|
||||||
def encodeHash(data,map):
|
|
||||||
return encode(MD5(data),map)
|
|
||||||
|
|
||||||
# Decode the string in data with the characters in map. Returns the decoded bytes
|
|
||||||
def decode(data,map):
|
|
||||||
result = ""
|
|
||||||
for i in range (0,len(data)-1,2):
|
|
||||||
high = map.find(data[i])
|
|
||||||
low = map.find(data[i+1])
|
|
||||||
if (high == -1) or (low == -1) :
|
|
||||||
break
|
|
||||||
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
|
||||||
result += pack("B",value)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
# Parse the Kindle.info file and return the records as a list of key-values
|
|
||||||
def parseKindleInfo(kInfoFile):
|
|
||||||
DB = {}
|
|
||||||
infoReader = openKindleInfo(kInfoFile)
|
|
||||||
infoReader.read(1)
|
|
||||||
data = infoReader.read()
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
items = data.split('{')
|
|
||||||
else :
|
|
||||||
items = data.split('[')
|
|
||||||
for item in items:
|
|
||||||
splito = item.split(':')
|
|
||||||
DB[splito[0]] =splito[1]
|
|
||||||
return DB
|
|
||||||
|
|
||||||
# Get a record from the Kindle.info file for the key "hashedKey" (already hashed and encoded). Return the decoded and decrypted record
|
|
||||||
def getKindleInfoValueForHash(hashedKey):
|
|
||||||
global kindleDatabase
|
|
||||||
encryptedValue = decode(kindleDatabase[hashedKey],charMap2)
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
return CryptUnprotectData(encryptedValue,"")
|
|
||||||
else:
|
|
||||||
cleartext = CryptUnprotectData(encryptedValue)
|
|
||||||
return decode(cleartext, charMap1)
|
|
||||||
|
|
||||||
# Get a record from the Kindle.info file for the string in "key" (plaintext). Return the decoded and decrypted record
|
|
||||||
def getKindleInfoValueForKey(key):
|
|
||||||
return getKindleInfoValueForHash(encodeHash(key,charMap2))
|
|
||||||
|
|
||||||
# Find if the original string for a hashed/encoded string is known. If so return the original string othwise return an empty string.
|
|
||||||
def findNameForHash(hash):
|
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
|
||||||
result = ""
|
|
||||||
for name in names:
|
|
||||||
if hash == encodeHash(name, charMap2):
|
|
||||||
result = name
|
|
||||||
break
|
|
||||||
return result
|
|
||||||
|
|
||||||
# Print all the records from the kindle.info file (option -i)
|
|
||||||
def printKindleInfo():
|
|
||||||
for record in kindleDatabase:
|
|
||||||
name = findNameForHash(record)
|
|
||||||
if name != "" :
|
|
||||||
print (name)
|
|
||||||
print ("--------------------------")
|
|
||||||
else :
|
|
||||||
print ("Unknown Record")
|
|
||||||
print getKindleInfoValueForHash(record)
|
|
||||||
print "\n"
|
|
||||||
|
|
||||||
#
|
|
||||||
# PID generation routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# Returns two bit at offset from a bit field
|
|
||||||
def getTwoBitsFromBitField(bitField,offset):
|
|
||||||
byteNumber = offset // 4
|
|
||||||
bitPosition = 6 - 2*(offset % 4)
|
|
||||||
return ord(bitField[byteNumber]) >> bitPosition & 3
|
|
||||||
|
|
||||||
# Returns the six bits at offset from a bit field
|
|
||||||
def getSixBitsFromBitField(bitField,offset):
|
|
||||||
offset *= 3
|
|
||||||
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
|
||||||
return value
|
|
||||||
|
|
||||||
# 8 bits to six bits encoding from hash to generate PID string
|
|
||||||
def encodePID(hash):
|
|
||||||
global charMap3
|
|
||||||
PID = ""
|
|
||||||
for position in range (0,8):
|
|
||||||
PID += charMap3[getSixBitsFromBitField(hash,position)]
|
|
||||||
return PID
|
|
||||||
|
|
||||||
# Encryption table used to generate the device PID
|
|
||||||
def generatePidEncryptionTable() :
|
|
||||||
table = []
|
|
||||||
for counter1 in range (0,0x100):
|
|
||||||
value = counter1
|
|
||||||
for counter2 in range (0,8):
|
|
||||||
if (value & 1 == 0) :
|
|
||||||
value = value >> 1
|
|
||||||
else :
|
|
||||||
value = value >> 1
|
|
||||||
value = value ^ 0xEDB88320
|
|
||||||
table.append(value)
|
|
||||||
return table
|
|
||||||
|
|
||||||
# Seed value used to generate the device PID
|
|
||||||
def generatePidSeed(table,dsn) :
|
|
||||||
value = 0
|
|
||||||
for counter in range (0,4) :
|
|
||||||
index = (ord(dsn[counter]) ^ value) &0xFF
|
|
||||||
value = (value >> 8) ^ table[index]
|
|
||||||
return value
|
|
||||||
|
|
||||||
# Generate the device PID
|
|
||||||
def generateDevicePID(table,dsn,nbRoll):
|
|
||||||
seed = generatePidSeed(table,dsn)
|
|
||||||
pidAscii = ""
|
|
||||||
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
|
|
||||||
index = 0
|
|
||||||
for counter in range (0,nbRoll):
|
|
||||||
pid[index] = pid[index] ^ ord(dsn[counter])
|
|
||||||
index = (index+1) %8
|
|
||||||
for counter in range (0,8):
|
|
||||||
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
|
|
||||||
pidAscii += charMap4[index]
|
|
||||||
return pidAscii
|
|
||||||
|
|
||||||
# convert from 8 digit PID to 10 digit PID with checksum
|
|
||||||
def checksumPid(s):
|
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
|
||||||
crc = (~binascii.crc32(s,-1))&0xFFFFFFFF
|
|
||||||
crc = crc ^ (crc >> 16)
|
|
||||||
res = s
|
|
||||||
l = len(letters)
|
|
||||||
for i in (0,1):
|
|
||||||
b = crc & 0xff
|
|
||||||
pos = (b // l) ^ (b % l)
|
|
||||||
res += letters[pos%l]
|
|
||||||
crc >>= 8
|
|
||||||
return res
|
|
||||||
|
|
||||||
|
|
||||||
class MobiPeek:
|
|
||||||
def loadSection(self, section):
|
|
||||||
before, after = self.sections[section:section+2]
|
|
||||||
self.f.seek(before)
|
|
||||||
return self.f.read(after - before)
|
|
||||||
def __init__(self, filename):
|
|
||||||
self.f = file(filename, 'rb')
|
|
||||||
self.header = self.f.read(78)
|
|
||||||
self.ident = self.header[0x3C:0x3C+8]
|
|
||||||
if self.ident != 'BOOKMOBI' and self.ident != 'TEXtREAd':
|
|
||||||
raise DrmException('invalid file format')
|
|
||||||
self.num_sections, = unpack_from('>H', self.header, 76)
|
|
||||||
sections = self.f.read(self.num_sections*8)
|
|
||||||
self.sections = unpack_from('>%dL' % (self.num_sections*2), sections, 0)[::2] + (0xfffffff, )
|
|
||||||
self.sect0 = self.loadSection(0)
|
|
||||||
self.f.close()
|
|
||||||
def getBookTitle(self):
|
|
||||||
# get book title
|
|
||||||
toff, tlen = unpack('>II', self.sect0[0x54:0x5c])
|
|
||||||
tend = toff + tlen
|
|
||||||
title = self.sect0[toff:tend]
|
|
||||||
return title
|
|
||||||
def getexthData(self):
|
|
||||||
# if exth region exists then grab it
|
|
||||||
# get length of this header
|
|
||||||
length, type, codepage, unique_id, version = unpack('>LLLLL', self.sect0[20:40])
|
|
||||||
exth_flag, = unpack('>L', self.sect0[0x80:0x84])
|
|
||||||
exth = ''
|
|
||||||
if exth_flag & 0x40:
|
|
||||||
exth = self.sect0[16 + length:]
|
|
||||||
return exth
|
|
||||||
def isNotEncrypted(self):
|
|
||||||
lock_type, = unpack('>H', self.sect0[0xC:0xC+2])
|
|
||||||
if lock_type == 0:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
# DiapDealer's stuff: Parse the EXTH header records and parse the Kindleinfo
|
|
||||||
# file to calculate the book pid.
|
|
||||||
def getK4Pids(exth, title, kInfoFile=None):
|
|
||||||
global kindleDatabase
|
|
||||||
try:
|
|
||||||
kindleDatabase = parseKindleInfo(kInfoFile)
|
|
||||||
except Exception, message:
|
|
||||||
print(message)
|
|
||||||
|
|
||||||
if kindleDatabase != None :
|
|
||||||
# Get the Mazama Random number
|
|
||||||
MazamaRandomNumber = getKindleInfoValueForKey("MazamaRandomNumber")
|
|
||||||
|
|
||||||
# Get the HDD serial
|
|
||||||
encodedSystemVolumeSerialNumber = encodeHash(GetVolumeSerialNumber(),charMap1)
|
|
||||||
|
|
||||||
# Get the current user name
|
|
||||||
encodedUsername = encodeHash(GetUserName(),charMap1)
|
|
||||||
|
|
||||||
# concat, hash and encode to calculate the DSN
|
|
||||||
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
|
||||||
|
|
||||||
print("\nDSN: " + DSN)
|
|
||||||
|
|
||||||
# Compute the device PID (for which I can tell, is used for nothing).
|
|
||||||
# But hey, stuff being printed out is apparently cool.
|
|
||||||
table = generatePidEncryptionTable()
|
|
||||||
devicePID = generateDevicePID(table,DSN,4)
|
|
||||||
|
|
||||||
print("Device PID: " + checksumPid(devicePID))
|
|
||||||
|
|
||||||
# Compute book PID
|
|
||||||
exth_records = {}
|
|
||||||
nitems, = unpack('>I', exth[8:12])
|
|
||||||
pos = 12
|
|
||||||
|
|
||||||
exth_records[209] = None
|
|
||||||
# Parse the exth records, storing data indexed by type
|
|
||||||
for i in xrange(nitems):
|
|
||||||
type, size = unpack('>II', exth[pos: pos + 8])
|
|
||||||
content = exth[pos + 8: pos + size]
|
|
||||||
|
|
||||||
exth_records[type] = content
|
|
||||||
pos += size
|
|
||||||
|
|
||||||
# Grab the contents of the type 209 exth record
|
|
||||||
if exth_records[209] != None:
|
|
||||||
data = exth_records[209]
|
|
||||||
else:
|
|
||||||
raise DrmException("\nNo EXTH record type 209 - Perhaps not a K4 file?")
|
|
||||||
|
|
||||||
# Parse the 209 data to find the the exth record with the token data.
|
|
||||||
# The last character of the 209 data points to the record with the token.
|
|
||||||
# Always 208 from my experience, but I'll leave the logic in case that changes.
|
|
||||||
for i in xrange(len(data)):
|
|
||||||
if ord(data[i]) != 0:
|
|
||||||
if exth_records[ord(data[i])] != None:
|
|
||||||
token = exth_records[ord(data[i])]
|
|
||||||
|
|
||||||
# Get the kindle account token
|
|
||||||
kindleAccountToken = getKindleInfoValueForKey("kindle.account.tokens")
|
|
||||||
|
|
||||||
print("Account Token: " + kindleAccountToken)
|
|
||||||
|
|
||||||
pidHash = SHA1(DSN+kindleAccountToken+exth_records[209]+token)
|
|
||||||
|
|
||||||
bookPID = encodePID(pidHash)
|
|
||||||
bookPID = checksumPid(bookPID)
|
|
||||||
|
|
||||||
if exth_records[503] != None:
|
|
||||||
print "Pid for " + exth_records[503] + ": " + bookPID
|
|
||||||
else:
|
|
||||||
print "Pid for " + title + ":" + bookPID
|
|
||||||
return bookPID
|
|
||||||
|
|
||||||
raise DrmException("\nCould not access K4 data - Perhaps K4 is not installed/configured?")
|
|
||||||
return null
|
|
||||||
|
|
||||||
def usage(progname):
|
|
||||||
print "Removes DRM protection from K4PC, K4M, and Mobi ebooks"
|
|
||||||
print "Usage:"
|
|
||||||
print " %s [-k <kindle.info>] [-p <pidnums>] <infile> <outfile> " % progname
|
|
||||||
|
|
||||||
#
|
|
||||||
# Main
|
|
||||||
#
|
|
||||||
def main(argv=sys.argv):
|
|
||||||
global kindleDatabase
|
|
||||||
import mobidedrm
|
|
||||||
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
kInfoFiles = []
|
|
||||||
pidnums = ""
|
|
||||||
|
|
||||||
print ('K4MobiDeDrm v%(__version__)s '
|
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
|
||||||
|
|
||||||
try:
|
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:")
|
|
||||||
except getopt.GetoptError, err:
|
|
||||||
print str(err)
|
|
||||||
usage(progname)
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
if len(args)<2:
|
|
||||||
usage(progname)
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
for o, a in opts:
|
|
||||||
if o == "-k":
|
|
||||||
if a == None :
|
|
||||||
raise DrmException("Invalid parameter for -k")
|
|
||||||
kInfoFiles.append(a)
|
|
||||||
if o == "-p":
|
|
||||||
if a == None :
|
|
||||||
raise DrmException("Invalid parameter for -p")
|
|
||||||
pidnums = a
|
|
||||||
|
|
||||||
kindleDatabase = None
|
|
||||||
infile = args[0]
|
|
||||||
outfile = args[1]
|
|
||||||
DecodeErrorString = ""
|
|
||||||
try:
|
|
||||||
# first try with K4PC/K4M
|
|
||||||
ex = MobiPeek(infile)
|
|
||||||
if ex.isNotEncrypted():
|
|
||||||
print "File was Not Encrypted"
|
|
||||||
return 2
|
|
||||||
title = ex.getBookTitle()
|
|
||||||
exth = ex.getexthData()
|
|
||||||
if exth=='':
|
|
||||||
raise DrmException("Not a Kindle Mobipocket file")
|
|
||||||
pid = getK4Pids(exth, title)
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(infile, pid)
|
|
||||||
except DrmException, e:
|
|
||||||
DecodeErrorString += "Error trying default K4 info: " + str(e) + "\n"
|
|
||||||
pass
|
|
||||||
except mobidedrm.DrmException, e:
|
|
||||||
DecodeErrorString += "Error trying default K4 info: " + str(e) + "\n"
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
file(outfile, 'wb').write(unlocked_file)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# now try alternate kindle.info files
|
|
||||||
if kInfoFiles:
|
|
||||||
for infoFile in kInfoFiles:
|
|
||||||
kindleDatabase = None
|
|
||||||
try:
|
|
||||||
title = ex.getBookTitle()
|
|
||||||
exth = ex.getexthData()
|
|
||||||
if exth=='':
|
|
||||||
raise DrmException("Not a Kindle Mobipocket file")
|
|
||||||
pid = getK4Pids(exth, title, infoFile)
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(infile, pid)
|
|
||||||
except DrmException, e:
|
|
||||||
DecodeErrorString += "Error trying " + infoFile + " K4 info: " + str(e) + "\n"
|
|
||||||
pass
|
|
||||||
except mobidedrm.DrmException, e:
|
|
||||||
DecodeErrorString += "Error trying " + infoFile + " K4 info: " + str(e) + "\n"
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
file(outfile, 'wb').write(unlocked_file)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# Lastly, try from the pid list
|
|
||||||
pids = pidnums.split(',')
|
|
||||||
for pid in pids:
|
|
||||||
try:
|
|
||||||
print 'Trying: "'+ pid + '"'
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(infile, pid)
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
file(outfile, 'wb').write(unlocked_file)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# we could not unencrypt book
|
|
||||||
print DecodeErrorString
|
|
||||||
print "Error: Could Not Unencrypt Book"
|
|
||||||
return 1
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
sys.exit(main())
|
|
||||||
|
|
||||||
|
|
||||||
if not __name__ == "__main__" and inCalibre:
|
|
||||||
from calibre.customize import FileTypePlugin
|
|
||||||
|
|
||||||
class K4DeDRM(FileTypePlugin):
|
|
||||||
name = 'K4PC, K4Mac, Mobi DeDRM' # Name of the plugin
|
|
||||||
description = 'Removes DRM from K4PC, K4Mac, and Mobi files. \
|
|
||||||
Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
|
||||||
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
|
||||||
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
|
||||||
version = (0, 1, 3) # The version number of this plugin
|
|
||||||
file_types = set(['prc','mobi','azw']) # The file types that this plugin will be applied to
|
|
||||||
on_import = True # Run this plugin during the import
|
|
||||||
priority = 200 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
global kindleDatabase
|
|
||||||
global openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
from k4pcutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
if sys.platform.startswith('darwin'):
|
|
||||||
from k4mutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap1, charMap2, charMap3, charMap4
|
|
||||||
import mobidedrm
|
|
||||||
|
|
||||||
# Get supplied list of PIDs to try from plugin customization.
|
|
||||||
pidnums = self.site_customization
|
|
||||||
|
|
||||||
# Load any kindle info files (*.info) included Calibre's config directory.
|
|
||||||
kInfoFiles = []
|
|
||||||
try:
|
|
||||||
# Find Calibre's configuration directory.
|
|
||||||
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
|
||||||
print 'K4MobiDeDRM: Calibre configuration directory = %s' % confpath
|
|
||||||
files = os.listdir(confpath)
|
|
||||||
filefilter = re.compile("\.info$", re.IGNORECASE)
|
|
||||||
files = filter(filefilter.search, files)
|
|
||||||
|
|
||||||
if files:
|
|
||||||
for filename in files:
|
|
||||||
fpath = os.path.join(confpath, filename)
|
|
||||||
kInfoFiles.append(fpath)
|
|
||||||
print 'K4MobiDeDRM: Kindle info file %s found in config folder.' % filename
|
|
||||||
except IOError:
|
|
||||||
print 'K4MobiDeDRM: Error reading kindle info files from config directory.'
|
|
||||||
pass
|
|
||||||
|
|
||||||
# first try with book specifc pid from K4PC or K4M
|
|
||||||
try:
|
|
||||||
kindleDatabase = None
|
|
||||||
ex = MobiPeek(path_to_ebook)
|
|
||||||
if ex.isNotEncrypted():
|
|
||||||
return path_to_ebook
|
|
||||||
title = ex.getBookTitle()
|
|
||||||
exth = ex.getexthData()
|
|
||||||
if exth=='':
|
|
||||||
raise DrmException("Not a Kindle Mobipocket file")
|
|
||||||
pid = getK4Pids(exth, title)
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(path_to_ebook,pid)
|
|
||||||
except DrmException:
|
|
||||||
pass
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
of = self.temporary_file('.mobi')
|
|
||||||
of.write(unlocked_file)
|
|
||||||
of.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
# Now try alternate kindle info files
|
|
||||||
if kInfoFiles:
|
|
||||||
for infoFile in kInfoFiles:
|
|
||||||
kindleDatabase = None
|
|
||||||
try:
|
|
||||||
title = ex.getBookTitle()
|
|
||||||
exth = ex.getexthData()
|
|
||||||
if exth=='':
|
|
||||||
raise DrmException("Not a Kindle Mobipocket file")
|
|
||||||
pid = getK4Pids(exth, title, infoFile)
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(path_to_ebook,pid)
|
|
||||||
except DrmException:
|
|
||||||
pass
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
of = self.temporary_file('.mobi')
|
|
||||||
of.write(unlocked_file)
|
|
||||||
of.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
# now try from the pid list
|
|
||||||
pids = pidnums.split(',')
|
|
||||||
for pid in pids:
|
|
||||||
try:
|
|
||||||
unlocked_file = mobidedrm.getUnencryptedBook(path_to_ebook, pid)
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
of = self.temporary_file('.mobi')
|
|
||||||
of.write(unlocked_file)
|
|
||||||
of.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
#if you reached here then no luck raise and exception
|
|
||||||
if is_ok_to_use_qt():
|
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
|
||||||
d.show()
|
|
||||||
d.raise_()
|
|
||||||
d.exec_()
|
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
|
||||||
return ""
|
|
||||||
|
|
||||||
def customization_help(self, gui=False):
|
|
||||||
return 'Enter each 10 character PID separated by a comma (no spaces).'
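
As a quick illustration of the expected customization value (the PIDs below are made up, not real ones), the plugin simply splits the string on commas before trying each PID in turn:

# illustrative only: made-up PIDs showing the expected plugin customization format
pidnums = "A1B2C3D4E5,F6G7H8I9J0"
pids = pidnums.split(',')    # -> ['A1B2C3D4E5', 'F6G7H8I9J0']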
|
|
||||||
@@ -1,158 +1,16 @@
|
|||||||
# standalone set of Mac OSX specific routines needed for K4DeDRM
|
# standalone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
import os.path
|
||||||
|
|
||||||
#Exception Handling
|
|
||||||
class K4MDrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
import signal
|
|
||||||
import threading
|
|
||||||
import subprocess
|
import subprocess
|
||||||
from subprocess import Popen, PIPE, STDOUT
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
# **heavily** chopped up and modified version of asyncproc.py
|
class DrmException(Exception):
|
||||||
# to make it actually work on Windows as well as Mac/Linux
|
pass
|
||||||
# For the original see:
|
|
||||||
# "http://www.lysator.liu.se/~bellman/download/"
|
|
||||||
# author is "Thomas Bellman <bellman@lysator.liu.se>"
|
|
||||||
# available under GPL version 3 or Later
|
|
||||||
|
|
||||||
# create an asynchronous subprocess whose output can be collected in
|
|
||||||
# a non-blocking manner
|
|
||||||
|
|
||||||
# What a mess! Have to use threads just to get non-blocking io
|
|
||||||
# in a cross-platform manner
|
|
||||||
|
|
||||||
# luckily all thread use is hidden within this class
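
A minimal usage sketch of this wrapper as the older code calls it further down (the command line here is only an example); passing 'wait' blocks until the subprocess exits, while 'nowait' just polls:

# illustrative only: run a command and collect its stdout via the Process wrapper
p = Process('/usr/sbin/ioreg -l', shell=True, bufsize=1, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=False)
rc = p.wait('wait')      # blocks until the subprocess finishes
output = p.read()        # non-blocking read of everything collected so far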
|
|
||||||
|
|
||||||
class Process(object):
|
|
||||||
def __init__(self, *params, **kwparams):
|
|
||||||
if len(params) <= 3:
|
|
||||||
kwparams.setdefault('stdin', subprocess.PIPE)
|
|
||||||
if len(params) <= 4:
|
|
||||||
kwparams.setdefault('stdout', subprocess.PIPE)
|
|
||||||
if len(params) <= 5:
|
|
||||||
kwparams.setdefault('stderr', subprocess.PIPE)
|
|
||||||
self.__pending_input = []
|
|
||||||
self.__collected_outdata = []
|
|
||||||
self.__collected_errdata = []
|
|
||||||
self.__exitstatus = None
|
|
||||||
self.__lock = threading.Lock()
|
|
||||||
self.__inputsem = threading.Semaphore(0)
|
|
||||||
self.__quit = False
|
|
||||||
|
|
||||||
self.__process = subprocess.Popen(*params, **kwparams)
|
|
||||||
|
|
||||||
if self.__process.stdin:
|
|
||||||
self.__stdin_thread = threading.Thread(
|
|
||||||
name="stdin-thread",
|
|
||||||
target=self.__feeder, args=(self.__pending_input,
|
|
||||||
self.__process.stdin))
|
|
||||||
self.__stdin_thread.setDaemon(True)
|
|
||||||
self.__stdin_thread.start()
|
|
||||||
|
|
||||||
if self.__process.stdout:
|
|
||||||
self.__stdout_thread = threading.Thread(
|
|
||||||
name="stdout-thread",
|
|
||||||
target=self.__reader, args=(self.__collected_outdata,
|
|
||||||
self.__process.stdout))
|
|
||||||
self.__stdout_thread.setDaemon(True)
|
|
||||||
self.__stdout_thread.start()
|
|
||||||
|
|
||||||
if self.__process.stderr:
|
|
||||||
self.__stderr_thread = threading.Thread(
|
|
||||||
name="stderr-thread",
|
|
||||||
target=self.__reader, args=(self.__collected_errdata,
|
|
||||||
self.__process.stderr))
|
|
||||||
self.__stderr_thread.setDaemon(True)
|
|
||||||
self.__stderr_thread.start()
|
|
||||||
|
|
||||||
def pid(self):
|
|
||||||
return self.__process.pid
|
|
||||||
|
|
||||||
def kill(self, signal):
|
|
||||||
self.__process.send_signal(signal)
|
|
||||||
|
|
||||||
# check on subprocess (pass in 'nowait') to act like poll
|
|
||||||
def wait(self, flag):
|
|
||||||
if flag.lower() == 'nowait':
|
|
||||||
rc = self.__process.poll()
|
|
||||||
else:
|
|
||||||
rc = self.__process.wait()
|
|
||||||
if rc != None:
|
|
||||||
if self.__process.stdin:
|
|
||||||
self.closeinput()
|
|
||||||
if self.__process.stdout:
|
|
||||||
self.__stdout_thread.join()
|
|
||||||
if self.__process.stderr:
|
|
||||||
self.__stderr_thread.join()
|
|
||||||
return self.__process.returncode
|
|
||||||
|
|
||||||
def terminate(self):
|
|
||||||
if self.__process.stdin:
|
|
||||||
self.closeinput()
|
|
||||||
self.__process.terminate()
|
|
||||||
|
|
||||||
# thread gets data from subprocess stdout
|
|
||||||
def __reader(self, collector, source):
|
|
||||||
while True:
|
|
||||||
data = os.read(source.fileno(), 65536)
|
|
||||||
self.__lock.acquire()
|
|
||||||
collector.append(data)
|
|
||||||
self.__lock.release()
|
|
||||||
if data == "":
|
|
||||||
source.close()
|
|
||||||
break
|
|
||||||
return
|
|
||||||
|
|
||||||
# thread feeds data to subprocess stdin
|
|
||||||
def __feeder(self, pending, drain):
|
|
||||||
while True:
|
|
||||||
self.__inputsem.acquire()
|
|
||||||
self.__lock.acquire()
|
|
||||||
if not pending and self.__quit:
|
|
||||||
drain.close()
|
|
||||||
self.__lock.release()
|
|
||||||
break
|
|
||||||
data = pending.pop(0)
|
|
||||||
self.__lock.release()
|
|
||||||
drain.write(data)
|
|
||||||
|
|
||||||
# non-blocking read of data from subprocess stdout
|
|
||||||
def read(self):
|
|
||||||
self.__lock.acquire()
|
|
||||||
outdata = "".join(self.__collected_outdata)
|
|
||||||
del self.__collected_outdata[:]
|
|
||||||
self.__lock.release()
|
|
||||||
return outdata
|
|
||||||
|
|
||||||
# non-blocking read of data from subprocess stderr
|
|
||||||
def readerr(self):
|
|
||||||
self.__lock.acquire()
|
|
||||||
errdata = "".join(self.__collected_errdata)
|
|
||||||
del self.__collected_errdata[:]
|
|
||||||
self.__lock.release()
|
|
||||||
return errdata
|
|
||||||
|
|
||||||
# non-blocking write to stdin of subprocess
|
|
||||||
def write(self, data):
|
|
||||||
if self.__process.stdin is None:
|
|
||||||
raise ValueError("Writing to process with stdin not a pipe")
|
|
||||||
self.__lock.acquire()
|
|
||||||
self.__pending_input.append(data)
|
|
||||||
self.__inputsem.release()
|
|
||||||
self.__lock.release()
|
|
||||||
|
|
||||||
# close stdinput of subprocess
|
|
||||||
def closeinput(self):
|
|
||||||
self.__lock.acquire()
|
|
||||||
self.__quit = True
|
|
||||||
self.__inputsem.release()
|
|
||||||
self.__lock.release()
|
|
||||||
|
|
||||||
|
|
||||||
# interface to needed routines in openssl's libcrypto
|
# interface to needed routines in openssl's libcrypto
|
||||||
@@ -163,7 +21,7 @@ def _load_crypto_libcrypto():
|
|||||||
|
|
||||||
libcrypto = find_library('crypto')
|
libcrypto = find_library('crypto')
|
||||||
if libcrypto is None:
|
if libcrypto is None:
|
||||||
raise K4MDrmException('libcrypto not found')
|
raise DrmException('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
@@ -196,27 +54,24 @@ def _load_crypto_libcrypto():
|
|||||||
def set_decrypt_key(self, userkey, iv):
|
def set_decrypt_key(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
raise K4MDrmException('AES improper key used')
|
raise DrmException('AES improper key used')
|
||||||
return
|
return
|
||||||
keyctx = self._keyctx = AES_KEY()
|
keyctx = self._keyctx = AES_KEY()
|
||||||
self.iv = iv
|
self.iv = iv
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise K4MDrmException('Failed to initialize AES key')
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise K4MDrmException('AES decryption failed')
|
raise DrmException('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
def keyivgen(self, passwd):
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
salt = '16743'
|
saltlen = len(salt)
|
||||||
saltlen = 5
|
|
||||||
passlen = len(passwd)
|
passlen = len(passwd)
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
out = create_string_buffer(keylen)
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
return out.raw
|
return out.raw
|
||||||
@@ -226,7 +81,7 @@ def _load_crypto():
|
|||||||
LibCrypto = None
|
LibCrypto = None
|
||||||
try:
|
try:
|
||||||
LibCrypto = _load_crypto_libcrypto()
|
LibCrypto = _load_crypto_libcrypto()
|
||||||
except (ImportError, K4MDrmException):
|
except (ImportError, DrmException):
|
||||||
pass
|
pass
|
||||||
return LibCrypto
|
return LibCrypto
|
||||||
|
|
||||||
@@ -236,6 +91,81 @@ LibCrypto = _load_crypto()
|
|||||||
# Utility Routines
|
# Utility Routines
|
||||||
#
|
#
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
|
# For kinf approach of K4PC/K4Mac
|
||||||
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
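
A small sanity check of the scheme above, assuming the encode, decode and charMap1 definitions in this file; for maps whose length divides 128 (32 or 64 characters) decode exactly reverses encode:

# illustrative only: each input byte becomes two map characters and back again
sample = 'serial!@#username'
obfuscated = encode(sample, charMap1)
assert decode(obfuscated, charMap1) == sample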
|
||||||
|
|
||||||
|
# For .kinf approach of K4PC and now K4Mac
|
||||||
|
# generate a table of prime numbers less than or equal to int n
|
||||||
|
def primes(n):
|
||||||
|
if n==2: return [2]
|
||||||
|
elif n<2: return []
|
||||||
|
s=range(3,n+1,2)
|
||||||
|
mroot = n ** 0.5
|
||||||
|
half=(n+1)/2-1
|
||||||
|
i=0
|
||||||
|
m=3
|
||||||
|
while m <= mroot:
|
||||||
|
if s[i]:
|
||||||
|
j=(m*m-3)/2
|
||||||
|
s[j]=0
|
||||||
|
while j<half:
|
||||||
|
s[j]=0
|
||||||
|
j+=m
|
||||||
|
i=i+1
|
||||||
|
m=2*i+3
|
||||||
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
||||||
# returns with the serial number of drive whose BSD Name is "disk0"
|
# returns with the serial number of drive whose BSD Name is "disk0"
|
||||||
def GetVolumeSerialNumber():
|
def GetVolumeSerialNumber():
|
||||||
@@ -244,10 +174,9 @@ def GetVolumeSerialNumber():
|
|||||||
return sernum
|
return sernum
|
||||||
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
p = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=False)
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
poll = p.wait('wait')
|
out1, out2 = p.communicate()
|
||||||
results = p.read()
|
reslst = out1.split('\n')
|
||||||
reslst = results.split('\n')
|
|
||||||
cnt = len(reslst)
|
cnt = len(reslst)
|
||||||
bsdname = None
|
bsdname = None
|
||||||
sernum = None
|
sernum = None
|
||||||
@@ -266,69 +195,357 @@ def GetVolumeSerialNumber():
|
|||||||
foundIt = True
|
foundIt = True
|
||||||
break
|
break
|
||||||
if not foundIt:
|
if not foundIt:
|
||||||
sernum = '9999999999'
|
sernum = ''
|
||||||
return sernum
|
return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
dpath = home + '/Library/Application Support/Kindle'
|
||||||
|
cmdline = '/sbin/mount'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
disk = ''
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.startswith('/dev'):
|
||||||
|
(devpart, mpath) = resline.split(' on ')
|
||||||
|
dpart = devpart[5:]
|
||||||
|
pp = mpath.find('(')
|
||||||
|
if pp >= 0:
|
||||||
|
mpath = mpath[:pp-1]
|
||||||
|
if dpath.startswith(mpath):
|
||||||
|
disk = dpart
|
||||||
|
return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
|
||||||
|
def GetDiskPartitionUUID(diskpart):
|
||||||
|
uuidnum = os.getenv('MYUUIDNUMBER')
|
||||||
|
if uuidnum != None:
|
||||||
|
return uuidnum
|
||||||
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
bsdname = None
|
||||||
|
uuidnum = None
|
||||||
|
foundIt = False
|
||||||
|
nest = 0
|
||||||
|
uuidnest = -1
|
||||||
|
partnest = -2
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.find('{') >= 0:
|
||||||
|
nest += 1
|
||||||
|
if resline.find('}') >= 0:
|
||||||
|
nest -= 1
|
||||||
|
pp = resline.find('"UUID" = "')
|
||||||
|
if pp >= 0:
|
||||||
|
uuidnum = resline[pp+10:-1]
|
||||||
|
uuidnum = uuidnum.strip()
|
||||||
|
uuidnest = nest
|
||||||
|
if partnest == uuidnest and uuidnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
bb = resline.find('"BSD Name" = "')
|
||||||
|
if bb >= 0:
|
||||||
|
bsdname = resline[bb+14:-1]
|
||||||
|
bsdname = bsdname.strip()
|
||||||
|
if (bsdname == diskpart):
|
||||||
|
partnest = nest
|
||||||
|
else :
|
||||||
|
partnest = -2
|
||||||
|
if partnest == uuidnest and partnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if nest == 0:
|
||||||
|
partnest = -2
|
||||||
|
uuidnest = -1
|
||||||
|
uuidnum = None
|
||||||
|
bsdname = None
|
||||||
|
if not foundIt:
|
||||||
|
uuidnum = ''
|
||||||
|
return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
|
||||||
|
macnum = os.getenv('MYMACNUM')
|
||||||
|
if macnum != None:
|
||||||
|
return macnum
|
||||||
|
cmdline = '/sbin/ifconfig en0'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
macnum = None
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
pp = resline.find('ether ')
|
||||||
|
if pp >= 0:
|
||||||
|
macnum = resline[pp+6:-1]
|
||||||
|
macnum = macnum.strip()
|
||||||
|
# print "original mac", macnum
|
||||||
|
# now munge it up the way Kindle app does
|
||||||
|
# by xoring it with 0xa5 and swapping elements 3 and 4
|
||||||
|
maclst = macnum.split(':')
|
||||||
|
n = len(maclst)
|
||||||
|
if n != 6:
|
||||||
|
foundIt = False
|
||||||
|
break
|
||||||
|
for i in range(6):
|
||||||
|
maclst[i] = int('0x' + maclst[i], 0)
|
||||||
|
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
|
||||||
|
mlst[5] = maclst[5] ^ 0xa5
|
||||||
|
mlst[4] = maclst[3] ^ 0xa5
|
||||||
|
mlst[3] = maclst[4] ^ 0xa5
|
||||||
|
mlst[2] = maclst[2] ^ 0xa5
|
||||||
|
mlst[1] = maclst[1] ^ 0xa5
|
||||||
|
mlst[0] = maclst[0] ^ 0xa5
|
||||||
|
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if not foundIt:
|
||||||
|
macnum = ''
|
||||||
|
return macnum
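
A worked example of the munging rule described in the comments above, on a made-up MAC address (each byte is XORed with 0xa5, then bytes 3 and 4 are swapped):

# illustrative only: munge a made-up MAC the same way GetMACAddressMunged() does
mac = '01:23:45:67:89:ab'
b = [int(x, 16) ^ 0xa5 for x in mac.split(':')]
b[3], b[4] = b[4], b[3]
print "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % tuple(b)    # a486e02cc20e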
|
||||||
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
# uses unix env to get username instead of using sysctlbyname
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
username = os.getenv('USER')
|
username = os.getenv('USER')
|
||||||
return username
|
return username
|
||||||
|
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
|
||||||
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
|
||||||
|
|
||||||
def encode(data, map):
|
|
||||||
result = ""
|
|
||||||
for char in data:
|
|
||||||
value = ord(char)
|
|
||||||
Q = (value ^ 0x80) // len(map)
|
|
||||||
R = value % len(map)
|
|
||||||
result += map[Q]
|
|
||||||
result += map[R]
|
|
||||||
return result
|
|
||||||
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
def SHA256(message):
|
|
||||||
ctx = hashlib.sha256()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
# implements a Pseudo Mac Version of Windows built-in Crypto routine
|
# implements a Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used by Kindle for Mac versions < 1.6.0
|
||||||
def CryptUnprotectData(encryptedData):
|
def CryptUnprotectData(encryptedData):
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if sernum == '':
|
||||||
|
sernum = '9999999999'
|
||||||
|
sp = sernum + '!@#' + GetUserName()
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
passwdData = encode(SHA256(sp),charMap1)
|
||||||
|
salt = '16743'
|
||||||
|
iter = 0x3e8
|
||||||
|
keylen = 0x80
|
||||||
crp = LibCrypto()
|
crp = LibCrypto()
|
||||||
key_iv = crp.keyivgen(passwdData)
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
key = key_iv[0:32]
|
key = key_iv[0:32]
|
||||||
iv = key_iv[32:48]
|
iv = key_iv[32:48]
|
||||||
crp.set_decrypt_key(key,iv)
|
crp.set_decrypt_key(key,iv)
|
||||||
cleartext = crp.decrypt(encryptedData)
|
cleartext = crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext,charMap1)
|
||||||
return cleartext
|
return cleartext
|
||||||
|
|
||||||
# Locate and open the .kindle-info file
|
|
||||||
def openKindleInfo(kInfoFile=None):
|
def isNewInstall():
|
||||||
if kInfoFile == None:
|
home = os.getenv('HOME')
|
||||||
|
# soccer game fan anyone
|
||||||
|
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
|
||||||
|
# print dpath, os.path.exists(dpath)
|
||||||
|
if os.path.exists(dpath):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
# K4Mac now has an extensive set of id strings it uses
|
||||||
|
# in encoding pids and in creating unique passwords
|
||||||
|
# for use in its own version of CryptUnprotectDataV2
|
||||||
|
|
||||||
|
# BUT Amazon has now become nasty enough to detect when its app
|
||||||
|
# is being run under a debugger and actually changes code paths
|
||||||
|
# including which one of these strings is chosen, all to try
|
||||||
|
# to prevent reverse engineering
|
||||||
|
|
||||||
|
# Sad really ... they will only hurt their own sales ...
|
||||||
|
# true book lovers really want to keep their books forever
|
||||||
|
# and move them to their devices and DRM prevents that so they
|
||||||
|
# will just buy from someplace else that they can remove
|
||||||
|
# the DRM from
|
||||||
|
|
||||||
|
# Amazon should know by now that true book lovers are not like
|
||||||
|
# penniless kids that pirate music, we do not pirate books
|
||||||
|
|
||||||
|
if isNewInstall():
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if len(sernum) > 7:
|
||||||
|
return sernum
|
||||||
|
diskpart = GetUserHomeAppSupKindleDirParitionName()
|
||||||
|
uuidnum = GetDiskPartitionUUID(diskpart)
|
||||||
|
if len(uuidnum) > 7:
|
||||||
|
return uuidnum
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
|
# implements a Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.6.0
|
||||||
|
def CryptUnprotectDataV2(encryptedData):
|
||||||
|
sp = GetUserName() + ':&%:' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap5)
|
||||||
|
# salt generation as per the code
|
||||||
|
salt = 0x0512981d * 2 * 1 * 1
|
||||||
|
salt = str(salt) + GetUserName()
|
||||||
|
salt = encode(salt,charMap5)
|
||||||
|
crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
key = key_iv[0:32]
|
||||||
|
iv = key_iv[32:48]
|
||||||
|
crp.set_decrypt_key(key,iv)
|
||||||
|
cleartext = crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap5)
|
||||||
|
return cleartext
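
For readers without libcrypto to hand, the key/iv derivation used here can be sketched with Python's own hashlib (available from Python 2.7.8); this is an illustrative alternative under those assumptions, not what the script itself calls:

# illustrative sketch only: the same PBKDF2-HMAC-SHA1 derivation via hashlib,
# assuming passwdData and salt are built exactly as in CryptUnprotectDataV2 above
import hashlib
key_iv = hashlib.pbkdf2_hmac('sha1', passwdData, salt, 0x800, 0x400)
key, iv = key_iv[0:32], key_iv[32:48]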
|
||||||
|
|
||||||
|
|
||||||
|
# Locate the .kindle-info files
|
||||||
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
|
# first search for current .kindle-info files
|
||||||
home = os.getenv('HOME')
|
home = os.getenv('HOME')
|
||||||
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
p1 = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=False)
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
poll = p1.wait('wait')
|
out1, out2 = p1.communicate()
|
||||||
results = p1.read()
|
reslst = out1.split('\n')
|
||||||
reslst = results.split('\n')
|
|
||||||
kinfopath = 'NONE'
|
kinfopath = 'NONE'
|
||||||
cnt = len(reslst)
|
found = False
|
||||||
for j in xrange(cnt):
|
for resline in reslst:
|
||||||
resline = reslst[j]
|
if os.path.isfile(resline):
|
||||||
pp = resline.find('.kindle-info')
|
kInfoFiles.append(resline)
|
||||||
if pp >= 0:
|
found = True
|
||||||
kinfopath = resline
|
# add any .kinf files
|
||||||
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
|
if not found:
|
||||||
|
print('No kindle-info files have been found.')
|
||||||
|
return kInfoFiles
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
|
||||||
|
# database of keynames and values
|
||||||
|
def getDBfromFile(kInfoFile):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
|
DB = {}
|
||||||
|
cnt = 0
|
||||||
|
infoReader = open(kInfoFile, 'r')
|
||||||
|
hdr = infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
|
||||||
|
if data.find('[') != -1 :
|
||||||
|
# older style kindle-info file
|
||||||
|
items = data.split('[')
|
||||||
|
for item in items:
|
||||||
|
if item != '':
|
||||||
|
keyhash, rawdata = item.split(':')
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap2) == keyhash:
|
||||||
|
keyname = name
|
||||||
break
|
break
|
||||||
if not os.path.exists(kinfopath):
|
if keyname == "unknown":
|
||||||
raise K4MDrmException('Error: .kindle-info file can not be found')
|
keyname = keyhash
|
||||||
return open(kinfopath,'r')
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
else:
|
cleartext = CryptUnprotectData(encryptedValue)
|
||||||
return open(kInfoFile, 'r')
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# else newer style .kinf file used by K4Mac >= 1.6.0
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
# "entropy" not used for K4Mac only K4PC
|
||||||
|
# entropy = SHA1(keyhash)
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the charMap5 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using charMap5 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
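
A worked example of the offset rule described above, with a made-up record length: for a 99-character record, int(99/3) = 33, the largest prime not exceeding 33 is 31, so 99 - 31 = 68 characters are rotated from the front to the end before decoding:

# illustrative only: offset calculation for a made-up 99-character record
contlen = 99
noffset = contlen - primes(int(contlen/3))[-1]    # 99 - 31 = 68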
|
||||||
|
|
||||||
|
# decode using charMap5 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,charMap5)
|
||||||
|
cleartext = CryptUnprotectDataV2(encryptedValue)
|
||||||
|
# Debugging
|
||||||
|
# print keyname
|
||||||
|
# print cleartext
|
||||||
|
# print cleartext.encode('hex')
|
||||||
|
# print
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
# K4PC Windows specific routines
|
# K4PC Windows specific routines
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys, os
|
import sys, os
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
@@ -10,30 +12,86 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
|||||||
|
|
||||||
import _winreg as winreg
|
import _winreg as winreg
|
||||||
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
MAX_PATH = 255
|
MAX_PATH = 255
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
kernel32 = windll.kernel32
|
||||||
advapi32 = windll.advapi32
|
advapi32 = windll.advapi32
|
||||||
crypt32 = windll.crypt32
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
import traceback
|
||||||
|
|
||||||
#
|
# crypto digestroutines
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
import hashlib
|
||||||
#
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
|
||||||
|
# simple primes table (<= n) calculator
|
||||||
|
def primes(n):
|
||||||
|
if n==2: return [2]
|
||||||
|
elif n<2: return []
|
||||||
|
s=range(3,n+1,2)
|
||||||
|
mroot = n ** 0.5
|
||||||
|
half=(n+1)/2-1
|
||||||
|
i=0
|
||||||
|
m=3
|
||||||
|
while m <= mroot:
|
||||||
|
if s[i]:
|
||||||
|
j=(m*m-3)/2
|
||||||
|
s[j]=0
|
||||||
|
while j<half:
|
||||||
|
s[j]=0
|
||||||
|
j+=m
|
||||||
|
i=i+1
|
||||||
|
m=2*i+3
|
||||||
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
# Various character maps used to decrypt kindle info values.
|
||||||
|
# Probably supposed to act as obfuscation
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
|
||||||
|
|
||||||
#
|
|
||||||
# Exceptions for all the problems that might happen during the script
|
|
||||||
#
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# interface with Windows OS Routines
|
||||||
class DataBlob(Structure):
|
class DataBlob(Structure):
|
||||||
_fields_ = [('cbData', c_uint),
|
_fields_ = [('cbData', c_uint),
|
||||||
('pbData', c_void_p)]
|
('pbData', c_void_p)]
|
||||||
@@ -64,47 +122,187 @@ def GetVolumeSerialNumber():
|
|||||||
return GetVolumeSerialNumber
|
return GetVolumeSerialNumber
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
return GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def getLastError():
|
||||||
|
GetLastError = kernel32.GetLastError
|
||||||
|
GetLastError.argtypes = None
|
||||||
|
GetLastError.restype = c_uint
|
||||||
|
def getLastError():
|
||||||
|
return GetLastError()
|
||||||
|
return getLastError
|
||||||
|
getLastError = getLastError()
|
||||||
|
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
GetUserNameW = advapi32.GetUserNameW
|
GetUserNameW = advapi32.GetUserNameW
|
||||||
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
||||||
GetUserNameW.restype = c_uint
|
GetUserNameW.restype = c_uint
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
buffer = create_unicode_buffer(32)
|
buffer = create_unicode_buffer(2)
|
||||||
size = c_uint(len(buffer))
|
size = c_uint(len(buffer))
|
||||||
while not GetUserNameW(buffer, byref(size)):
|
while not GetUserNameW(buffer, byref(size)):
|
||||||
|
errcd = getLastError()
|
||||||
|
if errcd == 234:
|
||||||
|
# bad wine implementation up through wine 1.3.21
|
||||||
|
return "AlternateUserName"
|
||||||
buffer = create_unicode_buffer(len(buffer) * 2)
|
buffer = create_unicode_buffer(len(buffer) * 2)
|
||||||
size.value = len(buffer)
|
size.value = len(buffer)
|
||||||
return buffer.value.encode('utf-16-le')[::2]
|
return buffer.value.encode('utf-16-le')[::2]
|
||||||
return GetUserName
|
return GetUserName
|
||||||
GetUserName = GetUserName()
|
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
|
||||||
def CryptUnprotectData():
|
def CryptUnprotectData():
|
||||||
_CryptUnprotectData = crypt32.CryptUnprotectData
|
_CryptUnprotectData = crypt32.CryptUnprotectData
|
||||||
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
||||||
c_void_p, c_void_p, c_uint, DataBlob_p]
|
c_void_p, c_void_p, c_uint, DataBlob_p]
|
||||||
_CryptUnprotectData.restype = c_uint
|
_CryptUnprotectData.restype = c_uint
|
||||||
def CryptUnprotectData(indata, entropy):
|
def CryptUnprotectData(indata, entropy, flags):
|
||||||
indatab = create_string_buffer(indata)
|
indatab = create_string_buffer(indata)
|
||||||
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
||||||
entropyb = create_string_buffer(entropy)
|
entropyb = create_string_buffer(entropy)
|
||||||
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
||||||
outdata = DataBlob()
|
outdata = DataBlob()
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
None, None, 0, byref(outdata)):
|
None, None, flags, byref(outdata)):
|
||||||
raise DrmException("Failed to Unprotect Data")
|
raise DrmException("Failed to Unprotect Data")
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
return CryptUnprotectData
|
return CryptUnprotectData
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
#
|
|
||||||
# Locate and open the Kindle.info file.
|
# Locate all of the kindle-info style files and return as list
|
||||||
#
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
def openKindleInfo(kInfoFile=None):
|
|
||||||
if kInfoFile == None:
|
|
||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
return open(path+'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info','r')
|
|
||||||
|
# first look for older kindle-info files
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No kindle.info files have been found.')
|
||||||
else:
|
else:
|
||||||
return open(kInfoFile, 'r')
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for newer (K4PC 1.5.0 and later) rainier.2.1.1.kinf file
|
||||||
|
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.5.X .kinf files have been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.6.X .kinf files have been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
|
||||||
|
# database of keynames and values
|
||||||
|
def getDBfromFile(kInfoFile):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
|
DB = {}
|
||||||
|
cnt = 0
|
||||||
|
infoReader = open(kInfoFile, 'r')
|
||||||
|
hdr = infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
|
||||||
|
if data.find('{') != -1 :
|
||||||
|
|
||||||
|
# older style kindle-info file
|
||||||
|
items = data.split('{')
|
||||||
|
for item in items:
|
||||||
|
if item != '':
|
||||||
|
keyhash, rawdata = item.split(':')
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap2) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, "", 0)
|
||||||
|
cnt = cnt + 1
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# else newer style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the charMap5 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using charMap5 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using Map5 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,charMap5)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -24,7 +24,7 @@
|
|||||||
# 0.14 - Working out when the extra data flags are present has been problematic
|
# 0.14 - Working out when the extra data flags are present has been problematic
|
||||||
# Versions 7 through 9 have tried to tweak the conditions, but have been
|
# Versions 7 through 9 have tried to tweak the conditions, but have been
|
||||||
# only partially successful. Closer examination of lots of sample
|
# only partially successful. Closer examination of lots of sample
|
||||||
# files reveals that a confusin has arisen because trailing data entries
|
# files reveals that a confusion has arisen because trailing data entries
|
||||||
# are not encrypted, but it turns out that the multibyte entries
|
# are not encrypted, but it turns out that the multibyte entries
|
||||||
# in utf8 file are encrypted. (Although neither kind gets compressed.)
|
# in utf8 file are encrypted. (Although neither kind gets compressed.)
|
||||||
# This knowledge leads to a simplification of the test for the
|
# This knowledge leads to a simplification of the test for the
|
||||||
@@ -39,12 +39,25 @@
|
|||||||
# Removed the disabled Calibre plug-in code
|
# Removed the disabled Calibre plug-in code
|
||||||
# Permit use of 8-digit PIDs
|
# Permit use of 8-digit PIDs
|
||||||
# 0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
|
# 0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
|
||||||
|
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
|
||||||
|
# 0.21 - Added support for multiple pids
|
||||||
|
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
||||||
|
# 0.23 - fixed problem with older files with no EXTH section
|
||||||
|
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
||||||
|
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
|
||||||
|
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
|
||||||
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encryption info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
|
||||||
__version__ = '0.19'
|
__version__ = '0.32'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import struct
|
|
||||||
import binascii
|
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -54,10 +67,20 @@ class Unbuffered:
|
|||||||
self.stream.flush()
|
self.stream.flush()
|
||||||
def __getattr__(self, attr):
|
def __getattr__(self, attr):
|
||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
|
||||||
|
import os
|
||||||
|
import struct
|
||||||
|
import binascii
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# MobiBook Utility Routines
|
||||||
|
#
|
||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
sum1 = 0;
|
||||||
@@ -69,7 +92,6 @@ def PC1(key, src, decryption=True):
|
|||||||
wkey = []
|
wkey = []
|
||||||
for i in xrange(8):
|
for i in xrange(8):
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
|
||||||
dst = ""
|
dst = ""
|
||||||
for i in xrange(len(src)):
|
for i in xrange(len(src)):
|
||||||
temp1 = 0;
|
temp1 = 0;
|
||||||
@@ -130,7 +152,9 @@ def getSizeOfTrailingDataEntries(ptr, size, flags):
|
|||||||
num += (ord(ptr[size - num - 1]) & 0x3) + 1
|
num += (ord(ptr[size - num - 1]) & 0x3) + 1
|
||||||
return num
|
return num
|
||||||
|
|
||||||
class DrmStripper:
|
|
||||||
|
|
||||||
|
class MobiBook:
|
||||||
def loadSection(self, section):
|
def loadSection(self, section):
|
||||||
if (section + 1 == self.num_sections):
|
if (section + 1 == self.num_sections):
|
||||||
endoff = len(self.data_file)
|
endoff = len(self.data_file)
|
||||||
@@ -139,6 +163,115 @@ class DrmStripper:
|
|||||||
off = self.sections[section][0]
|
off = self.sections[section][0]
|
||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
|
# initial sanity check on file
|
||||||
|
self.data_file = file(infile, 'rb').read()
|
||||||
|
self.mobi_data = ''
|
||||||
|
self.header = self.data_file[0:78]
|
||||||
|
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
||||||
|
raise DrmException("invalid file format")
|
||||||
|
self.magic = self.header[0x3C:0x3C+8]
|
||||||
|
self.crypto_type = -1
|
||||||
|
|
||||||
|
# build up section offset and flag info
|
||||||
|
self.num_sections, = struct.unpack('>H', self.header[76:78])
|
||||||
|
self.sections = []
|
||||||
|
for i in xrange(self.num_sections):
|
||||||
|
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data_file[78+i*8:78+i*8+8])
|
||||||
|
flags, val = a1, a2<<16|a3<<8|a4
|
||||||
|
self.sections.append( (offset, flags, val) )
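
Each entry in the PalmDB section table is 8 bytes, read here as a 4-byte big-endian offset, a flag byte and a 3-byte value. A small illustration with made-up bytes, decoded the same way as the loop above:

# illustrative only: decode one made-up 8-byte section table entry
import struct
entry = '\x00\x00\x10\x00\x00\x00\x00\x01'
offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', entry)
flags, val = a1, a2 << 16 | a3 << 8 | a4    # offset 0x1000, flags 0, val 1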
|
||||||
|
|
||||||
|
# parse information from section 0
|
||||||
|
self.sect = self.loadSection(0)
|
||||||
|
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
||||||
|
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
|
||||||
|
|
||||||
|
if self.magic == 'TEXtREAd':
|
||||||
|
print "Book has format: ", self.magic
|
||||||
|
self.extra_data_flags = 0
|
||||||
|
self.mobi_length = 0
|
||||||
|
self.mobi_version = -1
|
||||||
|
self.meta_array = {}
|
||||||
|
return
|
||||||
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
|
self.extra_data_flags = 0
|
||||||
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
|
if (self.compression != 17480):
|
||||||
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
|
# if exth region exists parse it for metadata array
|
||||||
|
self.meta_array = {}
|
||||||
|
try:
|
||||||
|
exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
|
||||||
|
exth = 'NONE'
|
||||||
|
if exth_flag & 0x40:
|
||||||
|
exth = self.sect[16 + self.mobi_length:]
|
||||||
|
if (len(exth) >= 4) and (exth[:4] == 'EXTH'):
|
||||||
|
nitems, = struct.unpack('>I', exth[8:12])
|
||||||
|
pos = 12
|
||||||
|
for i in xrange(nitems):
|
||||||
|
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
||||||
|
content = exth[pos + 8: pos + size]
|
||||||
|
self.meta_array[type] = content
|
||||||
|
# reset the text to speech flag and clipping limit, if present
|
||||||
|
if type == 401 and size == 9:
|
||||||
|
# set clipping limit to 100%
|
||||||
|
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
|
||||||
|
elif type == 404 and size == 9:
|
||||||
|
# make sure text to speech is enabled
|
||||||
|
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
|
||||||
|
# print type, size, content, content.encode('hex')
|
||||||
|
pos += size
|
||||||
|
except:
|
||||||
|
self.meta_array = {}
|
||||||
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
|
title = ''
|
||||||
|
if 503 in self.meta_array:
|
||||||
|
title = self.meta_array[503]
|
||||||
|
else :
|
||||||
|
toff, tlen = struct.unpack('>II', self.sect[0x54:0x5c])
|
||||||
|
tend = toff + tlen
|
||||||
|
title = self.sect[toff:tend]
|
||||||
|
if title == '':
|
||||||
|
title = self.header[:32]
|
||||||
|
title = title.split("\0")[0]
|
||||||
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
|
def getPIDMetaInfo(self):
|
||||||
|
rec209 = ''
|
||||||
|
token = ''
|
||||||
|
if 209 in self.meta_array:
|
||||||
|
rec209 = self.meta_array[209]
|
||||||
|
data = rec209
|
||||||
|
# The 209 data comes in five byte groups. Interpret the last four bytes
|
||||||
|
# of each group as a big endian unsigned integer to get a key value
|
||||||
|
# if that key exists in the meta_array, append its contents to the token
|
||||||
|
for i in xrange(0,len(data),5):
|
||||||
|
val, = struct.unpack('>I',data[i+1:i+5])
|
||||||
|
sval = self.meta_array.get(val,'')
|
||||||
|
token += sval
|
||||||
|
return rec209, token
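
To make the comment above concrete, here is a made-up record-209 blob containing two five-byte groups; the last four bytes of each group are read as big-endian keys into meta_array:

# illustrative only: walk a made-up record 209 the way getPIDMetaInfo() does
import struct
data = '\x00' + struct.pack('>I', 113) + '\x00' + struct.pack('>I', 503)
keys = [struct.unpack('>I', data[i+1:i+5])[0] for i in xrange(0, len(data), 5)]
# keys == [113, 503]; the token is the concatenation of meta_array.get(k, '') for each key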
|
||||||
|
|
||||||
def patch(self, off, new):
|
def patch(self, off, new):
|
||||||
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
|
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
|
||||||
|
|
||||||
@@ -151,134 +284,155 @@ class DrmStripper:
|
|||||||
assert off + in_off + len(new) <= endoff
|
assert off + in_off + len(new) <= endoff
|
||||||
self.patch(off + in_off, new)
|
self.patch(off + in_off, new)
|
||||||
|
|
||||||
def parseDRM(self, data, count, pid):
|
def parseDRM(self, data, count, pidlist):
|
||||||
pid = pid.ljust(16,'\0')
|
found_key = None
|
||||||
keyvec1 = "\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96"
|
keyvec1 = "\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96"
|
||||||
temp_key = PC1(keyvec1, pid, False)
|
for pid in pidlist:
|
||||||
|
bigpid = pid.ljust(16,'\0')
|
||||||
|
temp_key = PC1(keyvec1, bigpid, False)
|
||||||
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
||||||
found_key = None
|
found_key = None
|
||||||
for i in xrange(count):
|
for i in xrange(count):
|
||||||
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
||||||
|
if cksum == temp_key_sum:
|
||||||
cookie = PC1(temp_key, cookie)
|
cookie = PC1(temp_key, cookie)
|
||||||
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
||||||
if verification == ver and cksum == temp_key_sum and (flags & 0x1F) == 1:
|
if verification == ver and (flags & 0x1F) == 1:
|
||||||
found_key = finalkey
|
found_key = finalkey
|
||||||
break
|
break
|
||||||
|
if found_key != None:
|
||||||
|
break
|
||||||
if not found_key:
|
if not found_key:
|
||||||
# Then try the default encoding that doesn't require a PID
|
# Then try the default encoding that doesn't require a PID
|
||||||
|
pid = "00000000"
|
||||||
temp_key = keyvec1
|
temp_key = keyvec1
|
||||||
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
||||||
for i in xrange(count):
|
for i in xrange(count):
|
||||||
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
||||||
|
if cksum == temp_key_sum:
|
||||||
cookie = PC1(temp_key, cookie)
|
cookie = PC1(temp_key, cookie)
|
||||||
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
||||||
if verification == ver and cksum == temp_key_sum:
|
if verification == ver:
|
||||||
found_key = finalkey
|
found_key = finalkey
|
||||||
break
|
break
|
||||||
return found_key
|
return [found_key,pid]
|
||||||
|
|
||||||
def __init__(self, data_file, pid):
|
def getMobiFile(self, outpath):
|
||||||
if len(pid)==10:
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
if checksumPid(pid[0:-2]) != pid:
|
|
||||||
raise DrmException("invalid PID checksum")
|
|
||||||
pid = pid[0:-2]
|
|
||||||
elif len(pid)==8:
|
|
||||||
print "PID without checksum given. With checksum PID is "+checksumPid(pid)
|
|
||||||
else:
|
|
||||||
raise DrmException("Invalid PID length")
|
|
||||||
|
|
||||||
self.data_file = data_file
|
def getPrintReplica(self):
|
||||||
header = data_file[0:72]
|
return self.print_replica
|
||||||
if header[0x3C:0x3C+8] != 'BOOKMOBI':
|
|
||||||
raise DrmException("invalid file format")
|
|
||||||
self.num_sections, = struct.unpack('>H', data_file[76:78])
|
|
||||||
|
|
||||||
self.sections = []
|
def processBook(self, pidlist):
|
||||||
for i in xrange(self.num_sections):
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', data_file[78+i*8:78+i*8+8])
|
print 'Crypto Type is: ', crypto_type
|
||||||
flags, val = a1, a2<<16|a3<<8|a4
|
self.crypto_type = crypto_type
|
||||||
self.sections.append( (offset, flags, val) )
|
|
||||||
|
|
||||||
sect = self.loadSection(0)
|
|
||||||
records, = struct.unpack('>H', sect[0x8:0x8+2])
|
|
||||||
mobi_length, = struct.unpack('>L',sect[0x14:0x18])
|
|
||||||
mobi_version, = struct.unpack('>L',sect[0x68:0x6C])
|
|
||||||
extra_data_flags = 0
|
|
||||||
print "MOBI header version = %d, length = %d" %(mobi_version, mobi_length)
|
|
||||||
if (mobi_length >= 0xE4) and (mobi_version >= 5):
|
|
||||||
extra_data_flags, = struct.unpack('>H', sect[0xF2:0xF4])
|
|
||||||
print "Extra Data Flags = %d" %extra_data_flags
|
|
||||||
if mobi_version <= 5:
|
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 5 and below
|
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
|
||||||
extra_data_flags &= 0xFFFE
|
|
||||||
|
|
||||||
crypto_type, = struct.unpack('>H', sect[0xC:0xC+2])
|
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
|
self.mobi_data = self.data_file
|
||||||
|
return
|
||||||
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
|
goodpids = []
|
||||||
|
for pid in pidlist:
|
||||||
|
if len(pid)==10:
|
||||||
|
if checksumPid(pid[0:-2]) != pid:
|
||||||
|
print "Warning: PID " + pid + " has incorrect checksum, should have been "+checksumPid(pid[0:-2])
|
||||||
|
goodpids.append(pid[0:-2])
|
||||||
|
elif len(pid)==8:
|
||||||
|
goodpids.append(pid)
|
||||||
|
|
||||||
|
if self.crypto_type == 1:
|
||||||
|
t1_keyvec = "QDCVEPMU675RUBSZ"
|
||||||
|
if self.magic == 'TEXtREAd':
|
||||||
|
bookkey_data = self.sect[0x0E:0x0E+16]
|
||||||
|
elif self.mobi_version < 0:
|
||||||
|
bookkey_data = self.sect[0x90:0x90+16]
|
||||||
else:
|
else:
|
||||||
if crypto_type == 1:
|
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
|
||||||
raise DrmException("cannot decode Mobipocket encryption type 1")
|
pid = "00000000"
|
||||||
if crypto_type != 2:
|
found_key = PC1(t1_keyvec, bookkey_data)
|
||||||
raise DrmException("unknown encryption type: %d" % crypto_type)
|
else :
|
||||||
|
|
||||||
# calculate the keys
|
# calculate the keys
|
||||||
drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', sect[0xA8:0xA8+16])
|
drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', self.sect[0xA8:0xA8+16])
|
||||||
if drm_count == 0:
|
if drm_count == 0:
|
||||||
raise DrmException("no PIDs found in this file")
|
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
||||||
found_key = self.parseDRM(sect[drm_ptr:drm_ptr+drm_size], drm_count, pid)
|
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
||||||
if not found_key:
|
if not found_key:
|
||||||
raise DrmException("no key found. maybe the PID is incorrect")
|
raise DrmException("No key found. Most likely the correct PID has not been given.")
|
||||||
|
|
||||||
# kill the drm keys
|
# kill the drm keys
|
||||||
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
||||||
# kill the drm pointers
|
# kill the drm pointers
|
||||||
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
|
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
|
||||||
|
|
||||||
|
if pid=="00000000":
|
||||||
|
print "File has default encryption, no specific PID."
|
||||||
|
else:
|
||||||
|
print "File is encoded with PID "+checksumPid(pid)+"."
|
||||||
|
|
||||||
# clear the crypto type
|
# clear the crypto type
|
||||||
self.patchSection(0, "\0" * 2, 0xC)
|
self.patchSection(0, "\0" * 2, 0xC)
|
||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
new_data = self.data_file[:self.sections[1][0]]
|
self.mobi_data = self.data_file[:self.sections[1][0]]
|
||||||
for i in xrange(1, records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
new_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
self.mobi_data += decoded_data
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
new_data += data[-extra_size:]
|
self.mobi_data += data[-extra_size:]
|
||||||
#self.patchSection(i, PC1(found_key, data[0:len(data) - extra_size]))
|
if self.num_sections > self.records+1:
|
||||||
if self.num_sections > records+1:
|
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
|
||||||
new_data += self.data_file[self.sections[records+1][0]:]
|
|
||||||
self.data_file = new_data
|
|
||||||
print "done"
|
print "done"
|
||||||
|
return
|
||||||
def getResult(self):
|
|
||||||
return self.data_file
|
|
||||||
|
|
||||||
def getUnencryptedBook(infile,pid):
|
def getUnencryptedBook(infile,pid):
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
if not os.path.isfile(infile):
|
||||||
data_file = file(infile, 'rb').read()
|
raise DrmException('Input File Not Found')
|
||||||
strippedFile = DrmStripper(data_file, pid)
|
book = MobiBook(infile)
|
||||||
return strippedFile.getResult()
|
book.processBook([pid])
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
|
def getUnencryptedBookWithList(infile,pidlist):
|
||||||
|
if not os.path.isfile(infile):
|
||||||
|
raise DrmException('Input File Not Found')
|
||||||
|
book = MobiBook(infile)
|
||||||
|
book.processBook(pidlist)
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> <PID>" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
else:
|
else:
|
||||||
infile = argv[1]
|
infile = argv[1]
|
||||||
outfile = argv[2]
|
outfile = argv[2]
|
||||||
pid = argv[3]
|
if len(argv) == 4:
|
||||||
|
pidlist = argv[3].split(',')
|
||||||
|
else:
|
||||||
|
pidlist = {}
|
||||||
try:
|
try:
|
||||||
stripped_file = getUnencryptedBook(infile, pid)
|
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
||||||
file(outfile, 'wb').write(stripped_file)
|
file(outfile, 'wb').write(stripped_file)
|
||||||
except DrmException, e:
|
except DrmException, e:
|
||||||
print "Error: %s" % e
|
print "Error: %s" % e
|
||||||
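For readers trying the new multi-PID interface, a minimal invocation sketch (the script name mobidedrm.py, the file names and the eight-character PIDs below are all made up for illustration):

    python mobidedrm.py TheBook.azw TheBook_nodrm.mobi A1B2C3D4,E5F6G7H8

    # or from another Python 2 script, assuming the module is importable:
    import mobidedrm
    data = mobidedrm.getUnencryptedBookWithList('TheBook.azw', ['A1B2C3D4', 'E5F6G7H8'])
    file('TheBook_nodrm.mobi', 'wb').write(data)

Each PID in the list is tried in turn; eight-character PIDs skip the checksum warning that ten-character ones can trigger.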
|
|||||||
Binary file not shown.
@@ -24,17 +24,17 @@
|
|||||||
<key>CFBundleExecutable</key>
|
<key>CFBundleExecutable</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleGetInfoString</key>
|
<key>CFBundleGetInfoString</key>
|
||||||
<string>DeDRM 1.2, Copyright © 2010 by Apprentice Alf.</string>
|
<string>DeDRM 3.1, Written 2010–2011 by Apprentice Alf and others.</string>
|
||||||
<key>CFBundleIconFile</key>
|
<key>CFBundleIconFile</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleInfoDictionaryVersion</key>
|
<key>CFBundleInfoDictionaryVersion</key>
|
||||||
<string>6.0</string>
|
<string>6.0</string>
|
||||||
<key>CFBundleName</key>
|
<key>CFBundleName</key>
|
||||||
<string>DeDRM</string>
|
<string>DeDRM 3.1</string>
|
||||||
<key>CFBundlePackageType</key>
|
<key>CFBundlePackageType</key>
|
||||||
<string>APPL</string>
|
<string>APPL</string>
|
||||||
<key>CFBundleShortVersionString</key>
|
<key>CFBundleShortVersionString</key>
|
||||||
<string>1.2</string>
|
<string>3.1</string>
|
||||||
<key>CFBundleSignature</key>
|
<key>CFBundleSignature</key>
|
||||||
<string>dplt</string>
|
<string>dplt</string>
|
||||||
<key>LSMinimumSystemVersion</key>
|
<key>LSMinimumSystemVersion</key>
|
||||||
@@ -43,14 +43,18 @@
|
|||||||
<true/>
|
<true/>
|
||||||
<key>WindowState</key>
|
<key>WindowState</key>
|
||||||
<dict>
|
<dict>
|
||||||
|
<key>dividerCollapsed</key>
|
||||||
|
<false/>
|
||||||
|
<key>eventLogLevel</key>
|
||||||
|
<integer>-1</integer>
|
||||||
<key>name</key>
|
<key>name</key>
|
||||||
<string>ScriptWindowState</string>
|
<string>ScriptWindowState</string>
|
||||||
<key>positionOfDivider</key>
|
<key>positionOfDivider</key>
|
||||||
<real>885</real>
|
<real>460</real>
|
||||||
<key>savedFrame</key>
|
<key>savedFrame</key>
|
||||||
<string>1507 -64 1262 964 1440 -150 1680 1050 </string>
|
<string>39 106 1316 746 0 0 1440 878 </string>
|
||||||
<key>selectedTabView</key>
|
<key>selectedTabView</key>
|
||||||
<string>result</string>
|
<string>event log</string>
|
||||||
</dict>
|
</dict>
|
||||||
</dict>
|
</dict>
|
||||||
</plist>
|
</plist>
|
||||||
|
|||||||
Binary file not shown.
@@ -0,0 +1,832 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
# For use with Topaz Scripts Version 2.6
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import os
|
||||||
|
import getopt
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Get a 7 bit encoded number from string. The most
|
||||||
|
# significant byte comes first and has the high bit (8th) set
|
||||||
|
|
||||||
|
def readEncodedNumber(file):
|
||||||
|
flag = False
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
|
||||||
|
if data == 0xFF:
|
||||||
|
flag = True
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
|
||||||
|
if data >= 0x80:
|
||||||
|
datax = (data & 0x7F)
|
||||||
|
while data >= 0x80 :
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
|
data = datax
|
||||||
|
|
||||||
|
if flag:
|
||||||
|
data = -data
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
# returns a binary string that encodes a number into 7 bits
|
||||||
|
# most significant byte first which has the high bit set
|
||||||
|
|
||||||
|
def encodeNumber(number):
|
||||||
|
result = ""
|
||||||
|
negative = False
|
||||||
|
flag = 0
|
||||||
|
|
||||||
|
if number < 0 :
|
||||||
|
number = -number + 1
|
||||||
|
negative = True
|
||||||
|
|
||||||
|
while True:
|
||||||
|
byte = number & 0x7F
|
||||||
|
number = number >> 7
|
||||||
|
byte += flag
|
||||||
|
result += chr(byte)
|
||||||
|
flag = 0x80
|
||||||
|
if number == 0 :
|
||||||
|
if (byte == 0xFF and negative == False) :
|
||||||
|
result += chr(0x80)
|
||||||
|
break
|
||||||
|
|
||||||
|
if negative:
|
||||||
|
result += chr(0xFF)
|
||||||
|
|
||||||
|
return result[::-1]
|
||||||
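A tiny round-trip sketch of the 7-bit scheme described above, assuming encodeNumber and readEncodedNumber from this file are in scope (positive values only; negative values take the separate 0xFF-prefix path):

    from StringIO import StringIO

    enc = encodeNumber(300)                 # '\x82\x2c': 0x82 has its high bit set, so another byte follows
    val = readEncodedNumber(StringIO(enc))  # 0x02 and 0x2c recombine as (2 << 7) + 44 = 300
    assert enc == '\x82\x2c' and val == 300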
|
|
||||||
|
|
||||||
|
|
||||||
|
# create / read a length prefixed string from the file
|
||||||
|
|
||||||
|
def lengthPrefixString(data):
|
||||||
|
return encodeNumber(len(data))+data
|
||||||
|
|
||||||
|
def readString(file):
|
||||||
|
stringLength = readEncodedNumber(file)
|
||||||
|
if (stringLength == None):
|
||||||
|
return ""
|
||||||
|
sv = file.read(stringLength)
|
||||||
|
if (len(sv) != stringLength):
|
||||||
|
return ""
|
||||||
|
return unpack(str(stringLength)+"s",sv)[0]
|
||||||
|
|
||||||
|
|
||||||
|
# convert a binary string generated by encodeNumber (7 bit encoded number)
|
||||||
|
# to the value you would find inside the page*.dat files to be processed
|
||||||
|
|
||||||
|
def convert(i):
|
||||||
|
result = ''
|
||||||
|
val = encodeNumber(i)
|
||||||
|
for j in xrange(len(val)):
|
||||||
|
c = ord(val[j:j+1])
|
||||||
|
result += '%02x' % c
|
||||||
|
return result
|
||||||
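Continuing that example, convert() just re-renders those encoded bytes as hex digits, which is handy when eyeballing raw page*.dat content:

    print convert(300)    # '822c' -- the two encoded bytes 0x82 0x2c as hex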
|
|
||||||
|
|
||||||
|
|
||||||
|
# the complete string table used to store all book text content
|
||||||
|
# as well as the xml tokens and values that make sense out of it
|
||||||
|
|
||||||
|
class Dictionary(object):
|
||||||
|
def __init__(self, dictFile):
|
||||||
|
self.filename = dictFile
|
||||||
|
self.size = 0
|
||||||
|
self.fo = file(dictFile,'rb')
|
||||||
|
self.stable = []
|
||||||
|
self.size = readEncodedNumber(self.fo)
|
||||||
|
for i in xrange(self.size):
|
||||||
|
self.stable.append(self.escapestr(readString(self.fo)))
|
||||||
|
self.pos = 0
|
||||||
|
|
||||||
|
def escapestr(self, str):
|
||||||
|
str = str.replace('&','&amp;')
|
||||||
|
str = str.replace('<','&lt;')
|
||||||
|
str = str.replace('>','&gt;')
|
||||||
|
str = str.replace('=','&#61;')
|
||||||
|
return str
|
||||||
|
|
||||||
|
def lookup(self,val):
|
||||||
|
if ((val >= 0) and (val < self.size)) :
|
||||||
|
self.pos = val
|
||||||
|
return self.stable[self.pos]
|
||||||
|
else:
|
||||||
|
print "Error - %d outside of string table limits" % val
|
||||||
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
|
def getSize(self):
|
||||||
|
return self.size
|
||||||
|
|
||||||
|
def getPos(self):
|
||||||
|
return self.pos
|
||||||
|
|
||||||
|
def dumpDict(self):
|
||||||
|
for i in xrange(self.size):
|
||||||
|
print "%d %s %s" % (i, convert(i), self.stable[i])
|
||||||
|
return
|
||||||
|
|
||||||
|
# parses the xml snippets that are represented by each page*.dat file.
|
||||||
|
# also parses the other0.dat file - the main stylesheet
|
||||||
|
# and information used to inject the xml snippets into page*.dat files
|
||||||
|
|
||||||
|
class PageParser(object):
|
||||||
|
def __init__(self, filename, dict, debug, flat_xml):
|
||||||
|
self.fo = file(filename,'rb')
|
||||||
|
self.id = os.path.basename(filename).replace('.dat','')
|
||||||
|
self.dict = dict
|
||||||
|
self.debug = debug
|
||||||
|
self.flat_xml = flat_xml
|
||||||
|
self.tagpath = []
|
||||||
|
self.doc = []
|
||||||
|
self.snippetList = []
|
||||||
|
|
||||||
|
|
||||||
|
# hash table used to enable the decoding process
|
||||||
|
# This has all been developed by trial and error so it may still have omissions or
|
||||||
|
# contain errors
|
||||||
|
# Format:
|
||||||
|
# tag : (number of arguments, argument type, subtags present, special case of subtags present when escaped)
|
||||||
|
|
||||||
|
token_tags = {
|
||||||
|
'x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'rootID' : (1, 'scalar_number', 0, 0),
|
||||||
|
'stemID' : (1, 'scalar_number', 0, 0),
|
||||||
|
'type' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'info' : (0, 'number', 1, 0),
|
||||||
|
|
||||||
|
'info.word' : (0, 'number', 1, 1),
|
||||||
|
'info.word.ocrText' : (1, 'text', 0, 0),
|
||||||
|
'info.word.firstGlyph' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.lastGlyph' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.bl' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.link_id' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'glyph' : (0, 'number', 1, 1),
|
||||||
|
'glyph.x' : (1, 'number', 0, 0),
|
||||||
|
'glyph.y' : (1, 'number', 0, 0),
|
||||||
|
'glyph.glyphID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'dehyphen' : (0, 'number', 1, 1),
|
||||||
|
'dehyphen.rootID' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.stemID' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.stemPage' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.sh' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'links' : (0, 'number', 1, 1),
|
||||||
|
'links.page' : (1, 'number', 0, 0),
|
||||||
|
'links.rel' : (1, 'number', 0, 0),
|
||||||
|
'links.row' : (1, 'number', 0, 0),
|
||||||
|
'links.title' : (1, 'text', 0, 0),
|
||||||
|
'links.href' : (1, 'text', 0, 0),
|
||||||
|
'links.type' : (1, 'text', 0, 0),
|
||||||
|
|
||||||
|
'paraCont' : (0, 'number', 1, 1),
|
||||||
|
'paraCont.rootID' : (1, 'number', 0, 0),
|
||||||
|
'paraCont.stemID' : (1, 'number', 0, 0),
|
||||||
|
'paraCont.stemPage' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'paraStems' : (0, 'number', 1, 1),
|
||||||
|
'paraStems.stemID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'wordStems' : (0, 'number', 1, 1),
|
||||||
|
'wordStems.stemID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'empty' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
|
'page' : (1, 'snippets', 1, 0),
|
||||||
|
'page.pageid' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.pagelabel' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'page.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'page.startID' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'group' : (1, 'snippets', 1, 0),
|
||||||
|
'group.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'group._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'region' : (1, 'snippets', 1, 0),
|
||||||
|
'region.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'region.x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'empty_text_region' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
|
'img' : (1, 'snippets', 1, 0),
|
||||||
|
'img.x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.src' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.color_src' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'paragraph' : (1, 'snippets', 1, 0),
|
||||||
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word_semantic.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'word_semantic.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'word' : (1, 'snippets', 1, 0),
|
||||||
|
'word.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word.firstGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
'word.lastGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'span' : (1, 'snippets', 1, 0),
|
||||||
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'extratokens.firstGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
'extratokens.lastGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'glyph.h' : (1, 'number', 0, 0),
|
||||||
|
'glyph.w' : (1, 'number', 0, 0),
|
||||||
|
'glyph.use' : (1, 'number', 0, 0),
|
||||||
|
'glyph.vtx' : (1, 'number', 0, 1),
|
||||||
|
'glyph.len' : (1, 'number', 0, 1),
|
||||||
|
'glyph.dpi' : (1, 'number', 0, 0),
|
||||||
|
'vtx' : (0, 'number', 1, 1),
|
||||||
|
'vtx.x' : (1, 'number', 0, 0),
|
||||||
|
'vtx.y' : (1, 'number', 0, 0),
|
||||||
|
'len' : (0, 'number', 1, 1),
|
||||||
|
'len.n' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'book' : (1, 'snippets', 1, 0),
|
||||||
|
'version' : (1, 'snippets', 1, 0),
|
||||||
|
'version.FlowEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.FlowEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Schema_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Schema_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Topaz_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.WordDetailEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.WordDetailEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.ZoneEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.ZoneEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.chapterheaders' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.creation_date' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.header_footer' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.init_from_ocr' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.letter_insertion' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_convert' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_reflow' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_transform' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.findlists' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.page_num' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.page_type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.bad_text' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.glyph_mismatch' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.margins' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.staggered_lines' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.paragraph_continuation' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.toc' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'stylesheet' : (1, 'snippets', 1, 0),
|
||||||
|
'style' : (1, 'snippets', 1, 0),
|
||||||
|
'style._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style._parent_type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style._after_class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'rule' : (1, 'snippets', 1, 0),
|
||||||
|
'rule.attr' : (1, 'scalar_text', 0, 0),
|
||||||
|
'rule.value' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'original' : (0, 'number', 1, 1),
|
||||||
|
'original.pnum' : (1, 'number', 0, 0),
|
||||||
|
'original.pid' : (1, 'text', 0, 0),
|
||||||
|
'pages' : (0, 'number', 1, 1),
|
||||||
|
'pages.ref' : (1, 'number', 0, 0),
|
||||||
|
'pages.id' : (1, 'number', 0, 0),
|
||||||
|
'startID' : (0, 'number', 1, 1),
|
||||||
|
'startID.page' : (1, 'number', 0, 0),
|
||||||
|
'startID.id' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
}
|
||||||
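Reading one entry against the format comment above may help; this is only an illustrative sketch, not part of the original script:

    num_args, argtype, subtags, splcase = PageParser.token_tags['region.x']
    # (1, 'scalar_number', 0, 0): one scalar numeric argument, no subtags,
    # and no special escaped-subtag handling.

    # procToken() then returns each decoded tag as a plain four-element list,
    # e.g. ['region.x', [], 'scalar_number', [412]]  (412 is an invented value)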
|
|
||||||
|
|
||||||
|
# full tag path record keeping routines
|
||||||
|
def tag_push(self, token):
|
||||||
|
self.tagpath.append(token)
|
||||||
|
def tag_pop(self):
|
||||||
|
if len(self.tagpath) > 0 :
|
||||||
|
self.tagpath.pop()
|
||||||
|
def tagpath_len(self):
|
||||||
|
return len(self.tagpath)
|
||||||
|
def get_tagpath(self, i):
|
||||||
|
cnt = len(self.tagpath)
|
||||||
|
if i < cnt : result = self.tagpath[i]
|
||||||
|
for j in xrange(i+1, cnt) :
|
||||||
|
result += '.' + self.tagpath[j]
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# list of absolute command byte values that indicate
|
||||||
|
# various types of loop mechanisms typically used to generate vectors
|
||||||
|
|
||||||
|
cmd_list = (0x76, 0x76)
|
||||||
|
|
||||||
|
# peek at and return 1 byte that is ahead by i bytes
|
||||||
|
def peek(self, aheadi):
|
||||||
|
c = self.fo.read(aheadi)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
self.fo.seek(-aheadi,1)
|
||||||
|
c = c[-1:]
|
||||||
|
return ord(c)
|
||||||
|
|
||||||
|
|
||||||
|
# get the next value from the file being processed
|
||||||
|
def getNext(self):
|
||||||
|
nbyte = self.peek(1);
|
||||||
|
if (nbyte == None):
|
||||||
|
return None
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
return val
|
||||||
|
|
||||||
|
|
||||||
|
# format an arg by argtype
|
||||||
|
def formatArg(self, arg, argtype):
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
result = self.dict.lookup(arg)
|
||||||
|
elif (argtype == 'raw') or (argtype == 'number') or (argtype == 'scalar_number') :
|
||||||
|
result = arg
|
||||||
|
elif (argtype == 'snippets') :
|
||||||
|
result = arg
|
||||||
|
else :
|
||||||
|
print "Error Unknown argtype %s" % argtype
|
||||||
|
sys.exit(-2)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# process the next tag token, recursively handling subtags,
|
||||||
|
# arguments, and commands
|
||||||
|
def procToken(self, token):
|
||||||
|
|
||||||
|
known_token = False
|
||||||
|
self.tag_push(token)
|
||||||
|
|
||||||
|
if self.debug : print 'Processing: ', self.get_tagpath(0)
|
||||||
|
cnt = self.tagpath_len()
|
||||||
|
for j in xrange(cnt):
|
||||||
|
tkn = self.get_tagpath(j)
|
||||||
|
if tkn in self.token_tags :
|
||||||
|
num_args = self.token_tags[tkn][0]
|
||||||
|
argtype = self.token_tags[tkn][1]
|
||||||
|
subtags = self.token_tags[tkn][2]
|
||||||
|
splcase = self.token_tags[tkn][3]
|
||||||
|
ntags = -1
|
||||||
|
known_token = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if known_token :
|
||||||
|
|
||||||
|
# handle subtags if present
|
||||||
|
subtagres = []
|
||||||
|
if (splcase == 1):
|
||||||
|
# this type of tag uses an escape marker 0x74 to indicate the subtag count
|
||||||
|
if self.peek(1) == 0x74:
|
||||||
|
skip = readEncodedNumber(self.fo)
|
||||||
|
subtags = 1
|
||||||
|
num_args = 0
|
||||||
|
|
||||||
|
if (subtags == 1):
|
||||||
|
ntags = readEncodedNumber(self.fo)
|
||||||
|
if self.debug : print 'subtags: ' + token + ' has ' + str(ntags)
|
||||||
|
for j in xrange(ntags):
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
subtagres.append(self.procToken(self.dict.lookup(val)))
|
||||||
|
|
||||||
|
# arguments can be scalars or vectors of text or numbers
|
||||||
|
argres = []
|
||||||
|
if num_args > 0 :
|
||||||
|
firstarg = self.peek(1)
|
||||||
|
if (firstarg in self.cmd_list) and (argtype != 'scalar_number') and (argtype != 'scalar_text'):
|
||||||
|
# single argument is a variable length vector of data
|
||||||
|
arg = readEncodedNumber(self.fo)
|
||||||
|
argres = self.decodeCMD(arg,argtype)
|
||||||
|
else :
|
||||||
|
# num_arg scalar arguments
|
||||||
|
for i in xrange(num_args):
|
||||||
|
argres.append(self.formatArg(readEncodedNumber(self.fo), argtype))
|
||||||
|
|
||||||
|
# build the return tag
|
||||||
|
result = []
|
||||||
|
tkn = self.get_tagpath(0)
|
||||||
|
result.append(tkn)
|
||||||
|
result.append(subtagres)
|
||||||
|
result.append(argtype)
|
||||||
|
result.append(argres)
|
||||||
|
self.tag_pop()
|
||||||
|
return result
|
||||||
|
|
||||||
|
# all tokens that need to be processed should be in the hash
|
||||||
|
# table if it may indicate a problem, either new token
|
||||||
|
# or an out of sync condition
|
||||||
|
else:
|
||||||
|
result = []
|
||||||
|
if (self.debug):
|
||||||
|
print 'Unknown Token:', token
|
||||||
|
self.tag_pop()
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# special loop used to process code snippets
|
||||||
|
# it is NEVER used to format arguments.
|
||||||
|
# builds the snippetList
|
||||||
|
def doLoop72(self, argtype):
|
||||||
|
cnt = readEncodedNumber(self.fo)
|
||||||
|
if self.debug :
|
||||||
|
result = 'Set of '+ str(cnt) + ' xml snippets. The overall structure \n'
|
||||||
|
result += 'of the document is indicated by snippet number sets at the\n'
|
||||||
|
result += 'end of each snippet. \n'
|
||||||
|
print result
|
||||||
|
for i in xrange(cnt):
|
||||||
|
if self.debug: print 'Snippet:',str(i)
|
||||||
|
snippet = []
|
||||||
|
snippet.append(i)
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
snippet.append(self.procToken(self.dict.lookup(val)))
|
||||||
|
self.snippetList.append(snippet)
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# general loop code graciously submitted by "skindle" - thank you!
|
||||||
|
def doLoop76Mode(self, argtype, cnt, mode):
|
||||||
|
result = []
|
||||||
|
adj = 0
|
||||||
|
if mode & 1:
|
||||||
|
adj = readEncodedNumber(self.fo)
|
||||||
|
mode = mode >> 1
|
||||||
|
x = []
|
||||||
|
for i in xrange(cnt):
|
||||||
|
x.append(readEncodedNumber(self.fo) - adj)
|
||||||
|
for i in xrange(mode):
|
||||||
|
for j in xrange(1, cnt):
|
||||||
|
x[j] = x[j] + x[j - 1]
|
||||||
|
for i in xrange(cnt):
|
||||||
|
result.append(self.formatArg(x[i],argtype))
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# dispatches loop commands bytes with various modes
|
||||||
|
# The 0x76 style loops are used to build vectors
|
||||||
|
|
||||||
|
# This was all derived by trial and error and
|
||||||
|
# new loop types may exist that are not handled here
|
||||||
|
# since they did not appear in the test cases
|
||||||
|
|
||||||
|
def decodeCMD(self, cmd, argtype):
|
||||||
|
if (cmd == 0x76):
|
||||||
|
|
||||||
|
# loop with cnt, and mode to control loop styles
|
||||||
|
cnt = readEncodedNumber(self.fo)
|
||||||
|
mode = readEncodedNumber(self.fo)
|
||||||
|
|
||||||
|
if self.debug : print 'Loop for', cnt, 'with mode', mode, ': '
|
||||||
|
return self.doLoop76Mode(argtype, cnt, mode)
|
||||||
|
|
||||||
|
if self.debug: print "Unknown command", cmd
|
||||||
|
result = []
|
||||||
|
return result
|
||||||
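A hand-worked trace of the 0x76 loop may make the mode handling clearer. The numbers are invented, and this mirrors doLoop76Mode() rather than calling it, since the real method reads its values from the open .dat file:

    # mode = 3: the low bit is set, so an adjustment value is read first
    raw = [4, 2, 3]                # numbers pulled from the stream (hypothetical)
    adj = 1                        # the adjustment value read because mode & 1
    x = [v - adj for v in raw]     # [3, 1, 2]
    for _ in xrange(3 >> 1):       # mode >> 1 == 1 cumulative-sum pass
        for j in xrange(1, len(x)):
            x[j] = x[j] + x[j - 1]
    print x                        # [3, 4, 6] -- the vector handed to formatArg()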
|
|
||||||
|
|
||||||
|
|
||||||
|
# add full tag path to injected snippets
|
||||||
|
def updateName(self, tag, prefix):
|
||||||
|
name = tag[0]
|
||||||
|
subtagList = tag[1]
|
||||||
|
argtype = tag[2]
|
||||||
|
argList = tag[3]
|
||||||
|
nname = prefix + '.' + name
|
||||||
|
nsubtaglist = []
|
||||||
|
for j in subtagList:
|
||||||
|
nsubtaglist.append(self.updateName(j,prefix))
|
||||||
|
ntag = []
|
||||||
|
ntag.append(nname)
|
||||||
|
ntag.append(nsubtaglist)
|
||||||
|
ntag.append(argtype)
|
||||||
|
ntag.append(argList)
|
||||||
|
return ntag
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# perform depth first injection of specified snippets into this one
|
||||||
|
def injectSnippets(self, snippet):
|
||||||
|
snipno, tag = snippet
|
||||||
|
name = tag[0]
|
||||||
|
subtagList = tag[1]
|
||||||
|
argtype = tag[2]
|
||||||
|
argList = tag[3]
|
||||||
|
nsubtagList = []
|
||||||
|
if len(argList) > 0 :
|
||||||
|
for j in argList:
|
||||||
|
asnip = self.snippetList[j]
|
||||||
|
aso, atag = self.injectSnippets(asnip)
|
||||||
|
atag = self.updateName(atag, name)
|
||||||
|
nsubtagList.append(atag)
|
||||||
|
argtype='number'
|
||||||
|
argList=[]
|
||||||
|
if len(nsubtagList) > 0 :
|
||||||
|
subtagList.extend(nsubtagList)
|
||||||
|
tag = []
|
||||||
|
tag.append(name)
|
||||||
|
tag.append(subtagList)
|
||||||
|
tag.append(argtype)
|
||||||
|
tag.append(argList)
|
||||||
|
snippet = []
|
||||||
|
snippet.append(snipno)
|
||||||
|
snippet.append(tag)
|
||||||
|
return snippet
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# format the tag for output
|
||||||
|
def formatTag(self, node):
|
||||||
|
name = node[0]
|
||||||
|
subtagList = node[1]
|
||||||
|
argtype = node[2]
|
||||||
|
argList = node[3]
|
||||||
|
fullpathname = name.split('.')
|
||||||
|
nodename = fullpathname.pop()
|
||||||
|
ilvl = len(fullpathname)
|
||||||
|
indent = ' ' * (3 * ilvl)
|
||||||
|
result = indent + '<' + nodename + '>'
|
||||||
|
if len(argList) > 0:
|
||||||
|
argres = ''
|
||||||
|
for j in argList:
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
argres += j + '|'
|
||||||
|
else :
|
||||||
|
argres += str(j) + ','
|
||||||
|
argres = argres[0:-1]
|
||||||
|
if argtype == 'snippets' :
|
||||||
|
result += 'snippets:' + argres
|
||||||
|
else :
|
||||||
|
result += argres
|
||||||
|
if len(subtagList) > 0 :
|
||||||
|
result += '\n'
|
||||||
|
for j in subtagList:
|
||||||
|
if len(j) > 0 :
|
||||||
|
result += self.formatTag(j)
|
||||||
|
result += indent + '</' + nodename + '>\n'
|
||||||
|
else:
|
||||||
|
result += '</' + nodename + '>\n'
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# flatten tag
|
||||||
|
def flattenTag(self, node):
|
||||||
|
name = node[0]
|
||||||
|
subtagList = node[1]
|
||||||
|
argtype = node[2]
|
||||||
|
argList = node[3]
|
||||||
|
result = name
|
||||||
|
if (len(argList) > 0):
|
||||||
|
argres = ''
|
||||||
|
for j in argList:
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
argres += j + '|'
|
||||||
|
else :
|
||||||
|
argres += str(j) + '|'
|
||||||
|
argres = argres[0:-1]
|
||||||
|
if argtype == 'snippets' :
|
||||||
|
result += '.snippets=' + argres
|
||||||
|
else :
|
||||||
|
result += '=' + argres
|
||||||
|
result += '\n'
|
||||||
|
for j in subtagList:
|
||||||
|
if len(j) > 0 :
|
||||||
|
result += self.flattenTag(j)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# reduce create xml output
|
||||||
|
def formatDoc(self, flat_xml):
|
||||||
|
result = ''
|
||||||
|
for j in self.doc :
|
||||||
|
if len(j) > 0:
|
||||||
|
if flat_xml:
|
||||||
|
result += self.flattenTag(j)
|
||||||
|
else:
|
||||||
|
result += self.formatTag(j)
|
||||||
|
if self.debug : print result
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# main loop - parse the page.dat files
|
||||||
|
# to create structured document and snippets
|
||||||
|
|
||||||
|
# FIXME: value at end of magic appears to be a subtags count
|
||||||
|
# but for what? For now, inject an 'info' tag as it is in
|
||||||
|
# every dictionary and seems close to what is meant
|
||||||
|
# The alternative is to special case the last _ "0x5f" to mean something
|
||||||
|
|
||||||
|
def process(self):
|
||||||
|
|
||||||
|
# peek at the first bytes to see what type of file it is
|
||||||
|
magic = self.fo.read(9)
|
||||||
|
if (magic[0:1] == 'p') and (magic[2:9] == 'marker_'):
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'p') and (magic[2:9] == '__PAGE_'):
|
||||||
|
skip = self.fo.read(2)
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'p') and (magic[2:8] == '_PAGE_'):
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'g') and (magic[2:9] == '__GLYPH'):
|
||||||
|
skip = self.fo.read(3)
|
||||||
|
first_token = 'info'
|
||||||
|
else :
|
||||||
|
# other0.dat file
|
||||||
|
first_token = None
|
||||||
|
self.fo.seek(-9,1)
|
||||||
|
|
||||||
|
|
||||||
|
# main loop to read and build the document tree
|
||||||
|
while True:
|
||||||
|
|
||||||
|
if first_token != None :
|
||||||
|
# use "inserted" first token 'info' for page and glyph files
|
||||||
|
tag = self.procToken(first_token)
|
||||||
|
if len(tag) > 0 :
|
||||||
|
self.doc.append(tag)
|
||||||
|
first_token = None
|
||||||
|
|
||||||
|
v = self.getNext()
|
||||||
|
if (v == None):
|
||||||
|
break
|
||||||
|
|
||||||
|
if (v == 0x72):
|
||||||
|
self.doLoop72('number')
|
||||||
|
elif (v > 0) and (v < self.dict.getSize()) :
|
||||||
|
tag = self.procToken(self.dict.lookup(v))
|
||||||
|
if len(tag) > 0 :
|
||||||
|
self.doc.append(tag)
|
||||||
|
else:
|
||||||
|
if self.debug:
|
||||||
|
print "Main Loop: Unknown value: %x" % v
|
||||||
|
if (v == 0):
|
||||||
|
if (self.peek(1) == 0x5f):
|
||||||
|
skip = self.fo.read(1)
|
||||||
|
first_token = 'info'
|
||||||
|
|
||||||
|
# now do snippet injection
|
||||||
|
if len(self.snippetList) > 0 :
|
||||||
|
if self.debug : print 'Injecting Snippets:'
|
||||||
|
snippet = self.injectSnippets(self.snippetList[0])
|
||||||
|
snipno = snippet[0]
|
||||||
|
tag_add = snippet[1]
|
||||||
|
if self.debug : print self.formatTag(tag_add)
|
||||||
|
if len(tag_add) > 0:
|
||||||
|
self.doc.append(tag_add)
|
||||||
|
|
||||||
|
# handle generation of xml output
|
||||||
|
xmlpage = self.formatDoc(self.flat_xml)
|
||||||
|
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
|
||||||
|
def fromData(dict, fname):
|
||||||
|
flat_xml = True
|
||||||
|
debug = False
|
||||||
|
pp = PageParser(fname, dict, debug, flat_xml)
|
||||||
|
xmlpage = pp.process()
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
def getXML(dict, fname):
|
||||||
|
flat_xml = False
|
||||||
|
debug = False
|
||||||
|
pp = PageParser(fname, dict, debug, flat_xml)
|
||||||
|
xmlpage = pp.process()
|
||||||
|
return xmlpage
|
||||||
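A quick sketch of driving these two wrappers from another Python 2 script; the .dat file names are hypothetical, and the files must already have been dumped and decrypted (see the usage text below, which points at cmbtc_dump.py):

    d = Dictionary('dict0000.dat')         # the book's string table
    xml = getXML(d, 'page0000.dat')        # structured xml for one page
    flat = fromData(d, 'other0000.dat')    # flattened description of the stylesheet file
    file('page0000.xml', 'wb').write(xml)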
|
|
||||||
|
def usage():
|
||||||
|
print 'Usage: '
|
||||||
|
print ' convert2xml.py dict0000.dat infile.dat '
|
||||||
|
print ' '
|
||||||
|
print ' Options:'
|
||||||
|
print ' -h print this usage help message '
|
||||||
|
print ' -d turn on debug output to check for potential errors '
|
||||||
|
print ' --flat-xml output the flattened xml page description only '
|
||||||
|
print ' '
|
||||||
|
print ' This program will attempt to convert a page*.dat file or '
|
||||||
|
print ' glyphs*.dat file, using the dict0000.dat file, to its xml description. '
|
||||||
|
print ' '
|
||||||
|
print ' Use "cmbtc_dump.py" first to unencrypt, uncompress, and dump '
|
||||||
|
print ' the *.dat files from a Topaz format e-book.'
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
|
||||||
|
def main(argv):
|
||||||
|
dictFile = ""
|
||||||
|
pageFile = ""
|
||||||
|
debug = False
|
||||||
|
flat_xml = False
|
||||||
|
printOutput = False
|
||||||
|
if len(argv) == 0:
|
||||||
|
printOutput = True
|
||||||
|
argv = sys.argv
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(argv[1:], "hd", ["flat-xml"])
|
||||||
|
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
|
||||||
|
# print help information and exit:
|
||||||
|
print str(err) # will print something like "option -a not recognized"
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o =="-d":
|
||||||
|
debug=True
|
||||||
|
if o =="-h":
|
||||||
|
usage()
|
||||||
|
sys.exit(0)
|
||||||
|
if o =="--flat-xml":
|
||||||
|
flat_xml = True
|
||||||
|
|
||||||
|
dictFile, pageFile = args[0], args[1]
|
||||||
|
|
||||||
|
# read in the string table dictionary
|
||||||
|
dict = Dictionary(dictFile)
|
||||||
|
# dict.dumpDict()
|
||||||
|
|
||||||
|
# create a page parser
|
||||||
|
pp = PageParser(pageFile, dict, debug, flat_xml)
|
||||||
|
|
||||||
|
xmlpage = pp.process()
|
||||||
|
|
||||||
|
if printOutput:
|
||||||
|
print xmlpage
|
||||||
|
return 0
|
||||||
|
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main(''))
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
{\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540
|
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
|
||||||
{\fonttbl}
|
{\fonttbl}
|
||||||
{\colortbl;\red255\green255\blue255;}
|
{\colortbl;\red255\green255\blue255;}
|
||||||
}
|
}
|
||||||