forked from jgoguen/calibre-kobo-driver
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcontainer.py
More file actions
565 lines (480 loc) · 20.9 KB
/
container.py
File metadata and controls
565 lines (480 loc) · 20.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
# vim:fileencoding=UTF-8:filetype=python:ts=4:sw=4:sta:et:sts=4:ai
"""Extend calibre's EPUBContainer to work for a KePub."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__license__ = "GPL v3"
__copyright__ = (
"2010, Kovid Goyal <kovid@kovidgoyal.net>; "
+ "2013, Joel Goguen <jgoguen@jgoguen.ca>"
)
__docformat__ = "restructuredtext en"
# Be careful editing this! This file has to work in multiple plugins at once,
# so don't import anything from calibre_plugins.
import os
import re
import shutil
import string
from copy import deepcopy
from calibre import guess_type
from calibre.ebooks.conversion.plugins.epub_input import ADOBE_OBFUSCATION
from calibre.ebooks.conversion.plugins.epub_input import IDPF_OBFUSCATION
from calibre.ebooks.conversion.utils import HeuristicProcessor
from calibre.ebooks.oeb.polish.container import EpubContainer
from calibre.utils.smartypants import smartyPants
from lxml import etree
# Support load_translations() without forcing calibre 1.9+
try:
load_translations()
except NameError:
pass
HTML_MIMETYPES = frozenset(["application/xhtml+xml", "text/html"]) # type: Set[str]
CSS_MIMETYPE = guess_type("a.css")[0] # type: str
JS_MIMETYPE = guess_type("a.js")[0] # type: str
EXCLUDE_FROM_ZIP = frozenset(
[".DS_Store", ".directory", "mimetype", "thumbs.db"]
) # type: Set[str]
NO_SPACE_BEFORE_CHARS = frozenset(
[c for c in string.punctuation] + ["\xbb"]
) # noqa: E501, type: Set[str]
ENCRYPTION_NAMESPACES = {
"enc": "http://www.w3.org/2001/04/xmlenc#",
"deenc": "http://ns.adobe.com/digitaleditions/enc",
} # type: Dict[str, str]
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" # type: str
SPECIAL_TAGS = frozenset(["img"]) # type: Set[str]
ENCODING_RE = re.compile(r'^\<\?.+encoding="([^"]+)"', re.MULTILINE)
SELF_CLOSING_RE = re.compile(
r"<(meta|link) ([^>]+)></(?:meta|link)>", re.UNICODE | re.MULTILINE
)
FORCE_OPEN_TAG_RE = re.compile(r"<(script|p) (.+) ?/>", re.UNICODE | re.MULTILINE)
EMPTY_HEADINGS = re.compile(r"(?i)<h\d+[^>]+?>\s*</h\d+>", re.UNICODE | re.MULTILINE)
ELLIPSIS_RE = re.compile(r"(?u)(?<=\w)\s?(\.\s+?){2}\.", re.UNICODE | re.MULTILINE)
MS_CRUFT_RE_1 = re.compile(r"\s*<o:p>\s*</o:p>", re.UNICODE | re.MULTILINE)
MS_CRUFT_RE_2 = re.compile(r"(?i)</?st1:\w+>", re.UNICODE | re.MULTILINE)
# TODO: Refactor InvalidEpub from here and device/driver.py to be a common class
class InvalidEpub(ValueError):
    """Raised when an ePub file is structurally invalid."""
class ParseError(ValueError):
    """Raised when an ePub inner file cannot be parsed.

    Records which file failed (*name*) and why (*desc*) in addition to
    the formatted exception message.
    """

    def __init__(self, name, desc):  # type: (str, str) -> None
        """Store the failing file name and error description."""
        self.name = name
        self.desc = desc
        message = "Failed to parse: {0} with error: {1}".format(name, desc)
        super(ParseError, self).__init__(message)
class KEPubContainer(EpubContainer):
    """Extends an EpubContainer to work for a KePub."""

    # Counters used to build the "kobo.<paragraph>.<segment>" span IDs.
    # They are reset per content file in add_kobo_spans().
    __paragraph_counter = 0  # type: int
    __segment_counter = 0  # type: int

    def html_names(self):
        """Get all HTML files in the OPF file.

        A generator function that yields only HTML file names from the
        ePub, normalized to be container-root relative with forward-slash
        separators.
        """
        for node in self.opf_xpath("//opf:manifest/opf:item[@href and @media-type]"):
            if node.get("media-type") in HTML_MIMETYPES:
                href = os.path.join(os.path.dirname(self.opf_name), node.get("href"))
                href = os.path.normpath(href).replace(os.sep, "/")
                yield href

    @property
    def is_drm_encumbered(self):
        """Determine if the ePub container is DRM-encumbered.

        This method looks for the 'encryption.xml' file which denotes an
        ePub encumbered by Digital Restrictions Management. DRM-encumbered
        files cannot be edited.
        """
        is_encumbered = False
        if "META-INF/encryption.xml" in self.name_path_map:
            try:
                xml = self.parsed("META-INF/encryption.xml")
                if xml is None:
                    # If encryption.xml can't be parsed, assume its presence
                    # means an encumbered file. This may be wrong, but so far
                    # it's proven accurate.
                    return True
                for elem in xml.xpath(
                    "./enc:EncryptedData/enc:EncryptionMethod[@Algorithm]",
                    namespaces=ENCRYPTION_NAMESPACES,
                ):
                    alg = elem.get("Algorithm")
                    # Anything not a font-obfuscation algorithm is a sign of
                    # an encumbered file.
                    if alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
                        is_encumbered = True
                        break
            except Exception as e:
                # Fix: Exception.message does not exist on Python 3; use
                # str(e) to get the message text.
                self.log.error("Could not parse encryption.xml: " + str(e))
                raise
        return is_encumbered

    def get_raw(self, name, force_unicode=False):
        """Get the raw, unparsed contents of an ePub inner file.

        @param name: The container-relative name of the inner file.
        @param force_unicode: If True, decode the bytes as UTF-8 and return
            text rather than bytes.
        @return: The file contents, or None if the file could not be read.
        """
        self.commit_item(name, keep_parsed=False)
        try:
            # Context manager so the handle is closed even if read() raises.
            with open(self.name_path_map[name], "rb") as f:
                data = f.read()
        except Exception:
            return None
        if force_unicode:
            # Fix: the original called data.encode("UTF-8") on bytes, which
            # is a TypeError on Python 3 — decoding is what is wanted here.
            # errors="replace" keeps the cleanup pipeline alive for files
            # with a mis-declared encoding; the resulting U+FFFD characters
            # are stripped by forced_cleanup().
            data = data.decode("UTF-8", errors="replace")
        return data

    def flush_cache(self):
        """Flush the cache, writing all cached values to disk."""
        # Snapshot the dirtied set: commit_item() mutates it while we loop.
        for name in list(self.dirtied):
            self.commit_item(name, keep_parsed=True)

    def _write_raw(self, name, html):
        """Write modified raw text back to the file backing *name*.

        NOTE(review): the raw-text cleanup methods below previously
        transformed `html` but never persisted it — dirty()/flush_cache()
        only commit *parsed* trees, and get_raw() drops the parsed cache
        entry first, so the edits were silently lost. Confirm against
        calibre's Container semantics.
        """
        with open(self.name_path_map[name], "wb") as f:
            f.write(html.encode("UTF-8"))

    def copy_file_to_container(self, path, name=None, mt=None):
        """Copy a file into this Container instance.

        @param path: The path to the file to copy into this Container.
        @param name: The name to give to the copied file, relative to the
        Container root. Set to None to use the basename of path.
        @param mt: The MIME type of the file to set in the manifest. Set to
        None to auto-detect.

        @return: The name of the file relative to the Container root
        """
        if path is None or not os.path.isfile(path):
            raise ValueError("A source path must be given")
        if name is None:
            name = os.path.basename(path)
        item = self.generate_item(name, media_type=mt)
        name = self.href_to_name(item.get("href"), self.opf_name)
        self.log.info(
            "Copying file '{0}' to '{1}' as '{2}'".format(path, self.root, name)
        )
        try:
            # Throws an error we can ignore if the directory already exists
            # (exist_ok= is avoided for Python 2 compatibility).
            os.makedirs(os.path.dirname(os.path.join(self.root, name)))
        except Exception:
            pass
        shutil.copy(path, os.path.join(self.root, name))
        return name

    def add_content_file_reference(self, name):
        """Add a reference to the named file to all content files.

        Adds a reference to the named file (see self.name_path_map) to all
        content files (self.html_names()). Currently only CSS files with a
        MIME type of text/css and JavaScript files with a MIME type of
        application/x-javascript are supported.

        @raise ValueError: If *name* is not a known container file.
        """
        if name not in self.name_path_map or name not in self.mime_map:
            # Fix: the message previously had no {filename} placeholder, so
            # .format(filename=name) never showed the offending name.
            raise ValueError(
                _(  # noqa: F821 - _ is defined in calibre
                    "A valid file name must be given (got {filename})"
                ).format(filename=name)
            )
        for infile in self.html_names():
            self.log.info("Adding reference to {0} to file {1}".format(name, infile))
            root = self.parsed(infile)
            if root is None:
                self.log.error("Could not retrieve content file {0}".format(infile))
                continue
            # Fix: xpath() returns a list; it is never None. Test for an
            # empty result instead of identity with None.
            head = root.xpath("./xhtml:head", namespaces={"xhtml": XHTML_NAMESPACE})
            if not head:
                self.log.error(
                    "Could not find a <head> element in content file {0}".format(infile)
                )
                continue
            head = head[0]
            # Build the reference element appropriate for the MIME type.
            if self.mime_map[name] == CSS_MIMETYPE:
                elem = head.makeelement(
                    "{%s}link" % XHTML_NAMESPACE,
                    rel="stylesheet",
                    href=os.path.relpath(name, os.path.dirname(infile)).replace(
                        os.sep, "/"
                    ),
                )
            elif self.mime_map[name] == JS_MIMETYPE:
                elem = head.makeelement(
                    "{%s}script" % XHTML_NAMESPACE,
                    type="text/javascript",
                    src=os.path.relpath(name, os.path.dirname(infile)).replace(
                        os.sep, "/"
                    ),
                )
            else:
                # Unsupported MIME type — nothing to add for this file.
                elem = None
            if elem is not None:
                head.append(elem)
                if self.mime_map[name] == CSS_MIMETYPE:
                    self.fix_tail(elem)
                self.dirty(infile)

    def fix_tail(self, item):
        """Fix self-closing elements.

        Designed only to work with self closing elements after item has just
        been inserted/appended
        """
        parent = item.getparent()
        idx = parent.index(item)
        if idx == 0:
            # item is the first child element, move the text to after item
            item.tail = parent.text
        else:
            # There are other elements, possibly also text, before this child
            # element.
            # Move this element's tail to the previous element (note: .text is
            # only the text after the last child element, text before that and
            # surrounding elements are attributes of the elemenets)
            item.tail = parent[idx - 1].tail
            # If this is the last child element, it gets the remaining text.
            if idx == len(parent) - 1:
                parent[idx - 1].tail = parent.text

    def forced_cleanup(self):
        """Perform cleanup considered essential for standards compliance."""
        for name in self.html_names():
            self.log.info("Forcing cleanup for file {0}".format(name))
            html = self.get_raw(name, force_unicode=True)
            if html is None:
                continue
            # The XML declaration (if any) lives in the first few bytes.
            encoding_match = ENCODING_RE.search(html[:75])
            if (
                encoding_match
                and encoding_match.group(1)
                and encoding_match.group(1).upper() != "UTF-8"
            ):
                # The text was already decoded as UTF-8 in get_raw(), so only
                # the declared encoding needs rewriting. (The original
                # re-decoded here, which is a TypeError on str in Python 3.)
                html = re.sub(encoding_match.group(1), "UTF-8", html, 1, re.MULTILINE)
            # Force meta and link tags to be self-closing
            html = SELF_CLOSING_RE.sub(r"<\1 \2 />", html)
            # Force open script tags
            html = FORCE_OPEN_TAG_RE.sub(r"<\1 \2></\1>", html)
            # Remove Unicode replacement characters
            # (fix: string.replace() does not exist in Python 3 — use the
            # str method.)
            html = html.replace("\uFFFD", "")
            self._write_raw(name, html)
            self.dirty(name)
        self.flush_cache()

    def clean_markup(self):
        """Clean HTML markup.

        This cleans the HTML markup for things which are not strictly
        non-compliant but can cause problems.
        """
        for name in self.html_names():
            self.log.info("Cleaning markup for file {0}".format(name))
            html = self.get_raw(name, force_unicode=True)
            if html is None:
                continue
            # Get rid of Microsoft cruft
            html = MS_CRUFT_RE_1.sub(" ", html)
            html = MS_CRUFT_RE_2.sub("", html)
            # Remove empty headings
            html = EMPTY_HEADINGS.sub("", html)
            self._write_raw(name, html)
            self.dirty(name)
        self.flush_cache()

    def smarten_punctuation(self):
        """Convert standard punctuation to "smart" punctuation."""
        preprocessor = HeuristicProcessor(log=self.log)
        for name in self.html_names():
            self.log.info("Smartening punctuation for file {0}".format(name))
            html = self.get_raw(name, force_unicode=True)
            if html is None:
                continue
            # Fix non-breaking space indents
            html = preprocessor.fix_nbsp_indents(html)
            # Smarten punctuation
            html = smartyPants(html)
            # Ellipsis to HTML entity
            html = ELLIPSIS_RE.sub("…", html)
            # Double-dash and unicode char code to en-/em-dash. Order
            # matters: "---" must be handled before "--", and the comment
            # repairs below undo the mangling "--" substitution causes to
            # "<!--"/"-->" markers. (Fix: string.replace() does not exist
            # in Python 3 — use the str method.)
            for old, new in (
                ("---", " – "),
                ("\x97", " – "),
                ("\u2013", " – "),
                ("--", " — "),
                ("\u2014", " — "),
                # Fix comment nodes that got mangled
                ("<! — ", "<!-- "),
                (" — >", " -->"),
            ):
                html = html.replace(old, new)
            self._write_raw(name, html)
            self.dirty(name)
        self.flush_cache()

    def add_kobo_divs(self):
        """Add KePub divs to each HTML file in the book."""
        for name in self.html_names():
            self.log.info("Adding Kobo divs to {0}".format(name))
            root = self.parsed(name)
            # Skip files that already carry the Kobo wrapper div.
            kobo_div_count = root.xpath(
                'count(//xhtml:div[@id="book-inner"])',
                namespaces={"xhtml": XHTML_NAMESPACE},
            )
            if kobo_div_count > 0:
                self.log.info("\tSkipping file")
                continue
            # NOTE: Hackish heuristic: Forgo this if we have more div's than
            # p's, which would potentially indicate a book using div's instead
            # of p's...
            # Apparently, doing this on those books appears to blow up in a
            # spectacular way, so, err, don't ;).
            # FIXME: Try to figure out what's really happening instead of
            # sidestepping the issue?
            div_count = int(
                root.xpath("count(//xhtml:div)", namespaces={"xhtml": XHTML_NAMESPACE})
            )
            p_count = int(
                root.xpath("count(//xhtml:p)", namespaces={"xhtml": XHTML_NAMESPACE})
            )
            if div_count > p_count:
                self.log.info(
                    "\tSkipping file ({0:d} div tags, {1:d} p tags)".format(
                        div_count, p_count
                    )
                )
                continue
            self.__add_kobo_divs_to_body(root)
            self.parsed_cache[name] = root
            self.dirty(name)
        self.flush_cache()
        return True

    def __add_kobo_divs_to_body(self, root):
        # Wrap the entire <body> content in the two nested divs Kobo
        # expects: div#book-columns > div#book-inner.
        body = root.xpath("./xhtml:body", namespaces={"xhtml": XHTML_NAMESPACE})[0]
        # save node content for later
        body_text = body.text
        body_children = deepcopy(body.getchildren())
        body_attrs = {}
        for key in body.keys():
            body_attrs[key] = body.get(key)
        # reset current node, to start from scratch
        body.clear()
        # restore node attributes
        for key in body_attrs:
            body.set(key, body_attrs[key])
        # Wrap the full body in a div
        inner_div = etree.Element(
            "{%s}div" % (XHTML_NAMESPACE,), attrib={"id": "book-inner"}
        )
        # Handle the node text
        if body_text is not None:
            inner_div.text = body_text
        # re-add the node children, but as children of the div
        for child in body_children:
            # save child tail for later
            child_tail = child.tail
            child.tail = None
            inner_div.append(child)
            # Handle the child tail
            if child_tail is not None:
                inner_div[-1].tail = child_tail
        # Finally, wrap that div in another one...
        outer_div = etree.Element(
            "{%s}div" % (XHTML_NAMESPACE,), attrib={"id": "book-columns"}
        )
        outer_div.append(inner_div)
        # And re-chuck the full div pyramid in the now empty body
        body.append(outer_div)

    def add_kobo_spans(self):
        """Add KePub spans (used for in-book location) to each HTML file."""
        for name in self.html_names():
            self.log.info("Adding Kobo spans to {0}".format(name))
            root = self.parsed(name)
            # Skip files that already carry Kobo spans.
            kobo_span_count = root.xpath(
                'count(.//xhtml:span[@class="koboSpan" '
                + 'or starts-with(@id, "kobo.")])',
                namespaces={"xhtml": XHTML_NAMESPACE},
            )
            if kobo_span_count > 0:
                self.log.info("\tSkipping file")
                continue
            # Counters restart for every content file.
            self.__paragraph_counter = 1
            self.__segment_counter = 1
            body = root.xpath("./xhtml:body", namespaces={"xhtml": XHTML_NAMESPACE})[0]
            self.__add_kobo_spans_to_node(body)
            self.parsed_cache[name] = root
            self.dirty(name)
        self.flush_cache()
        return True

    def __add_kobo_spans_to_node(self, node):
        # Recursively wrap text content in kobo.<p>.<s> spans. Returns the
        # (possibly replaced) node to be re-attached by the caller.
        # process node only if it is not a comment or a processing instruction
        if not (
            node is None
            or isinstance(node, etree._Comment)
            or isinstance(node, etree._ProcessingInstruction)
        ):
            # Special case: tags in SPECIAL_TAGS (e.g. <img>) are wrapped
            # whole in a single span instead of having text split.
            special_tag_match = re.search(r"^(?:\{[^\}]+\})?(\w+)$", node.tag)
            if special_tag_match and special_tag_match.group(1) in SPECIAL_TAGS:
                span = etree.Element(
                    "{%s}span" % (XHTML_NAMESPACE,),
                    attrib={
                        "id": "kobo.{0}.{1}".format(
                            self.__paragraph_counter, self.__segment_counter
                        ),
                        "class": "koboSpan",
                    },
                )
                span.append(node)
                self.__paragraph_counter += 1
                self.__segment_counter = 1
                return span
            # save node content for later
            node_text = node.text
            node_children = deepcopy(node.getchildren())
            node_attrs = {}
            for key in node.keys():
                node_attrs[key] = node.get(key)
            # reset current node, to start from scratch
            node.clear()
            # restore node attributes
            for key in node_attrs:
                node.set(key, node_attrs[key])
            # the node text is converted to spans
            if node_text is not None:
                if not self.__append_kobo_spans_from_text(node, node_text):
                    # didn't add spans, restore text
                    node.text = node_text
            # re-add the node children
            for child in node_children:
                # save child tail for later
                child_tail = child.tail
                child.tail = None
                node.append(self.__add_kobo_spans_to_node(child))
                # the child tail is converted to spans
                if child_tail is not None:
                    self.__paragraph_counter += 1
                    self.__segment_counter = 1
                    if not self.__append_kobo_spans_from_text(node, child_tail):
                        # didn't add spans, restore tail on last child
                        self.__paragraph_counter -= 1
                        node[-1].tail = child_tail
                self.__paragraph_counter += 1
                self.__segment_counter = 1
        else:
            # Comments/PIs keep their place but lose any trailing text,
            # which would otherwise be duplicated by the caller.
            if node is not None:
                node.tail = None
        return node

    def __append_kobo_spans_from_text(self, node, text):
        # Split *text* into sentences and append one koboSpan per sentence.
        # Returns True if at least one span was appended.
        if text is not None:
            # if text is only whitespace, don't add spans
            if re.match(r"^\s+$", text, flags=re.UNICODE | re.MULTILINE):
                return False
            else:
                # split text in sentences
                groups = re.split(
                    r'(.*?[\.\!\?\:][\'"\u201d\u2019“…]?\s*)',
                    text,
                    flags=re.UNICODE | re.MULTILINE,
                )
                # remove empty strings resulting from split()
                # (fix: re.split() on str already yields str; the original
                # called .decode("utf-8"), an AttributeError on Python 3.)
                groups = [g for g in groups if g != ""]
                # TODO: To match Kobo KePubs, the trailing whitespace needs to
                # be prepended to the next group. Probably equivalent to make
                # sure the space stays in the span at the end.
                # add each sentence in its own span
                for g in groups:
                    span = etree.Element(
                        "{%s}span" % (XHTML_NAMESPACE,),
                        attrib={
                            "id": "kobo.{0}.{1}".format(
                                self.__paragraph_counter, self.__segment_counter
                            ),
                            "class": "koboSpan",
                        },
                    )
                    span.text = g
                    node.append(span)
                    self.__segment_counter += 1
                return True
        else:
            return False