Added dependency printing

parent cd07b3abf8, commit f14f28d3a4
7 changed files with 43 additions and 5 deletions

@@ -3,7 +3,6 @@ from panflute import Doc, Element, Div, Span
 from typing import Union, Callable
 from types import ModuleType
 import os
-import warnings
 
 from .command import Command
 

@@ -31,6 +30,7 @@ class Context:
     filename: str
     root_dir: str # Absolute path to the dir of the file formátítko was called on
     rel_dir: str # Relative path to the current dir from the root dir
+    deps: set[str]
 
     def __init__(self, doc: Doc, path: str, parent: Union['Context', None]=None, trusted: bool=True):
         self.parent = parent

@@ -43,6 +43,8 @@ class Context:
         self.filename = os.path.basename(path)
         self.root_dir = parent.root_dir if parent else os.path.abspath(self.dir)
         self.rel_dir = os.path.relpath(self.dir, self.root_dir)
+        self.deps = set()
+        self.add_dep(path)
         if self.get_metadata("flags", immediate=True) is None:
             self.set_metadata("flags", {})
 

@@ -148,8 +150,24 @@ class Context:
             data = data[k]
         del data[keys[-1]]
 
+    def get_deps(self) -> list[str]:
+        if self.parent is not None:
+            return self.parent.get_deps()
+        else:
+            return self.deps
+
+    def add_dep(self, dep: str):
+        self.get_deps().add(os.path.abspath(dep))
+
+    def add_deps(self, deps: list[str]):
+        self.get_deps().update([os.path.abspath(path) for path in deps])
 
+def get_context_from_doc(doc: Doc) -> Context:
+    if len(doc.content) == 1 and isinstance(doc.content[0], Group):
+        return doc.content[0].context
+    else:
+        return None
 
 # This is a custom element which creates \begingroup \endgroup groups in TeX
 # and also causes KaTeX math blocks to be isolated in a similar way.

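The new Context methods route every registration to the root context, so all files touched anywhere in the include tree end up in one shared set of absolute paths. A minimal, self-contained sketch of that delegation pattern (Ctx is a hypothetical stand-in for the real Context class, which additionally needs a panflute Doc):

import os

class Ctx:
    # Stripped-down stand-in showing only the dependency tracking added in this commit.
    def __init__(self, path, parent=None):
        self.parent = parent
        self.deps = set()
        self.add_dep(path)  # the file itself is always its own first dependency

    def get_deps(self):
        # Children delegate to the root, so the whole include tree shares one set.
        return self.parent.get_deps() if self.parent is not None else self.deps

    def add_dep(self, dep):
        self.get_deps().add(os.path.abspath(dep))

    def add_deps(self, deps):
        self.get_deps().update(os.path.abspath(p) for p in deps)

root = Ctx("main.md")
child = Ctx("chapters/intro.md", parent=root)
child.add_dep("chapters/diagram.svg")
print(sorted(root.get_deps()))  # all three paths, absolute, collected on the root
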
@@ -15,6 +15,7 @@ from .html_generator import HTMLGenerator, StandaloneHTMLGenerator
 from .transform_processor import TransformProcessor
 from .pandoc_processor import PandocProcessor
 from .tex_generator import UCWTexGenerator
+from .context import get_context_from_doc
 
 from panflute import convert_text
 

@@ -36,6 +37,7 @@ def main():
     parser.add_argument("input_filename", help="The markdown file to process.", nargs="?" if "--katex-server" in sys.argv else None)
     parser.add_argument("--debug", action='store_true')
     parser.add_argument("--traceback-limit", help="Traceback limit for when errors happen, defaults to 0, as it is only useful for internal debugging.", default=0)
+    parser.add_argument("--deps", help="File to write list of dependencies to. May depend on output formats used.")
     args = parser.parse_args()
 
     if args.katex_server:

@@ -113,6 +115,11 @@ def main():
         subprocess.run(["pdfcsplain", "-halt-on-error", "-output-directory="+outdir.name, "-jobname=formatitko", filename], check=True)
         shutil.move(outdir.name+"/formatitko.pdf", args.output_pdf)
 
+    if args.deps is not None:
+        with open(args.deps, "w") as file:
+            for dep in get_context_from_doc(doc).get_deps():
+                file.write(dep + "\n")
+
     if args.debug:
         print("-----------------------------------")
         try:

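With --deps given, the run ends by writing one absolute path per line to the chosen file; as the help text notes, the list may depend on which output formats were produced. How that file gets consumed is outside this commit. A hedged sketch of one possible consumer that re-runs the tool only when a dependency is newer than the output (the command name formatitko, the file names, and the invocation are assumptions for the example, not taken from the diff):

import os
import subprocess

output = "task.html"
deps_file = "task.deps"  # hypothetical path that an earlier run received via --deps

def needs_rebuild(output, deps_file):
    if not (os.path.exists(output) and os.path.exists(deps_file)):
        return True
    out_mtime = os.path.getmtime(output)
    with open(deps_file) as f:
        deps = [line.strip() for line in f if line.strip()]
    # Rebuild if any recorded dependency changed after the output was produced.
    return any(os.path.getmtime(dep) > out_mtime for dep in deps if os.path.exists(dep))

if needs_rebuild(output, deps_file):
    subprocess.run(["formatitko", "task.md", "--deps", deps_file], check=True)
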
@@ -137,6 +137,7 @@ class HTMLGenerator(OutputGenerator):
         url = e.url
 
         additional_args = self.get_image_processor_args(e.attributes)
+        additional_args["context"] = self.context
 
         # The directory of the current file relative to the current working directory
         source_dir = self.context.dir

@@ -4,6 +4,8 @@ import shutil
 import subprocess
 from PIL import Image
 
+from .context import Context
+
 
 class FileInWrongDirError(Exception):
     pass

@@ -160,7 +162,7 @@ class ImageProcessor:
     def get_searcher_by_path(self, path: str, rel_dir: str, source_dir: str) -> ImageProcessorNamespaceSearcher:
         return ImageProcessorNamespaceSearcher(self.get_namespace_by_path(path), rel_dir, source_dir)
 
-    def process_image(self, input_filename: str, format: str, searcher: ImageProcessorSearcher, width: int=None, height:int=None, quality: int=None, dpi: int=None, fit: bool=True, deps: list[str]=[]) -> str:
+    def process_image(self, input_filename: str, format: str, searcher: ImageProcessorSearcher, context: Context=None, width: int=None, height:int=None, quality: int=None, dpi: int=None, fit: bool=True, deps: list[str]=[]) -> str:
         name = os.path.basename(input_filename)
         base, ext = os.path.splitext(name)
         ext = ext[1:]

@@ -237,6 +239,8 @@ class ImageProcessor:
         if subprocess.run(['convert', *density_arg, full_path, *resize_arg, *quality_arg, target_path]).returncode != 0:
             raise ImageMagickError(f"Could not convert '{full_path}' to '{format}'")
 
+        if context is not None:
+            context.add_deps(deps_full)
         return target_name
 
     def is_outdated(self, target: str, deps: list[str]):

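process_image takes the context as an optional keyword argument, so existing callers keep working; only when a context is supplied does it register the files the conversion read (deps_full in the surrounding code) on the shared set. A self-contained sketch of that optional-context pattern, with the processor internals replaced by hypothetical stand-ins:

import os

class FakeContext:
    # Stand-in for Context; only add_deps() matters for this sketch.
    def __init__(self):
        self.deps = set()
    def add_deps(self, deps):
        self.deps.update(os.path.abspath(p) for p in deps)

def process_image(input_filename, format, context=None, deps=()):
    deps_full = [input_filename, *deps]  # stand-in for the real dependency discovery
    # ... the actual conversion would happen here ...
    if context is not None:              # callers that pass no context are unaffected
        context.add_deps(deps_full)
    return os.path.splitext(os.path.basename(input_filename))[0] + "." + format

ctx = FakeContext()
print(process_image("img/logo.svg", "png", context=ctx, deps=["img/logo.css"]))
print(sorted(ctx.deps))
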
@@ -498,6 +498,7 @@ class OutputGenerator:
         self.generate_simple_tag(e)
         if "footer_content" in e.metadata:
             self.generate(e.metadata["footer_content"])
 
     def generate_BlockGroup(self, e: BlockGroup):
         self.generate_simple_tag(e)
 

@@ -110,6 +110,7 @@ class UCWTexGenerator(OutputGenerator):
         url = e.url
 
         additional_args = self.get_image_processor_args(e.attributes)
+        additional_args["context"] = self.context
 
         # The directory of the current file relative to the current working directory
         source_dir = self.context.dir

@@ -153,7 +153,9 @@ class TransformProcessor(NOPProcessor):
         pwd = os.path.abspath(".")
         if os.path.commonpath([full_path, pwd]) != os.path.commonpath([pwd]):
             return nullify(e)
-        text = open(self.context.dir + "/" + e.attributes["partial"], "r").read()
+        filename = self.context.dir + "/" + e.attributes["partial"]
+        self.context.add_dep(filename)
+        text = open(filename, "r").read()
         path = self.context.dir + "/" + e.attributes["partial"]
         if e.attributes["type"] == "md":
             includedDoc = import_md(text)

@@ -217,7 +219,9 @@ class TransformProcessor(NOPProcessor):
         if not "type" in e.attributes:
             e.attributes["type"] = "module"
         if e.attributes["type"] == "md":
-            importedDoc = import_md(open(self.context.dir + "/" + e.content[0].text[1:], "r").read())
+            filename = self.context.dir + "/" + e.content[0].text[1:]
+            self.context.add_dep(filename)
+            importedDoc = import_md(open(filename, "r").read())
             self.transform(importedDoc.content)
         elif e.attributes["type"] == "module":
             matches = re.match(r"^(\w+)(?: as (\w+))?$", e.content[0].text[1:])

@@ -227,7 +231,9 @@ class TransformProcessor(NOPProcessor):
             module_name = matches.group(1) if matches.group(2) is None else matches.group(2)
             self.context.add_commands_from_module(module, module_name)
         elif e.attributes["type"] == "metadata":
-            data = json.load(open(self.context.dir + "/" + e.content[0].text[1:], "r"))
+            filename = self.context.dir + "/" + e.content[0].text[1:]
+            self.context.add_dep(filename)
+            data = json.load(open(filename, "r"))
             key = "" if not "key" in e.attributes else e.attributes["key"]
             self.context.import_metadata(data, key)
         else: