++ +Batch: + + $ markdown-it README.md README.footer.md > index.html +""" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("-v", "--version", action="version", version=version_str) + parser.add_argument( + "filenames", nargs="*", help="specify an optional list of files to convert" + ) + return parser.parse_args(args) + + +def print_heading() -> None: + print(f"{version_str} (interactive)") + print("Type Ctrl-D to complete input, or Ctrl-C to exit.") + + +if __name__ == "__main__": + exit_code = main(sys.argv[1:]) + sys.exit(exit_code) diff --git a/Scripts/Lib/site-packages/markdown_it/common/__init__.py b/Scripts/Lib/site-packages/markdown_it/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/__init__.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000..22f2957 Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/entities.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/entities.cpython-310.pyc new file mode 100644 index 0000000..ee1304a Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/entities.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_blocks.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_blocks.cpython-310.pyc new file mode 100644 index 0000000..6382e37 Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_blocks.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_re.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_re.cpython-310.pyc new file mode 100644 index 
0000000..c8925fe Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/html_re.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/normalize_url.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/normalize_url.cpython-310.pyc new file mode 100644 index 0000000..a0deac1 Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/normalize_url.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/__pycache__/utils.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000..677a890 Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/common/__pycache__/utils.cpython-310.pyc differ diff --git a/Scripts/Lib/site-packages/markdown_it/common/entities.py b/Scripts/Lib/site-packages/markdown_it/common/entities.py new file mode 100644 index 0000000..14d08ec --- /dev/null +++ b/Scripts/Lib/site-packages/markdown_it/common/entities.py @@ -0,0 +1,5 @@ +"""HTML5 entities map: { name -> characters }.""" + +import html.entities + +entities = {name.rstrip(";"): chars for name, chars in html.entities.html5.items()} diff --git a/Scripts/Lib/site-packages/markdown_it/common/html_blocks.py b/Scripts/Lib/site-packages/markdown_it/common/html_blocks.py new file mode 100644 index 0000000..8a3b0b7 --- /dev/null +++ b/Scripts/Lib/site-packages/markdown_it/common/html_blocks.py @@ -0,0 +1,69 @@ +"""List of valid html blocks names, according to commonmark spec +http://jgm.github.io/CommonMark/spec.html#html-blocks +""" + +# see https://spec.commonmark.org/0.31.2/#html-blocks +block_names = [ + "address", + "article", + "aside", + "base", + "basefont", + "blockquote", + "body", + "caption", + "center", + "col", + "colgroup", + "dd", + "details", + "dialog", + "dir", + "div", + "dl", + "dt", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + 
"frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hr", + "html", + "iframe", + "legend", + "li", + "link", + "main", + "menu", + "menuitem", + "nav", + "noframes", + "ol", + "optgroup", + "option", + "p", + "param", + "search", + "section", + "summary", + "table", + "tbody", + "td", + "tfoot", + "th", + "thead", + "title", + "tr", + "track", + "ul", +] diff --git a/Scripts/Lib/site-packages/markdown_it/common/html_re.py b/Scripts/Lib/site-packages/markdown_it/common/html_re.py new file mode 100644 index 0000000..ab822c5 --- /dev/null +++ b/Scripts/Lib/site-packages/markdown_it/common/html_re.py @@ -0,0 +1,39 @@ +"""Regexps to match html elements""" + +import re + +attr_name = "[a-zA-Z_:][a-zA-Z0-9:._-]*" + +unquoted = "[^\"'=<>`\\x00-\\x20]+" +single_quoted = "'[^']*'" +double_quoted = '"[^"]*"' + +attr_value = "(?:" + unquoted + "|" + single_quoted + "|" + double_quoted + ")" + +attribute = "(?:\\s+" + attr_name + "(?:\\s*=\\s*" + attr_value + ")?)" + +open_tag = "<[A-Za-z][A-Za-z0-9\\-]*" + attribute + "*\\s*\\/?>" + +close_tag = "<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>" +comment = "" +processing = "<[?][\\s\\S]*?[?]>" +declaration = "]*>" +cdata = "" + +HTML_TAG_RE = re.compile( + "^(?:" + + open_tag + + "|" + + close_tag + + "|" + + comment + + "|" + + processing + + "|" + + declaration + + "|" + + cdata + + ")" +) +HTML_OPEN_CLOSE_TAG_STR = "^(?:" + open_tag + "|" + close_tag + ")" +HTML_OPEN_CLOSE_TAG_RE = re.compile(HTML_OPEN_CLOSE_TAG_STR) diff --git a/Scripts/Lib/site-packages/markdown_it/common/normalize_url.py b/Scripts/Lib/site-packages/markdown_it/common/normalize_url.py new file mode 100644 index 0000000..92720b3 --- /dev/null +++ b/Scripts/Lib/site-packages/markdown_it/common/normalize_url.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from collections.abc import Callable +from contextlib import suppress +import re +from urllib.parse import quote, unquote, urlparse, urlunparse # noqa: F401 + +import 
mdurl + +from .. import _punycode + +RECODE_HOSTNAME_FOR = ("http:", "https:", "mailto:") + + +def normalizeLink(url: str) -> str: + """Normalize destination URLs in links + + :: + + [label]: destination 'title' + ^^^^^^^^^^^ + """ + parsed = mdurl.parse(url, slashes_denote_host=True) + + # Encode hostnames in urls like: + # `http://host/`, `https://host/`, `mailto:user@host`, `//host/` + # + # We don't encode unknown schemas, because it's likely that we encode + # something we shouldn't (e.g. `skype:name` treated as `skype:host`) + # + if parsed.hostname and ( + not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR + ): + with suppress(Exception): + parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname)) + + return mdurl.encode(mdurl.format(parsed)) + + +def normalizeLinkText(url: str) -> str: + """Normalize autolink content + + :: + +markdown input
+
` tags.
+ """
+ env = {} if env is None else env
+ return self.renderer.render(self.parseInline(src, env), self.options, env)
+
+ # link methods
+
+ def validateLink(self, url: str) -> bool:
+ """Validate if the URL link is allowed in output.
+
+ This validator can prohibit more than really needed to prevent XSS.
+ It's a tradeoff to keep code simple and to be secure by default.
+
+ Note: the url should be normalized at this point, and existing entities decoded.
+ """
+ return normalize_url.validateLink(url)
+
+ def normalizeLink(self, url: str) -> str:
+ """Normalize destination URLs in links
+
+ ::
+
+ [label]: destination 'title'
+ ^^^^^^^^^^^
+ """
+ return normalize_url.normalizeLink(url)
+
+ def normalizeLinkText(self, link: str) -> str:
+ """Normalize autolink content
+
+ ::
+
+
+ markdown input This is a doc This is a doc
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with `.
+ #
+ needLf = False
+
+ result += ">\n" if needLf else ">"
+
+ return result
+
+ @staticmethod
+ def renderAttrs(token: Token) -> str:
+ """Render token attributes to string."""
+ result = ""
+
+ for key, value in token.attrItems():
+ result += " " + escapeHtml(key) + '="' + escapeHtml(str(value)) + '"'
+
+ return result
+
+ def renderInlineAsText(
+ self,
+ tokens: Sequence[Token] | None,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ """Special kludge for image `alt` attributes to conform CommonMark spec.
+
+ Don't try to use it! Spec requires to show `alt` content with stripped markup,
+ instead of simple escaping.
+
+ :param tokens: list on block tokens to render
+ :param options: params of parser instance
+ :param env: additional data from parsed input
+ """
+ result = ""
+
+ for token in tokens or []:
+ if token.type == "text":
+ result += token.content
+ elif token.type == "image":
+ if token.children:
+ result += self.renderInlineAsText(token.children, options, env)
+ elif token.type == "softbreak":
+ result += "\n"
+
+ return result
+
+ ###################################################
+
+ def code_inline(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ token = tokens[idx]
+ return (
+ "
"
+ + escapeHtml(tokens[idx].content)
+ + ""
+ )
+
+ def code_block(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+
+ return (
+ "
\n"
+ )
+
+ def fence(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+ info = unescapeAll(token.info).strip() if token.info else ""
+ langName = ""
+ langAttrs = ""
+
+ if info:
+ arr = info.split(maxsplit=1)
+ langName = arr[0]
+ if len(arr) == 2:
+ langAttrs = arr[1]
+
+ if options.highlight:
+ highlighted = options.highlight(
+ token.content, langName, langAttrs
+ ) or escapeHtml(token.content)
+ else:
+ highlighted = escapeHtml(token.content)
+
+ if highlighted.startswith(""
+ + escapeHtml(tokens[idx].content)
+ + "
\n"
+ )
+
+ return (
+ ""
+ + highlighted
+ + "
\n"
+ )
+
+ def image(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+
+ # "alt" attr MUST be set, even if empty. Because it's mandatory and
+ # should be placed on proper position for tests.
+ if token.children:
+ token.attrSet("alt", self.renderInlineAsText(token.children, options, env))
+ else:
+ token.attrSet("alt", "")
+
+ return self.renderToken(tokens, idx, options, env)
+
+ def hardbreak(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return ""
+ + highlighted
+ + "
\n" if options.xhtmlOut else "
\n"
+
+ def softbreak(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return (
+ ("
\n" if options.xhtmlOut else "
\n") if options.breaks else "\n"
+ )
+
+ def text(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return escapeHtml(tokens[idx].content)
+
+ def html_block(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return tokens[idx].content
+
+ def html_inline(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return tokens[idx].content
diff --git a/Scripts/Lib/site-packages/markdown_it/ruler.py b/Scripts/Lib/site-packages/markdown_it/ruler.py
new file mode 100644
index 0000000..91ab580
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/ruler.py
@@ -0,0 +1,275 @@
+"""
+class Ruler
+
+Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
+[[MarkdownIt#inline]] to manage sequences of functions (rules):
+
+- keep rules in defined order
+- assign the name to each rule
+- enable/disable rules
+- add/replace rules
+- allow assign rules to additional named chains (in the same)
+- caching lists of active rules
+
+You will not need use this class directly until write plugins. For simple
+rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
+[[MarkdownIt.use]].
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Generic, TypedDict, TypeVar
+import warnings
+
+from .utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+class StateBase:
+ def __init__(self, src: str, md: MarkdownIt, env: EnvType):
+ self.src = src
+ self.env = env
+ self.md = md
+
+ @property
+ def src(self) -> str:
+ return self._src
+
+ @src.setter
+ def src(self, value: str) -> None:
+ self._src = value
+ self._srcCharCode: tuple[int, ...] | None = None
+
+ @property
+ def srcCharCode(self) -> tuple[int, ...]:
+ warnings.warn(
+ "StateBase.srcCharCode is deprecated. Use StateBase.src instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if self._srcCharCode is None:
+ self._srcCharCode = tuple(ord(c) for c in self._src)
+ return self._srcCharCode
+
+
+class RuleOptionsType(TypedDict, total=False):
+ alt: list[str]
+
+
+RuleFuncTv = TypeVar("RuleFuncTv")
+"""A rule function, whose signature is dependent on the state type."""
+
+
+@dataclass(slots=True)
+class Rule(Generic[RuleFuncTv]):
+ name: str
+ enabled: bool
+ fn: RuleFuncTv = field(repr=False)
+ alt: list[str]
+
+
+class Ruler(Generic[RuleFuncTv]):
+ def __init__(self) -> None:
+ # List of added rules.
+ self.__rules__: list[Rule[RuleFuncTv]] = []
+ # Cached rule chains.
+ # First level - chain name, '' for default.
+ # Second level - diginal anchor for fast filtering by charcodes.
+ self.__cache__: dict[str, list[RuleFuncTv]] | None = None
+
+ def __find__(self, name: str) -> int:
+ """Find rule index by name"""
+ for i, rule in enumerate(self.__rules__):
+ if rule.name == name:
+ return i
+ return -1
+
+ def __compile__(self) -> None:
+ """Build rules lookup cache"""
+ chains = {""}
+ # collect unique names
+ for rule in self.__rules__:
+ if not rule.enabled:
+ continue
+ for name in rule.alt:
+ chains.add(name)
+ self.__cache__ = {}
+ for chain in chains:
+ self.__cache__[chain] = []
+ for rule in self.__rules__:
+ if not rule.enabled:
+ continue
+ if chain and (chain not in rule.alt):
+ continue
+ self.__cache__[chain].append(rule.fn)
+
+ def at(
+ self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None
+ ) -> None:
+ """Replace rule by name with new function & options.
+
+ :param ruleName: rule name to replace.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(ruleName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {ruleName}")
+ self.__rules__[index].fn = fn
+ self.__rules__[index].alt = options.get("alt", [])
+ self.__cache__ = None
+
+ def before(
+ self,
+ beforeName: str,
+ ruleName: str,
+ fn: RuleFuncTv,
+ options: RuleOptionsType | None = None,
+ ) -> None:
+ """Add new rule to chain before one with given name.
+
+ :param beforeName: new rule will be added before this one.
+ :param ruleName: new rule will be added before this one.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(beforeName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {beforeName}")
+ self.__rules__.insert(
+ index, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", []))
+ )
+ self.__cache__ = None
+
+ def after(
+ self,
+ afterName: str,
+ ruleName: str,
+ fn: RuleFuncTv,
+ options: RuleOptionsType | None = None,
+ ) -> None:
+ """Add new rule to chain after one with given name.
+
+ :param afterName: new rule will be added after this one.
+ :param ruleName: new rule will be added after this one.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(afterName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {afterName}")
+ self.__rules__.insert(
+ index + 1, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", []))
+ )
+ self.__cache__ = None
+
+ def push(
+ self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None
+ ) -> None:
+ """Push new rule to the end of chain.
+
+ :param ruleName: new rule will be added to the end of chain.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+
+ """
+ self.__rules__.append(
+ Rule[RuleFuncTv](ruleName, True, fn, (options or {}).get("alt", []))
+ )
+ self.__cache__ = None
+
+ def enable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Enable rules with given names.
+
+ :param names: name or list of rule names to enable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ result: list[str] = []
+ for name in names:
+ idx = self.__find__(name)
+ if (idx < 0) and ignoreInvalid:
+ continue
+ if (idx < 0) and not ignoreInvalid:
+ raise KeyError(f"Rules manager: invalid rule name {name}")
+ self.__rules__[idx].enabled = True
+ result.append(name)
+ self.__cache__ = None
+ return result
+
+ def enableOnly(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Enable rules with given names, and disable everything else.
+
+ :param names: name or list of rule names to enable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ for rule in self.__rules__:
+ rule.enabled = False
+ return self.enable(names, ignoreInvalid)
+
+ def disable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Disable rules with given names.
+
+ :param names: name or list of rule names to enable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ result = []
+ for name in names:
+ idx = self.__find__(name)
+ if (idx < 0) and ignoreInvalid:
+ continue
+ if (idx < 0) and not ignoreInvalid:
+ raise KeyError(f"Rules manager: invalid rule name {name}")
+ self.__rules__[idx].enabled = False
+ result.append(name)
+ self.__cache__ = None
+ return result
+
+ def getRules(self, chainName: str = "") -> list[RuleFuncTv]:
+ """Return array of active functions (rules) for given chain name.
+ It analyzes rules configuration, compiles caches if not exists and returns result.
+
+ Default chain name is `''` (empty string). It can't be skipped.
+ That's done intentionally, to keep signature monomorphic for high speed.
+
+ """
+ if self.__cache__ is None:
+ self.__compile__()
+ assert self.__cache__ is not None
+ # Chain can be empty, if rules disabled. But we still have to return Array.
+ return self.__cache__.get(chainName, []) or []
+
+ def get_all_rules(self) -> list[str]:
+ """Return all available rule names."""
+ return [r.name for r in self.__rules__]
+
+ def get_active_rules(self) -> list[str]:
+ """Return the active rule names."""
+ return [r.name for r in self.__rules__ if r.enabled]
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__init__.py b/Scripts/Lib/site-packages/markdown_it/rules_block/__init__.py
new file mode 100644
index 0000000..517da23
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/__init__.py
@@ -0,0 +1,27 @@
+__all__ = (
+ "StateBlock",
+ "blockquote",
+ "code",
+ "fence",
+ "heading",
+ "hr",
+ "html_block",
+ "lheading",
+ "list_block",
+ "paragraph",
+ "reference",
+ "table",
+)
+
+from .blockquote import blockquote
+from .code import code
+from .fence import fence
+from .heading import heading
+from .hr import hr
+from .html_block import html_block
+from .lheading import lheading
+from .list import list_block
+from .paragraph import paragraph
+from .reference import reference
+from .state_block import StateBlock
+from .table import table
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/__init__.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..9d82867
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/__init__.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc
new file mode 100644
index 0000000..3c90c50
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/code.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/code.cpython-310.pyc
new file mode 100644
index 0000000..2b72854
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/code.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/fence.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/fence.cpython-310.pyc
new file mode 100644
index 0000000..a254108
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/fence.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/heading.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/heading.cpython-310.pyc
new file mode 100644
index 0000000..a80c504
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/heading.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/hr.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/hr.cpython-310.pyc
new file mode 100644
index 0000000..6e64def
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/hr.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/html_block.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/html_block.cpython-310.pyc
new file mode 100644
index 0000000..6b15835
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/html_block.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc
new file mode 100644
index 0000000..72131ae
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/list.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/list.cpython-310.pyc
new file mode 100644
index 0000000..3dd2487
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/list.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc
new file mode 100644
index 0000000..d893445
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/reference.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/reference.cpython-310.pyc
new file mode 100644
index 0000000..e671453
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/reference.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/state_block.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/state_block.cpython-310.pyc
new file mode 100644
index 0000000..edd5d55
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/state_block.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/table.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/table.cpython-310.pyc
new file mode 100644
index 0000000..a280b99
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_block/__pycache__/table.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/blockquote.py b/Scripts/Lib/site-packages/markdown_it/rules_block/blockquote.py
new file mode 100644
index 0000000..0c9081b
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/blockquote.py
@@ -0,0 +1,299 @@
+# Block quotes
+from __future__ import annotations
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
+def blockquote(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
+ LOGGER.debug(
+ "entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent
+ )
+
+ oldLineMax = state.lineMax
+ pos = state.bMarks[startLine] + state.tShift[startLine]
+ max = state.eMarks[startLine]
+
+ if state.is_code_block(startLine):
+ return False
+
+ # check the block quote marker
+ try:
+ if state.src[pos] != ">":
+ return False
+ except IndexError:
+ return False
+ pos += 1
+
+ # we know that it's going to be a valid blockquote,
+ # so no point trying to find the end of it in silent mode
+ if silent:
+ return True
+
+ # set offset past spaces and ">"
+ initial = offset = state.sCount[startLine] + 1
+
+ try:
+ second_char: str | None = state.src[pos]
+ except IndexError:
+ second_char = None
+
+ # skip one optional space after '>'
+ if second_char == " ":
+ # ' > test '
+ # ^ -- position start of line here:
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ spaceAfterMarker = True
+ elif second_char == "\t":
+ spaceAfterMarker = True
+
+ if (state.bsCount[startLine] + offset) % 4 == 3:
+ # ' >\t test '
+ # ^ -- position start of line here (tab has width==1)
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ else:
+ # ' >\t test '
+ # ^ -- position start of line here + shift bsCount slightly
+ # to make extra space appear
+ adjustTab = True
+
+ else:
+ spaceAfterMarker = False
+
+ oldBMarks = [state.bMarks[startLine]]
+ state.bMarks[startLine] = pos
+
+ while pos < max:
+ ch = state.src[pos]
+
+ if isStrSpace(ch):
+ if ch == "\t":
+ offset += (
+ 4
+ - (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4
+ )
+ else:
+ offset += 1
+
+ else:
+ break
+
+ pos += 1
+
+ oldBSCount = [state.bsCount[startLine]]
+ state.bsCount[startLine] = (
+ state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0)
+ )
+
+ lastLineEmpty = pos >= max
+
+ oldSCount = [state.sCount[startLine]]
+ state.sCount[startLine] = offset - initial
+
+ oldTShift = [state.tShift[startLine]]
+ state.tShift[startLine] = pos - state.bMarks[startLine]
+
+ terminatorRules = state.md.block.ruler.getRules("blockquote")
+
+ oldParentType = state.parentType
+ state.parentType = "blockquote"
+
+ # Search the end of the block
+ #
+ # Block ends with either:
+ # 1. an empty line outside:
+ # ```
+ # > test
+ #
+ # ```
+ # 2. an empty line inside:
+ # ```
+ # >
+ # test
+ # ```
+ # 3. another tag:
+ # ```
+ # > test
+ # - - -
+ # ```
+
+ # for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
+ nextLine = startLine + 1
+ while nextLine < endLine:
+ # check if it's outdented, i.e. it's inside list item and indented
+ # less than said list item:
+ #
+ # ```
+ # 1. anything
+ # > current blockquote
+ # 2. checking this line
+ # ```
+ isOutdented = state.sCount[nextLine] < state.blkIndent
+
+ pos = state.bMarks[nextLine] + state.tShift[nextLine]
+ max = state.eMarks[nextLine]
+
+ if pos >= max:
+ # Case 1: line is not inside the blockquote, and this line is empty.
+ break
+
+ evaluatesTrue = state.src[pos] == ">" and not isOutdented
+ pos += 1
+ if evaluatesTrue:
+ # This line is inside the blockquote.
+
+ # set offset past spaces and ">"
+ initial = offset = state.sCount[nextLine] + 1
+
+ try:
+ next_char: str | None = state.src[pos]
+ except IndexError:
+ next_char = None
+
+ # skip one optional space after '>'
+ if next_char == " ":
+ # ' > test '
+ # ^ -- position start of line here:
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ spaceAfterMarker = True
+ elif next_char == "\t":
+ spaceAfterMarker = True
+
+ if (state.bsCount[nextLine] + offset) % 4 == 3:
+ # ' >\t test '
+ # ^ -- position start of line here (tab has width==1)
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ else:
+ # ' >\t test '
+ # ^ -- position start of line here + shift bsCount slightly
+ # to make extra space appear
+ adjustTab = True
+
+ else:
+ spaceAfterMarker = False
+
+ oldBMarks.append(state.bMarks[nextLine])
+ state.bMarks[nextLine] = pos
+
+ while pos < max:
+ ch = state.src[pos]
+
+ if isStrSpace(ch):
+ if ch == "\t":
+ offset += (
+ 4
+ - (
+ offset
+ + state.bsCount[nextLine]
+ + (1 if adjustTab else 0)
+ )
+ % 4
+ )
+ else:
+ offset += 1
+ else:
+ break
+
+ pos += 1
+
+ lastLineEmpty = pos >= max
+
+ oldBSCount.append(state.bsCount[nextLine])
+ state.bsCount[nextLine] = (
+ state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0)
+ )
+
+ oldSCount.append(state.sCount[nextLine])
+ state.sCount[nextLine] = offset - initial
+
+ oldTShift.append(state.tShift[nextLine])
+ state.tShift[nextLine] = pos - state.bMarks[nextLine]
+
+ nextLine += 1
+ continue
+
+ # Case 2: line is not inside the blockquote, and the last line was empty.
+ if lastLineEmpty:
+ break
+
+ # Case 3: another tag found.
+ terminate = False
+
+ for terminatorRule in terminatorRules:
+ if terminatorRule(state, nextLine, endLine, True):
+ terminate = True
+ break
+
+ if terminate:
+ # Quirk to enforce "hard termination mode" for paragraphs;
+ # normally if you call `tokenize(state, startLine, nextLine)`,
+ # paragraphs will look below nextLine for paragraph continuation,
+ # but if blockquote is terminated by another tag, they shouldn't
+ state.lineMax = nextLine
+
+ if state.blkIndent != 0:
+ # state.blkIndent was non-zero, we now set it to zero,
+ # so we need to re-calculate all offsets to appear as
+ # if indent wasn't changed
+ oldBMarks.append(state.bMarks[nextLine])
+ oldBSCount.append(state.bsCount[nextLine])
+ oldTShift.append(state.tShift[nextLine])
+ oldSCount.append(state.sCount[nextLine])
+ state.sCount[nextLine] -= state.blkIndent
+
+ break
+
+ oldBMarks.append(state.bMarks[nextLine])
+ oldBSCount.append(state.bsCount[nextLine])
+ oldTShift.append(state.tShift[nextLine])
+ oldSCount.append(state.sCount[nextLine])
+
+ # A negative indentation means that this is a paragraph continuation
+ #
+ state.sCount[nextLine] = -1
+
+ nextLine += 1
+
+ oldIndent = state.blkIndent
+ state.blkIndent = 0
+
+ token = state.push("blockquote_open", "blockquote", 1)
+ token.markup = ">"
+ token.map = lines = [startLine, 0]
+
+ state.md.block.tokenize(state, startLine, nextLine)
+
+ token = state.push("blockquote_close", "blockquote", -1)
+ token.markup = ">"
+
+ state.lineMax = oldLineMax
+ state.parentType = oldParentType
+ lines[1] = state.line
+
+ # Restore original tShift; this might not be necessary since the parser
+ # has already been here, but just to make sure we can do that.
+ for i, item in enumerate(oldTShift):
+ state.bMarks[i + startLine] = oldBMarks[i]
+ state.tShift[i + startLine] = item
+ state.sCount[i + startLine] = oldSCount[i]
+ state.bsCount[i + startLine] = oldBSCount[i]
+
+ state.blkIndent = oldIndent
+
+ return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/code.py b/Scripts/Lib/site-packages/markdown_it/rules_block/code.py
new file mode 100644
index 0000000..af8a41c
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/code.py
@@ -0,0 +1,36 @@
+"""Code block (4 spaces padded)."""
+
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def code(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse an indented code block (content indented by 4+ spaces).

    Consumes every subsequent line that is either empty or still
    code-block-indented, then pushes a single ``code_block`` token.
    """
    LOGGER.debug("entering code: %s, %s, %s, %s", state, startLine, endLine, silent)

    if not state.is_code_block(startLine):
        return False

    scan = startLine + 1
    lastNonEmpty = scan

    # Extend the block over empty lines and further code-indented lines;
    # trailing empty lines are not included (lastNonEmpty lags behind).
    while scan < endLine:
        if state.isEmpty(scan):
            scan += 1
        elif state.is_code_block(scan):
            scan += 1
            lastNonEmpty = scan
        else:
            break

    state.line = lastNonEmpty

    token = state.push("code_block", "code", 0)
    token.content = (
        state.getLines(startLine, lastNonEmpty, 4 + state.blkIndent, False) + "\n"
    )
    token.map = [startLine, state.line]

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/fence.py b/Scripts/Lib/site-packages/markdown_it/rules_block/fence.py
new file mode 100644
index 0000000..263f1b8
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/fence.py
@@ -0,0 +1,101 @@
+# fences (``` lang, ~~~ lang)
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a fenced code block (``` or ~~~, with an optional info string).

    Pushes a single ``fence`` token whose ``info`` is the text after the
    opening marker run; in ``silent`` (validation) mode only reports whether
    a fence starts at ``startLine``.
    """
    LOGGER.debug("entering fence: %s, %s, %s, %s", state, startLine, endLine, silent)

    haveEndMarker = False
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # a fence needs at least three marker characters on the line
    if pos + 3 > maximum:
        return False

    marker = state.src[pos]

    if marker not in ("~", "`"):
        return False

    # scan marker length
    mem = pos
    pos = state.skipCharsStr(pos, marker)

    length = pos - mem

    if length < 3:
        return False

    markup = state.src[mem:pos]
    params = state.src[pos:maximum]

    # the info string of a backtick fence may not itself contain a backtick
    if marker == "`" and marker in params:
        return False

    # Since start is found, we can report success here in validation mode
    if silent:
        return True

    # search end of block
    nextLine = startLine

    while True:
        nextLine += 1
        if nextLine >= endLine:
            # unclosed block should be autoclosed by end of document.
            # also block seems to be autoclosed by end of parent
            break

        pos = mem = state.bMarks[nextLine] + state.tShift[nextLine]
        maximum = state.eMarks[nextLine]

        if pos < maximum and state.sCount[nextLine] < state.blkIndent:
            # non-empty line with negative indent should stop the list:
            # - ```
            #   test
            break

        # empty line (pos past end of src) cannot hold the closing marker
        try:
            if state.src[pos] != marker:
                continue
        except IndexError:
            break

        if state.is_code_block(nextLine):
            continue

        pos = state.skipCharsStr(pos, marker)

        # closing code fence must be at least as long as the opening one
        if pos - mem < length:
            continue

        # make sure tail has spaces only
        pos = state.skipSpaces(pos)

        if pos < maximum:
            continue

        haveEndMarker = True
        # found!
        break

    # If a fence has heading spaces, they should be removed from its inner block
    length = state.sCount[startLine]

    # skip past the closing marker line only if one was actually found
    state.line = nextLine + (1 if haveEndMarker else 0)

    token = state.push("fence", "code", 0)
    token.info = params
    token.content = state.getLines(startLine + 1, nextLine, length, True)
    token.markup = markup
    token.map = [startLine, state.line]

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/heading.py b/Scripts/Lib/site-packages/markdown_it/rules_block/heading.py
new file mode 100644
index 0000000..afcf9ed
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/heading.py
@@ -0,0 +1,69 @@
+"""ATX heading (#, ##, ...)"""
+
+from __future__ import annotations
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def heading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse an ATX heading (``#`` through ``######``).

    Pushes ``heading_open`` / ``inline`` / ``heading_close`` tokens; in
    ``silent`` mode only validates that a heading starts at ``startLine``.
    """
    LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent)

    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # ch is None once the scan runs past the end of src
    ch: str | None = state.src[pos]

    if ch != "#" or pos >= maximum:
        return False

    # count heading level
    level = 1
    pos += 1
    try:
        ch = state.src[pos]
    except IndexError:
        ch = None
    while ch == "#" and pos < maximum and level <= 6:
        level += 1
        pos += 1
        try:
            ch = state.src[pos]
        except IndexError:
            ch = None

    # at most six '#', and the marker run must be followed by space or EOL
    if level > 6 or (pos < maximum and not isStrSpace(ch)):
        return False

    if silent:
        return True

    # Let's cut tails like '    ###  ' from the end of string

    maximum = state.skipSpacesBack(maximum, pos)
    tmp = state.skipCharsStrBack(maximum, "#", pos)
    if tmp > pos and isStrSpace(state.src[tmp - 1]):
        maximum = tmp

    state.line = startLine + 1

    token = state.push("heading_open", "h" + str(level), 1)
    token.markup = "########"[:level]
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = state.src[pos:maximum].strip()
    token.map = [startLine, state.line]
    token.children = []

    token = state.push("heading_close", "h" + str(level), -1)
    token.markup = "########"[:level]

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/hr.py b/Scripts/Lib/site-packages/markdown_it/rules_block/hr.py
new file mode 100644
index 0000000..fca7d79
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/hr.py
@@ -0,0 +1,56 @@
+"""Horizontal rule
+
+At least 3 of these characters on a line * - _
+"""
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def hr(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Thematic break: a line made of at least three '*', '-' or '_' markers,
    optionally interleaved with spaces."""
    LOGGER.debug("entering hr: %s, %s, %s, %s", state, startLine, endLine, silent)

    begin = state.bMarks[startLine] + state.tShift[startLine]
    end = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    try:
        marker = state.src[begin]
    except IndexError:
        return False

    # Check hr marker
    if marker not in ("*", "-", "_"):
        return False

    # Count markers over the rest of the line; spaces are allowed between
    # them, any other character disqualifies the line.
    count = 1
    for ch in state.src[begin + 1 : end]:
        if ch == marker:
            count += 1
        elif not isStrSpace(ch):
            return False

    if count < 3:
        return False

    if silent:
        return True

    state.line = startLine + 1

    token = state.push("hr", "hr", 0)
    token.map = [startLine, state.line]
    token.markup = marker * (count + 1)

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/html_block.py b/Scripts/Lib/site-packages/markdown_it/rules_block/html_block.py
new file mode 100644
index 0000000..3d43f6e
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/html_block.py
@@ -0,0 +1,90 @@
+# HTML block
+from __future__ import annotations
+
+import logging
+import re
+
+from ..common.html_blocks import block_names
+from ..common.html_re import HTML_OPEN_CLOSE_TAG_STR
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
# An array of opening and corresponding closing sequences for html tags,
# last argument defines whether it can terminate a paragraph or not
#
# NOTE: the original patterns in this file were corrupted (the literal
# "<!--", "-->", "<![CDATA[", "</" etc. sequences had been stripped,
# leaving 2-element tuples that break the html_seq[1]/html_seq[2]
# indexing below). Restored from the canonical CommonMark sequences.
HTML_SEQUENCES: list[tuple[re.Pattern[str], re.Pattern[str], bool]] = [
    (
        re.compile(r"^<(script|pre|style|textarea)(?=(\s|>|$))", re.IGNORECASE),
        re.compile(r"<\/(script|pre|style|textarea)>", re.IGNORECASE),
        True,
    ),
    # HTML comment: <!-- ... -->
    (re.compile(r"^<!--"), re.compile(r"-->"), True),
    # processing instruction: <? ... ?>
    (re.compile(r"^<\?"), re.compile(r"\?>"), True),
    # declaration: <!DOCTYPE ... >
    (re.compile(r"^<![A-Z]"), re.compile(r">"), True),
    # CDATA section: <![CDATA[ ... ]]>
    (re.compile(r"^<!\[CDATA\["), re.compile(r"\]\]>"), True),
    # open or close tag of a known block-level element (closed by blank line)
    (
        re.compile("^</?(" + "|".join(block_names) + ")(?=(\\s|/?>|$))", re.IGNORECASE),
        re.compile(r"^$"),
        True,
    ),
    # any other complete open/close tag; cannot interrupt a paragraph
    (re.compile(HTML_OPEN_CLOSE_TAG_STR + "\\s*$"), re.compile(r"^$"), False),
]
+
+
def html_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a raw HTML block (CommonMark HTML block rules).

    Only active when the ``html`` option is enabled; on success pushes a
    single ``html_block`` token holding the raw source lines.
    """
    LOGGER.debug(
        "entering html_block: %s, %s, %s, %s", state, startLine, endLine, silent
    )
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # raw HTML is only recognised when the "html" option is switched on
    if not state.md.options.get("html", None):
        return False

    if state.src[pos] != "<":
        return False

    lineText = state.src[pos:maximum]

    # pick the first opening sequence that matches this line
    html_seq = None
    for HTML_SEQUENCE in HTML_SEQUENCES:
        if HTML_SEQUENCE[0].search(lineText):
            html_seq = HTML_SEQUENCE
            break

    if not html_seq:
        return False

    if silent:
        # true if this sequence can be a terminator, false otherwise
        return html_seq[2]

    nextLine = startLine + 1

    # If we are here - we detected HTML block.
    # Let's roll down till block end.
    if not html_seq[1].search(lineText):
        while nextLine < endLine:
            if state.sCount[nextLine] < state.blkIndent:
                break

            pos = state.bMarks[nextLine] + state.tShift[nextLine]
            maximum = state.eMarks[nextLine]
            lineText = state.src[pos:maximum]

            if html_seq[1].search(lineText):
                # include the closing line itself (unless it is empty)
                if len(lineText) != 0:
                    nextLine += 1
                break
            nextLine += 1

    state.line = nextLine

    token = state.push("html_block", "", 0)
    token.map = [startLine, nextLine]
    token.content = state.getLines(startLine, nextLine, state.blkIndent, True)

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/lheading.py b/Scripts/Lib/site-packages/markdown_it/rules_block/lheading.py
new file mode 100644
index 0000000..3522207
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/lheading.py
@@ -0,0 +1,86 @@
+# lheading (---, ==)
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def lheading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a setext heading: text underlined with ``===`` (h1) or ``---`` (h2)."""
    LOGGER.debug("entering lheading: %s, %s, %s, %s", state, startLine, endLine, silent)

    # level stays None until a valid underline is found
    level = None
    nextLine = startLine + 1
    ruler = state.md.block.ruler
    terminatorRules = ruler.getRules("paragraph")

    if state.is_code_block(startLine):
        return False

    oldParentType = state.parentType
    state.parentType = "paragraph"  # use paragraph to match terminatorRules

    # jump line-by-line until empty one or EOF
    while nextLine < endLine and not state.isEmpty(nextLine):
        # this would be a code block normally, but after paragraph
        # it's considered a lazy continuation regardless of what's there
        if state.sCount[nextLine] - state.blkIndent > 3:
            nextLine += 1
            continue

        # Check for underline in setext header
        if state.sCount[nextLine] >= state.blkIndent:
            pos = state.bMarks[nextLine] + state.tShift[nextLine]
            maximum = state.eMarks[nextLine]

            if pos < maximum:
                marker = state.src[pos]

                if marker in ("-", "="):
                    # an underline is a marker run followed only by spaces
                    pos = state.skipCharsStr(pos, marker)
                    pos = state.skipSpaces(pos)

                    # /* = */
                    if pos >= maximum:
                        level = 1 if marker == "=" else 2
                        break

        # quirk for blockquotes, this line should already be checked by that rule
        if state.sCount[nextLine] < 0:
            nextLine += 1
            continue

        # Some tags can terminate paragraph without empty line.
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break
        if terminate:
            break

        nextLine += 1

    if not level:
        # Didn't find valid underline
        return False

    content = state.getLines(startLine, nextLine, state.blkIndent, False).strip()

    # heading consumes the underline line as well
    state.line = nextLine + 1

    token = state.push("heading_open", "h" + str(level), 1)
    token.markup = marker
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = content
    token.map = [startLine, state.line - 1]
    token.children = []

    token = state.push("heading_close", "h" + str(level), -1)
    token.markup = marker

    state.parentType = oldParentType

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/list.py b/Scripts/Lib/site-packages/markdown_it/rules_block/list.py
new file mode 100644
index 0000000..d8070d7
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/list.py
@@ -0,0 +1,345 @@
+# Lists
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def skipBulletListMarker(state: StateBlock, startLine: int) -> int:
    r"""Search `[-+*][\n ]`; return the position just after the marker on
    success, or -1 on fail."""
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    try:
        marker = state.src[pos]
    except IndexError:
        return -1
    pos += 1

    if marker not in ("*", "-", "+"):
        return -1

    # marker must be followed by whitespace or end of line:
    # " -test " is not a list item
    if pos < maximum and not isStrSpace(state.src[pos]):
        return -1

    return pos
+
+
def skipOrderedListMarker(state: StateBlock, startLine: int) -> int:
    r"""Search `\d+[.)][\n ]`; return the position just after the marker on
    success, or -1 on fail."""
    start = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]
    pos = start

    # an ordered marker needs at least two chars (digit + dot/paren)
    if pos + 1 >= maximum:
        return -1

    ch = state.src[pos]
    pos += 1

    # first character must be a decimal digit
    if not ("0" <= ch <= "9"):
        return -1

    while True:
        # EOL -> fail
        if pos >= maximum:
            return -1

        ch = state.src[pos]
        pos += 1

        if "0" <= ch <= "9":
            # List marker should have no more than 9 digits
            # (prevents integer overflow in browsers)
            if pos - start >= 10:
                return -1
            continue

        # found valid marker terminator
        if ch in (")", "."):
            break

        return -1

    # terminator must be followed by whitespace or end of line:
    # " 1.test " is not a list item
    if pos < maximum and not isStrSpace(state.src[pos]):
        return -1

    return pos
+
+
def markTightParagraphs(state: StateBlock, idx: int) -> None:
    """Hide ``paragraph_open``/``paragraph_close`` token pairs that sit
    directly inside the list items of a tight list starting at ``idx``."""
    target_level = state.level + 2
    tokens = state.tokens
    last = len(tokens) - 2

    i = idx + 2
    while i < last:
        tok = tokens[i]
        if tok.level == target_level and tok.type == "paragraph_open":
            # hide both the open token and its matching close (two ahead)
            tokens[i + 2].hidden = True
            tok.hidden = True
            i += 3
        else:
            i += 1
+
+
def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse ordered and bullet lists.

    Pushes ``ordered_list_open``/``bullet_list_open``, a
    ``list_item_open``/``list_item_close`` pair per item (item contents are
    tokenized recursively), and the matching ``*_close`` token.  In
    ``silent`` mode only validates that a list can start (or interrupt a
    paragraph) at ``startLine``.
    """
    LOGGER.debug("entering list: %s, %s, %s, %s", state, startLine, endLine, silent)

    isTerminatingParagraph = False
    tight = True

    if state.is_code_block(startLine):
        return False

    # Special case:
    #  - item 1
    #   - item 2
    #    - item 3
    #     - item 4
    #      - this one is a paragraph continuation
    if (
        state.listIndent >= 0
        and state.sCount[startLine] - state.listIndent >= 4
        and state.sCount[startLine] < state.blkIndent
    ):
        return False

    # limit conditions when list can interrupt
    # a paragraph (validation mode only)
    # Next list item should still terminate previous list item
    #
    # This code can fail if plugins use blkIndent as well as lists,
    # but I hope the spec gets fixed long before that happens.
    #
    if (
        silent
        and state.parentType == "paragraph"
        and state.sCount[startLine] >= state.blkIndent
    ):
        isTerminatingParagraph = True

    # Detect list type and position after marker
    posAfterMarker = skipOrderedListMarker(state, startLine)
    if posAfterMarker >= 0:
        isOrdered = True
        start = state.bMarks[startLine] + state.tShift[startLine]
        markerValue = int(state.src[start : posAfterMarker - 1])

        # If we're starting a new ordered list right after
        # a paragraph, it should start with 1.
        if isTerminatingParagraph and markerValue != 1:
            return False
    else:
        posAfterMarker = skipBulletListMarker(state, startLine)
        if posAfterMarker >= 0:
            isOrdered = False
        else:
            return False

    # If we're starting a new unordered list right after
    # a paragraph, first line should not be empty.
    if (
        isTerminatingParagraph
        and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]
    ):
        return False

    # We should terminate list on style change. Remember first one to compare.
    markerChar = state.src[posAfterMarker - 1]

    # For validation mode we can terminate immediately
    if silent:
        return True

    # Start list
    listTokIdx = len(state.tokens)

    if isOrdered:
        token = state.push("ordered_list_open", "ol", 1)
        # only emit a "start" attribute when the list does not begin at 1
        if markerValue != 1:
            token.attrs = {"start": markerValue}

    else:
        token = state.push("bullet_list_open", "ul", 1)

    token.map = listLines = [startLine, 0]
    token.markup = markerChar

    #
    # Iterate list items
    #

    nextLine = startLine
    prevEmptyEnd = False
    terminatorRules = state.md.block.ruler.getRules("list")

    oldParentType = state.parentType
    state.parentType = "list"

    while nextLine < endLine:
        pos = posAfterMarker
        maximum = state.eMarks[nextLine]

        initial = offset = (
            state.sCount[nextLine]
            + posAfterMarker
            - (state.bMarks[startLine] + state.tShift[startLine])
        )

        # scan the spaces/tabs after the marker to locate content start,
        # expanding tabs to the next multiple of 4
        while pos < maximum:
            ch = state.src[pos]

            if ch == "\t":
                offset += 4 - (offset + state.bsCount[nextLine]) % 4
            elif ch == " ":
                offset += 1
            else:
                break

            pos += 1

        contentStart = pos

        # trimming space in "-    \n  3" case, indent is 1 here
        indentAfterMarker = 1 if contentStart >= maximum else offset - initial

        # If we have more than 4 spaces, the indent is 1
        # (the rest is just indented code block)
        if indentAfterMarker > 4:
            indentAfterMarker = 1

        # "  -  test"
        #  ^^^^^ - calculating total length of this thing
        indent = initial + indentAfterMarker

        # Run subparser & write tokens
        token = state.push("list_item_open", "li", 1)
        token.markup = markerChar
        token.map = itemLines = [startLine, 0]
        if isOrdered:
            token.info = state.src[start : posAfterMarker - 1]

        # change current state, then restore it after parser subcall
        oldTight = state.tight
        oldTShift = state.tShift[startLine]
        oldSCount = state.sCount[startLine]

        #  - example list
        # ^ listIndent position will be here
        #   ^ blkIndent position will be here
        #
        oldListIndent = state.listIndent
        state.listIndent = state.blkIndent
        state.blkIndent = indent

        state.tight = True
        state.tShift[startLine] = contentStart - state.bMarks[startLine]
        state.sCount[startLine] = offset

        if contentStart >= maximum and state.isEmpty(startLine + 1):
            # workaround for this case
            # (list item is empty, list terminates before "foo"):
            # ~~~~~~~~
            #   -
            #
            #     foo
            # ~~~~~~~~
            state.line = min(state.line + 2, endLine)
        else:
            # NOTE in list.js this was:
            # state.md.block.tokenize(state, startLine, endLine, True)
            # but tokenize does not take the final parameter
            state.md.block.tokenize(state, startLine, endLine)

        # If any of list item is tight, mark list as tight
        if (not state.tight) or prevEmptyEnd:
            tight = False

        # Item become loose if finish with empty line,
        # but we should filter last element, because it means list finish
        prevEmptyEnd = (state.line - startLine) > 1 and state.isEmpty(state.line - 1)

        # restore the block-parser state changed for the subcall
        state.blkIndent = state.listIndent
        state.listIndent = oldListIndent
        state.tShift[startLine] = oldTShift
        state.sCount[startLine] = oldSCount
        state.tight = oldTight

        token = state.push("list_item_close", "li", -1)
        token.markup = markerChar

        nextLine = startLine = state.line
        itemLines[1] = nextLine

        if nextLine >= endLine:
            break

        contentStart = state.bMarks[startLine]

        #
        # Try to check if list is terminated or continued.
        #
        if state.sCount[nextLine] < state.blkIndent:
            break

        if state.is_code_block(startLine):
            break

        # fail if terminating block found
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break

        if terminate:
            break

        # fail if list has another type
        if isOrdered:
            posAfterMarker = skipOrderedListMarker(state, nextLine)
            if posAfterMarker < 0:
                break
            start = state.bMarks[nextLine] + state.tShift[nextLine]
        else:
            posAfterMarker = skipBulletListMarker(state, nextLine)
            if posAfterMarker < 0:
                break

        # marker style change ('-' vs '*' vs '+', '.' vs ')') ends the list
        if markerChar != state.src[posAfterMarker - 1]:
            break

    # Finalize list
    if isOrdered:
        token = state.push("ordered_list_close", "ol", -1)
    else:
        token = state.push("bullet_list_close", "ul", -1)

    token.markup = markerChar

    listLines[1] = nextLine
    state.line = nextLine

    state.parentType = oldParentType

    # mark paragraphs tight if needed
    if tight:
        markTightParagraphs(state, listTokIdx)

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/paragraph.py b/Scripts/Lib/site-packages/markdown_it/rules_block/paragraph.py
new file mode 100644
index 0000000..30ba877
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/paragraph.py
@@ -0,0 +1,66 @@
+"""Paragraph."""
+
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a paragraph: consecutive non-empty lines until a blank line,
    EOF, or a terminator rule fires.

    Pushes ``paragraph_open`` / ``inline`` / ``paragraph_close`` tokens.
    """
    LOGGER.debug(
        "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent
    )

    nextLine = startLine + 1
    terminatorRules = state.md.block.ruler.getRules("paragraph")
    endLine = state.lineMax

    oldParentType = state.parentType
    state.parentType = "paragraph"

    # advance line-by-line until an empty line or EOF
    while nextLine < endLine and not state.isEmpty(nextLine):
        # over-indented lines are lazy continuations after a paragraph, and
        # negative indent is the blockquote quirk (already checked there)
        if state.sCount[nextLine] - state.blkIndent > 3 or state.sCount[nextLine] < 0:
            nextLine += 1
            continue

        # Some tags can terminate paragraph without empty line.
        if any(rule(state, nextLine, endLine, True) for rule in terminatorRules):
            break

        nextLine += 1

    content = state.getLines(startLine, nextLine, state.blkIndent, False).strip()

    state.line = nextLine

    open_token = state.push("paragraph_open", "p", 1)
    open_token.map = [startLine, state.line]

    inline_token = state.push("inline", "", 0)
    inline_token.content = content
    inline_token.map = [startLine, state.line]
    inline_token.children = []

    state.push("paragraph_close", "p", -1)

    state.parentType = oldParentType

    return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/reference.py b/Scripts/Lib/site-packages/markdown_it/rules_block/reference.py
new file mode 100644
index 0000000..ad94d40
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/reference.py
@@ -0,0 +1,235 @@
+import logging
+
+from ..common.utils import charCodeAt, isSpace, normalizeReference
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) -> bool:
    """Parse a link reference definition: ``[label]: destination 'title'``.

    On success stores the reference in ``state.env["references"]`` (the
    first definition of a label wins; later duplicates are recorded under
    ``env["duplicate_refs"]``) and advances ``state.line`` past the
    definition, which may span multiple lines.
    """
    LOGGER.debug(
        "entering reference: %s, %s, %s, %s", state, startLine, _endLine, silent
    )

    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]
    nextLine = startLine + 1

    if state.is_code_block(startLine):
        return False

    if state.src[pos] != "[":
        return False

    # working buffer: first line including its trailing newline; continuation
    # lines are appended on demand via getNextLine()
    string = state.src[pos : maximum + 1]

    # string = state.getLines(startLine, nextLine, state.blkIndent, False).strip()
    maximum = len(string)

    # scan for the closing ']' of the label
    labelEnd = None
    pos = 1
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x5B:  # /* [ */
            return False
        elif ch == 0x5D:  # /* ] */
            labelEnd = pos
            break
        elif ch == 0x0A:  # /* \n */
            # label may continue onto the next line
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif ch == 0x5C:  # /* \ */
            # skip the escaped character; an escaped newline also continues
            pos += 1
            if (
                pos < maximum
                and charCodeAt(string, pos) == 0x0A
                and (lineContent := getNextLine(state, nextLine)) is not None
            ):
                string += lineContent
                maximum = len(string)
                nextLine += 1
        pos += 1

    # the label must be immediately followed by ':'
    if (
        labelEnd is None or labelEnd < 0 or charCodeAt(string, labelEnd + 1) != 0x3A
    ):  # /* : */
        return False

    # [label]:   destination   'title'
    #         ^^^ skip optional whitespace here
    pos = labelEnd + 2
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x0A:
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif isSpace(ch):
            pass
        else:
            break
        pos += 1

    # [label]:   destination   'title'
    #            ^^^^^^^^^^^ parse this
    destRes = state.md.helpers.parseLinkDestination(string, pos, maximum)
    if not destRes.ok:
        return False

    href = state.md.normalizeLink(destRes.str)
    if not state.md.validateLink(href):
        return False

    pos = destRes.pos

    # save cursor state, we could require to rollback later
    destEndPos = pos
    destEndLineNo = nextLine

    # [label]:   destination   'title'
    #                       ^^^ skipping those spaces
    start = pos
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x0A:
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif isSpace(ch):
            pass
        else:
            break
        pos += 1

    # [label]:   destination   'title'
    #                           ^^^^^^^ parse this
    titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, None)
    # a multi-line title keeps requesting continuation lines while it can
    while titleRes.can_continue:
        if (lineContent := getNextLine(state, nextLine)) is None:
            break
        string += lineContent
        pos = maximum
        maximum = len(string)
        nextLine += 1
        titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, titleRes)

    if pos < maximum and start != pos and titleRes.ok:
        title = titleRes.str
        pos = titleRes.pos
    else:
        # no (valid) title: roll back to just after the destination
        title = ""
        pos = destEndPos
        nextLine = destEndLineNo

    # skip trailing spaces until the rest of the line
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if not isSpace(ch):
            break
        pos += 1

    if pos < maximum and charCodeAt(string, pos) != 0x0A and title:
        # garbage at the end of the line after title,
        # but it could still be a valid reference if we roll back
        title = ""
        pos = destEndPos
        nextLine = destEndLineNo
        while pos < maximum:
            ch = charCodeAt(string, pos)
            if not isSpace(ch):
                break
            pos += 1

    if pos < maximum and charCodeAt(string, pos) != 0x0A:
        # garbage at the end of the line
        return False

    label = normalizeReference(string[1:labelEnd])
    if not label:
        # CommonMark 0.20 disallows empty labels
        return False

    # Reference can not terminate anything. This check is for safety only.
    if silent:
        return True

    if "references" not in state.env:
        state.env["references"] = {}

    state.line = nextLine

    # note, this is not part of markdown-it JS, but is useful for renderers
    if state.md.options.get("inline_definitions", False):
        token = state.push("definition", "", 0)
        token.meta = {
            "id": label,
            "title": title,
            "url": href,
            "label": string[1:labelEnd],
        }
        token.map = [startLine, state.line]

    if label not in state.env["references"]:
        state.env["references"][label] = {
            "title": title,
            "href": href,
            "map": [startLine, state.line],
        }
    else:
        state.env.setdefault("duplicate_refs", []).append(
            {
                "title": title,
                "href": href,
                "label": label,
                "map": [startLine, state.line],
            }
        )

    return True
+
+
def getNextLine(state: StateBlock, nextLine: int) -> None | str:
    """Return the raw text of ``nextLine`` (trailing newline included) if it
    can continue a reference definition, otherwise None."""
    endLine = state.lineMax

    # empty line or end of input
    if nextLine >= endLine or state.isEmpty(nextLine):
        return None

    # code-block indentation (lazy continuation after a paragraph) and the
    # blockquote negative-indent quirk both force continuation
    isContinuation = state.is_code_block(nextLine) or state.sCount[nextLine] < 0

    if not isContinuation:
        # Some tags can terminate paragraph without empty line.
        terminatorRules = state.md.block.ruler.getRules("reference")

        oldParentType = state.parentType
        state.parentType = "reference"
        terminate = any(
            rule(state, nextLine, endLine, True) for rule in terminatorRules
        )
        state.parentType = oldParentType

        if terminate:
            # terminated by another block
            return None

    begin = state.bMarks[nextLine] + state.tShift[nextLine]
    end = state.eMarks[nextLine]

    # end + 1 explicitly includes the newline
    return state.src[begin : end + 1]
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/state_block.py b/Scripts/Lib/site-packages/markdown_it/rules_block/state_block.py
new file mode 100644
index 0000000..445ad26
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/state_block.py
@@ -0,0 +1,261 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Literal
+
+from ..common.utils import isStrSpace
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it.main import MarkdownIt
+
+
+class StateBlock(StateBase):
+ def __init__(
+ self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token]
+ ) -> None:
+ self.src = src
+
+ # link to parser instance
+ self.md = md
+
+ self.env = env
+
+ #
+ # Internal state variables
+ #
+
+ self.tokens = tokens
+
+ self.bMarks: list[int] = [] # line begin offsets for fast jumps
+ self.eMarks: list[int] = [] # line end offsets for fast jumps
+ # offsets of the first non-space characters (tabs not expanded)
+ self.tShift: list[int] = []
+ self.sCount: list[int] = [] # indents for each line (tabs expanded)
+
+ # An amount of virtual spaces (tabs expanded) between beginning
+ # of each line (bMarks) and real beginning of that line.
+ #
+ # It exists only as a hack because blockquotes override bMarks
+ # losing information in the process.
+ #
+ # It's used only when expanding tabs, you can think about it as
+ # an initial tab length, e.g. bsCount=21 applied to string `\t123`
+ # means first tab should be expanded to 4-21%4 === 3 spaces.
+ #
+ self.bsCount: list[int] = []
+
+ # block parser variables
+ self.blkIndent = 0 # required block content indent (for example, if we are
+ # inside a list, it would be positioned after list marker)
+ self.line = 0 # line index in src
+ self.lineMax = 0 # lines count
+ self.tight = False # loose/tight mode for lists
+ self.ddIndent = -1 # indent of the current dd block (-1 if there isn't any)
+ self.listIndent = -1 # indent of the current list block (-1 if there isn't any)
+
+ # can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
+ # used in lists to determine if they interrupt a paragraph
+ self.parentType = "root"
+
+ self.level = 0
+
+ # renderer
+ self.result = ""
+
+ # Create caches
+ # Generate markers.
+ indent_found = False
+
+ start = pos = indent = offset = 0
+ length = len(self.src)
+
+ for pos, character in enumerate(self.src):
+ if not indent_found:
+ if isStrSpace(character):
+ indent += 1
+
+ if character == "\t":
+ offset += 4 - offset % 4
+ else:
+ offset += 1
+ continue
+ else:
+ indent_found = True
+
+ if character == "\n" or pos == length - 1:
+ if character != "\n":
+ pos += 1
+ self.bMarks.append(start)
+ self.eMarks.append(pos)
+ self.tShift.append(indent)
+ self.sCount.append(offset)
+ self.bsCount.append(0)
+
+ indent_found = False
+ indent = 0
+ offset = 0
+ start = pos + 1
+
+ # Push fake entry to simplify cache bounds checks
+ self.bMarks.append(length)
+ self.eMarks.append(length)
+ self.tShift.append(0)
+ self.sCount.append(0)
+ self.bsCount.append(0)
+
+ self.lineMax = len(self.bMarks) - 1 # don't count last fake line
+
+ # pre-check if code blocks are enabled, to speed up is_code_block method
+ self._code_enabled = "code" in self.md["block"].ruler.get_active_rules()
+
+ def __repr__(self) -> str:
+ return (
+ f"{self.__class__.__name__}"
+ f"(line={self.line},level={self.level},tokens={len(self.tokens)})"
+ )
+
+ def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token:
+ """Push new token to "stream"."""
+ token = Token(ttype, tag, nesting)
+ token.block = True
+ if nesting < 0:
+ self.level -= 1 # closing tag
+ token.level = self.level
+ if nesting > 0:
+ self.level += 1 # opening tag
+ self.tokens.append(token)
+ return token
+
+ def isEmpty(self, line: int) -> bool:
+ """."""
+ return (self.bMarks[line] + self.tShift[line]) >= self.eMarks[line]
+
+ def skipEmptyLines(self, from_pos: int) -> int:
+ """."""
+ while from_pos < self.lineMax:
+ try:
+ if (self.bMarks[from_pos] + self.tShift[from_pos]) < self.eMarks[
+ from_pos
+ ]:
+ break
+ except IndexError:
+ pass
+ from_pos += 1
+ return from_pos
+
+ def skipSpaces(self, pos: int) -> int:
+ """Skip spaces from given position."""
+ while True:
+ try:
+ current = self.src[pos]
+ except IndexError:
+ break
+ if not isStrSpace(current):
+ break
+ pos += 1
+ return pos
+
+ def skipSpacesBack(self, pos: int, minimum: int) -> int:
+ """Skip spaces from given position in reverse."""
+ if pos <= minimum:
+ return pos
+ while pos > minimum:
+ pos -= 1
+ if not isStrSpace(self.src[pos]):
+ return pos + 1
+ return pos
+
+ def skipChars(self, pos: int, code: int) -> int:
+ """Skip character code from given position."""
+ while True:
+ try:
+ current = self.srcCharCode[pos]
+ except IndexError:
+ break
+ if current != code:
+ break
+ pos += 1
+ return pos
+
+ def skipCharsStr(self, pos: int, ch: str) -> int:
+ """Skip character string from given position."""
+ while True:
+ try:
+ current = self.src[pos]
+ except IndexError:
+ break
+ if current != ch:
+ break
+ pos += 1
+ return pos
+
+ def skipCharsBack(self, pos: int, code: int, minimum: int) -> int:
+ """Skip character code reverse from given position - 1."""
+ if pos <= minimum:
+ return pos
+ while pos > minimum:
+ pos -= 1
+ if code != self.srcCharCode[pos]:
+ return pos + 1
+ return pos
+
+ def skipCharsStrBack(self, pos: int, ch: str, minimum: int) -> int:
+ """Skip character string reverse from given position - 1."""
+ if pos <= minimum:
+ return pos
+ while pos > minimum:
+ pos -= 1
+ if ch != self.src[pos]:
+ return pos + 1
+ return pos
+
+ def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str:
+ """Cut lines range from source."""
+ line = begin
+ if begin >= end:
+ return ""
+
+ queue = [""] * (end - begin)
+
+ i = 1
+ while line < end:
+ lineIndent = 0
+ lineStart = first = self.bMarks[line]
+ last = (
+ self.eMarks[line] + 1
+ if line + 1 < end or keepLastLF
+ else self.eMarks[line]
+ )
+
+ while (first < last) and (lineIndent < indent):
+ ch = self.src[first]
+ if isStrSpace(ch):
+ if ch == "\t":
+ lineIndent += 4 - (lineIndent + self.bsCount[line]) % 4
+ else:
+ lineIndent += 1
+ elif first - lineStart < self.tShift[line]:
+ lineIndent += 1
+ else:
+ break
+ first += 1
+
+ if lineIndent > indent:
+ # partially expanding tabs in code blocks, e.g '\t\tfoobar'
+ # with indent=2 becomes ' \tfoobar'
+ queue[i - 1] = (" " * (lineIndent - indent)) + self.src[first:last]
+ else:
+ queue[i - 1] = self.src[first:last]
+
+ line += 1
+ i += 1
+
+ return "".join(queue)
+
+ def is_code_block(self, line: int) -> bool:
+ """Check if line is a code block,
+ i.e. the code block rule is enabled and text is indented by more than 3 spaces.
+ """
+ return self._code_enabled and (self.sCount[line] - self.blkIndent) >= 4
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_block/table.py b/Scripts/Lib/site-packages/markdown_it/rules_block/table.py
new file mode 100644
index 0000000..c52553d
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_block/table.py
@@ -0,0 +1,250 @@
+# GFM table, https://github.github.com/gfm/#tables-extension-
+from __future__ import annotations
+
+import re
+
+from ..common.utils import charStrAt, isStrSpace
+from .state_block import StateBlock
+
+headerLineRe = re.compile(r"^:?-+:?$")
+enclosingPipesRe = re.compile(r"^\||\|$")
+
+# Limit the amount of empty autocompleted cells in a table,
+# see https://github.com/markdown-it/markdown-it/issues/1000,
+# Both pulldown-cmark and commonmark-hs limit the number of cells this way to ~200k.
+# We set it to 65k, which can expand user input by a factor of x370
+# (256x256 square is 1.8kB expanded into 650kB).
+MAX_AUTOCOMPLETED_CELLS = 0x10000
+
+
+def getLine(state: StateBlock, line: int) -> str:
+ pos = state.bMarks[line] + state.tShift[line]
+ maximum = state.eMarks[line]
+
+ # return state.src.substr(pos, max - pos)
+ return state.src[pos:maximum]
+
+
+def escapedSplit(string: str) -> list[str]:
+ result: list[str] = []
+ pos = 0
+ max = len(string)
+ isEscaped = False
+ lastPos = 0
+ current = ""
+ ch = charStrAt(string, pos)
+
+ while pos < max:
+ if ch == "|":
+ if not isEscaped:
+ # pipe separating cells, '|'
+ result.append(current + string[lastPos:pos])
+ current = ""
+ lastPos = pos + 1
+ else:
+ # escaped pipe, '\|'
+ current += string[lastPos : pos - 1]
+ lastPos = pos
+
+ isEscaped = ch == "\\"
+ pos += 1
+
+ ch = charStrAt(string, pos)
+
+ result.append(current + string[lastPos:])
+
+ return result
+
+
+def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
+ tbodyLines = None
+
+ # should have at least two lines
+ if startLine + 2 > endLine:
+ return False
+
+ nextLine = startLine + 1
+
+ if state.sCount[nextLine] < state.blkIndent:
+ return False
+
+ if state.is_code_block(nextLine):
+ return False
+
+ # first character of the second line should be '|', '-', ':',
+ # and no other characters are allowed but spaces;
+ # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp
+
+ pos = state.bMarks[nextLine] + state.tShift[nextLine]
+ if pos >= state.eMarks[nextLine]:
+ return False
+ first_ch = state.src[pos]
+ pos += 1
+ if first_ch not in ("|", "-", ":"):
+ return False
+
+ if pos >= state.eMarks[nextLine]:
+ return False
+ second_ch = state.src[pos]
+ pos += 1
+ if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch):
+ return False
+
+ # if first character is '-', then second character must not be a space
+ # (due to parsing ambiguity with list)
+ if first_ch == "-" and isStrSpace(second_ch):
+ return False
+
+ while pos < state.eMarks[nextLine]:
+ ch = state.src[pos]
+
+ if ch not in ("|", "-", ":") and not isStrSpace(ch):
+ return False
+
+ pos += 1
+
+ lineText = getLine(state, startLine + 1)
+
+ columns = lineText.split("|")
+ aligns = []
+ for i in range(len(columns)):
+ t = columns[i].strip()
+ if not t:
+ # allow empty columns before and after table, but not in between columns;
+ # e.g. allow ` |---| `, disallow ` ---||--- `
+ if i == 0 or i == len(columns) - 1:
+ continue
+ else:
+ return False
+
+ if not headerLineRe.search(t):
+ return False
+ if charStrAt(t, len(t) - 1) == ":":
+ aligns.append("center" if charStrAt(t, 0) == ":" else "right")
+ elif charStrAt(t, 0) == ":":
+ aligns.append("left")
+ else:
+ aligns.append("")
+
+ lineText = getLine(state, startLine).strip()
+ if "|" not in lineText:
+ return False
+ if state.is_code_block(startLine):
+ return False
+ columns = escapedSplit(lineText)
+ if columns and columns[0] == "":
+ columns.pop(0)
+ if columns and columns[-1] == "":
+ columns.pop()
+
+ # header row will define an amount of columns in the entire table,
+ # and align row should be exactly the same (the rest of the rows can differ)
+ columnCount = len(columns)
+ if columnCount == 0 or columnCount != len(aligns):
+ return False
+
+ if silent:
+ return True
+
+ oldParentType = state.parentType
+ state.parentType = "table"
+
+ # use 'blockquote' lists for termination because it's
+ # the most similar to tables
+ terminatorRules = state.md.block.ruler.getRules("blockquote")
+
+ token = state.push("table_open", "table", 1)
+ token.map = tableLines = [startLine, 0]
+
+ token = state.push("thead_open", "thead", 1)
+ token.map = [startLine, startLine + 1]
+
+ token = state.push("tr_open", "tr", 1)
+ token.map = [startLine, startLine + 1]
+
+ for i in range(len(columns)):
+ token = state.push("th_open", "th", 1)
+ if aligns[i]:
+ token.attrs = {"style": "text-align:" + aligns[i]}
+
+ token = state.push("inline", "", 0)
+ # note in markdown-it this map was removed in v12.0.0 however, we keep it,
+ # since it is helpful to propagate to children tokens
+ token.map = [startLine, startLine + 1]
+ token.content = columns[i].strip()
+ token.children = []
+
+ token = state.push("th_close", "th", -1)
+
+ token = state.push("tr_close", "tr", -1)
+ token = state.push("thead_close", "thead", -1)
+
+ autocompleted_cells = 0
+ nextLine = startLine + 2
+ while nextLine < endLine:
+ if state.sCount[nextLine] < state.blkIndent:
+ break
+
+ terminate = False
+ for i in range(len(terminatorRules)):
+ if terminatorRules[i](state, nextLine, endLine, True):
+ terminate = True
+ break
+
+ if terminate:
+ break
+ lineText = getLine(state, nextLine).strip()
+ if not lineText:
+ break
+ if state.is_code_block(nextLine):
+ break
+ columns = escapedSplit(lineText)
+ if columns and columns[0] == "":
+ columns.pop(0)
+ if columns and columns[-1] == "":
+ columns.pop()
+
+ # note: autocomplete count can be negative if user specifies more columns than header,
+ # but that does not affect intended use (which is limiting expansion)
+ autocompleted_cells += columnCount - len(columns)
+ if autocompleted_cells > MAX_AUTOCOMPLETED_CELLS:
+ break
+
+ if nextLine == startLine + 2:
+ token = state.push("tbody_open", "tbody", 1)
+ token.map = tbodyLines = [startLine + 2, 0]
+
+ token = state.push("tr_open", "tr", 1)
+ token.map = [nextLine, nextLine + 1]
+
+ for i in range(columnCount):
+ token = state.push("td_open", "td", 1)
+ if aligns[i]:
+ token.attrs = {"style": "text-align:" + aligns[i]}
+
+ token = state.push("inline", "", 0)
+ # note in markdown-it this map was removed in v12.0.0 however, we keep it,
+ # since it is helpful to propagate to children tokens
+ token.map = [nextLine, nextLine + 1]
+ try:
+ token.content = columns[i].strip() if columns[i] else ""
+ except IndexError:
+ token.content = ""
+ token.children = []
+
+ token = state.push("td_close", "td", -1)
+
+ token = state.push("tr_close", "tr", -1)
+
+ nextLine += 1
+
+ if tbodyLines:
+ token = state.push("tbody_close", "tbody", -1)
+ tbodyLines[1] = nextLine
+
+ token = state.push("table_close", "table", -1)
+
+ tableLines[1] = nextLine
+ state.parentType = oldParentType
+ state.line = nextLine
+ return True
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__init__.py b/Scripts/Lib/site-packages/markdown_it/rules_core/__init__.py
new file mode 100644
index 0000000..e7d7753
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/__init__.py
@@ -0,0 +1,19 @@
+__all__ = (
+ "StateCore",
+ "block",
+ "inline",
+ "linkify",
+ "normalize",
+ "replace",
+ "smartquotes",
+ "text_join",
+)
+
+from .block import block
+from .inline import inline
+from .linkify import linkify
+from .normalize import normalize
+from .replacements import replace
+from .smartquotes import smartquotes
+from .state_core import StateCore
+from .text_join import text_join
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..6b23115
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc
new file mode 100644
index 0000000..1c3a7f8
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc
new file mode 100644
index 0000000..da1093d
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc
new file mode 100644
index 0000000..1e29ee8
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc
new file mode 100644
index 0000000..874ceca
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc
new file mode 100644
index 0000000..8aa7e95
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc
new file mode 100644
index 0000000..7c975e2
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc
new file mode 100644
index 0000000..efcbd27
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc
new file mode 100644
index 0000000..6c8d41c
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/block.py b/Scripts/Lib/site-packages/markdown_it/rules_core/block.py
new file mode 100644
index 0000000..a6c3bb8
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/block.py
@@ -0,0 +1,13 @@
+from ..token import Token
+from .state_core import StateCore
+
+
+def block(state: StateCore) -> None:
+ if state.inlineMode:
+ token = Token("inline", "", 0)
+ token.content = state.src
+ token.map = [0, 1]
+ token.children = []
+ state.tokens.append(token)
+ else:
+ state.md.block.parse(state.src, state.md, state.env, state.tokens)
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/inline.py b/Scripts/Lib/site-packages/markdown_it/rules_core/inline.py
new file mode 100644
index 0000000..c3fd0b5
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/inline.py
@@ -0,0 +1,10 @@
+from .state_core import StateCore
+
+
+def inline(state: StateCore) -> None:
+ """Parse inlines"""
+ for token in state.tokens:
+ if token.type == "inline":
+ if token.children is None:
+ token.children = []
+ state.md.inline.parse(token.content, state.md, state.env, token.children)
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/linkify.py b/Scripts/Lib/site-packages/markdown_it/rules_core/linkify.py
new file mode 100644
index 0000000..efbc9d4
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/linkify.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import re
+from typing import Protocol
+
+from ..common.utils import arrayReplaceAt, isLinkClose, isLinkOpen
+from ..token import Token
+from .state_core import StateCore
+
+HTTP_RE = re.compile(r"^http://")
+MAILTO_RE = re.compile(r"^mailto:")
+TEST_MAILTO_RE = re.compile(r"^mailto:", flags=re.IGNORECASE)
+
+
+def linkify(state: StateCore) -> None:
+ """Rule for identifying plain-text links."""
+ if not state.md.options.linkify:
+ return
+
+ if not state.md.linkify:
+ raise ModuleNotFoundError("Linkify enabled but not installed.")
+
+ for inline_token in state.tokens:
+ if inline_token.type != "inline" or not state.md.linkify.pretest(
+ inline_token.content
+ ):
+ continue
+
+ tokens = inline_token.children
+
+ htmlLinkLevel = 0
+
+ # We scan from the end, to keep position when new tags added.
+ # Use reversed logic in links start/end match
+ assert tokens is not None
+ i = len(tokens)
+ while i >= 1:
+ i -= 1
+ assert isinstance(tokens, list)
+ currentToken = tokens[i]
+
+ # Skip content of markdown links
+ if currentToken.type == "link_close":
+ i -= 1
+ while (
+ tokens[i].level != currentToken.level
+ and tokens[i].type != "link_open"
+ ):
+ i -= 1
+ continue
+
+ # Skip content of html tag links
+ if currentToken.type == "html_inline":
+ if isLinkOpen(currentToken.content) and htmlLinkLevel > 0:
+ htmlLinkLevel -= 1
+ if isLinkClose(currentToken.content):
+ htmlLinkLevel += 1
+ if htmlLinkLevel > 0:
+ continue
+
+ if currentToken.type == "text" and state.md.linkify.test(
+ currentToken.content
+ ):
+ text = currentToken.content
+ links: list[_LinkType] = state.md.linkify.match(text) or []
+
+ # Now split string to nodes
+ nodes = []
+ level = currentToken.level
+ lastPos = 0
+
+ # forbid escape sequence at the start of the string,
+ # this avoids http\://example.com/ from being linkified as
+ # http://example.com/
+ if (
+ links
+ and links[0].index == 0
+ and i > 0
+ and tokens[i - 1].type == "text_special"
+ ):
+ links = links[1:]
+
+ for link in links:
+ url = link.url
+ fullUrl = state.md.normalizeLink(url)
+ if not state.md.validateLink(fullUrl):
+ continue
+
+ urlText = link.text
+
+ # Linkifier might send raw hostnames like "example.com", where url
+ # starts with domain name. So we prepend http:// in those cases,
+ # and remove it afterwards.
+ if not link.schema:
+ urlText = HTTP_RE.sub(
+ "", state.md.normalizeLinkText("http://" + urlText)
+ )
+ elif link.schema == "mailto:" and TEST_MAILTO_RE.search(urlText):
+ urlText = MAILTO_RE.sub(
+ "", state.md.normalizeLinkText("mailto:" + urlText)
+ )
+ else:
+ urlText = state.md.normalizeLinkText(urlText)
+
+ pos = link.index
+
+ if pos > lastPos:
+ token = Token("text", "", 0)
+ token.content = text[lastPos:pos]
+ token.level = level
+ nodes.append(token)
+
+ token = Token("link_open", "a", 1)
+ token.attrs = {"href": fullUrl}
+ token.level = level
+ level += 1
+ token.markup = "linkify"
+ token.info = "auto"
+ nodes.append(token)
+
+ token = Token("text", "", 0)
+ token.content = urlText
+ token.level = level
+ nodes.append(token)
+
+ token = Token("link_close", "a", -1)
+ level -= 1
+ token.level = level
+ token.markup = "linkify"
+ token.info = "auto"
+ nodes.append(token)
+
+ lastPos = link.last_index
+
+ if lastPos < len(text):
+ token = Token("text", "", 0)
+ token.content = text[lastPos:]
+ token.level = level
+ nodes.append(token)
+
+ inline_token.children = tokens = arrayReplaceAt(tokens, i, nodes)
+
+
+class _LinkType(Protocol):
+ url: str
+ text: str
+ index: int
+ last_index: int
+ schema: str | None
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/normalize.py b/Scripts/Lib/site-packages/markdown_it/rules_core/normalize.py
new file mode 100644
index 0000000..3243924
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/normalize.py
@@ -0,0 +1,19 @@
+"""Normalize input string."""
+
+import re
+
+from .state_core import StateCore
+
+# https://spec.commonmark.org/0.29/#line-ending
+NEWLINES_RE = re.compile(r"\r\n?|\n")
+NULL_RE = re.compile(r"\0")
+
+
+def normalize(state: StateCore) -> None:
+ # Normalize newlines
+ string = NEWLINES_RE.sub("\n", state.src)
+
+ # Replace NULL characters
+ string = NULL_RE.sub("\ufffd", string)
+
+ state.src = string
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/replacements.py b/Scripts/Lib/site-packages/markdown_it/rules_core/replacements.py
new file mode 100644
index 0000000..bcc9980
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/replacements.py
@@ -0,0 +1,127 @@
+"""Simple typographic replacements
+
+* ``(c)``, ``(C)`` → ©
+* ``(tm)``, ``(TM)`` → ™
+* ``(r)``, ``(R)`` → ®
+* ``+-`` → ±
+* ``...`` → …
+* ``?....`` → ?..
+* ``!....`` → !..
+* ``????????`` → ???
+* ``!!!!!`` → !!!
+* ``,,,`` → ,
+* ``--`` → &ndash
+* ``---`` → &mdash
+"""
+
+from __future__ import annotations
+
+import logging
+import re
+
+from ..token import Token
+from .state_core import StateCore
+
+LOGGER = logging.getLogger(__name__)
+
+# TODO:
+# - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
+# - multiplication 2 x 4 -> 2 × 4
+
+RARE_RE = re.compile(r"\+-|\.\.|\?\?\?\?|!!!!|,,|--")
+
+# Workaround for phantomjs - need regex without /g flag,
+# or root check will fail every second time
+# SCOPED_ABBR_TEST_RE = r"\((c|tm|r)\)"
+
+SCOPED_ABBR_RE = re.compile(r"\((c|tm|r)\)", flags=re.IGNORECASE)
+
+PLUS_MINUS_RE = re.compile(r"\+-")
+
+ELLIPSIS_RE = re.compile(r"\.{2,}")
+
+ELLIPSIS_QUESTION_EXCLAMATION_RE = re.compile(r"([?!])…")
+
+QUESTION_EXCLAMATION_RE = re.compile(r"([?!]){4,}")
+
+COMMA_RE = re.compile(r",{2,}")
+
+EM_DASH_RE = re.compile(r"(^|[^-])---(?=[^-]|$)", flags=re.MULTILINE)
+
+EN_DASH_RE = re.compile(r"(^|\s)--(?=\s|$)", flags=re.MULTILINE)
+
+EN_DASH_INDENT_RE = re.compile(r"(^|[^-\s])--(?=[^-\s]|$)", flags=re.MULTILINE)
+
+
+SCOPED_ABBR = {"c": "©", "r": "®", "tm": "™"}
+
+
+def replaceFn(match: re.Match[str]) -> str:
+ return SCOPED_ABBR[match.group(1).lower()]
+
+
+def replace_scoped(inlineTokens: list[Token]) -> None:
+ inside_autolink = 0
+
+ for token in inlineTokens:
+ if token.type == "text" and not inside_autolink:
+ token.content = SCOPED_ABBR_RE.sub(replaceFn, token.content)
+
+ if token.type == "link_open" and token.info == "auto":
+ inside_autolink -= 1
+
+ if token.type == "link_close" and token.info == "auto":
+ inside_autolink += 1
+
+
+def replace_rare(inlineTokens: list[Token]) -> None:
+ inside_autolink = 0
+
+ for token in inlineTokens:
+ if (
+ token.type == "text"
+ and (not inside_autolink)
+ and RARE_RE.search(token.content)
+ ):
+ # +- -> ±
+ token.content = PLUS_MINUS_RE.sub("±", token.content)
+
+ # .., ..., ....... -> …
+ token.content = ELLIPSIS_RE.sub("…", token.content)
+
+ # but ?..... & !..... -> ?.. & !..
+ token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content)
+ token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)
+
+ # ,, ,,, ,,,, -> ,
+ token.content = COMMA_RE.sub(",", token.content)
+
+ # em-dash
+ token.content = EM_DASH_RE.sub("\\1\u2014", token.content)
+
+ # en-dash
+ token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
+ token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)
+
+ if token.type == "link_open" and token.info == "auto":
+ inside_autolink -= 1
+
+ if token.type == "link_close" and token.info == "auto":
+ inside_autolink += 1
+
+
+def replace(state: StateCore) -> None:
+ if not state.md.options.typographer:
+ return
+
+ for token in state.tokens:
+ if token.type != "inline":
+ continue
+ if token.children is None:
+ continue
+
+ if SCOPED_ABBR_RE.search(token.content):
+ replace_scoped(token.children)
+
+ if RARE_RE.search(token.content):
+ replace_rare(token.children)
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/smartquotes.py b/Scripts/Lib/site-packages/markdown_it/rules_core/smartquotes.py
new file mode 100644
index 0000000..f9b8b45
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/smartquotes.py
@@ -0,0 +1,202 @@
+"""Convert straight quotation marks to typographic ones"""
+
+from __future__ import annotations
+
+import re
+from typing import Any
+
+from ..common.utils import charCodeAt, isMdAsciiPunct, isPunctChar, isWhiteSpace
+from ..token import Token
+from .state_core import StateCore
+
+QUOTE_TEST_RE = re.compile(r"['\"]")
+QUOTE_RE = re.compile(r"['\"]")
+APOSTROPHE = "\u2019" # ’
+
+
+def replaceAt(string: str, index: int, ch: str) -> str:
+ # When the index is negative, the behavior is different from the js version.
+ # But basically, the index will not be negative.
+ assert index >= 0
+ return string[:index] + ch + string[index + 1 :]
+
+
+def process_inlines(tokens: list[Token], state: StateCore) -> None:
+ stack: list[dict[str, Any]] = []
+
+ for i, token in enumerate(tokens):
+ thisLevel = token.level
+
+ j = 0
+ for j in range(len(stack))[::-1]:
+ if stack[j]["level"] <= thisLevel:
+ break
+ else:
+ # When the loop is terminated without a "break".
+ # Subtract 1 to get the same index as the js version.
+ j -= 1
+
+ stack = stack[: j + 1]
+
+ if token.type != "text":
+ continue
+
+ text = token.content
+ pos = 0
+ maximum = len(text)
+
+ while pos < maximum:
+ goto_outer = False
+ lastIndex = pos
+ t = QUOTE_RE.search(text[lastIndex:])
+ if not t:
+ break
+
+ canOpen = canClose = True
+ pos = t.start(0) + lastIndex + 1
+ isSingle = t.group(0) == "'"
+
+ # Find previous character,
+ # default to space if it's the beginning of the line
+ lastChar: None | int = 0x20
+
+ if t.start(0) + lastIndex - 1 >= 0:
+ lastChar = charCodeAt(text, t.start(0) + lastIndex - 1)
+ else:
+ for j in range(i)[::-1]:
+ if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
+ break
+ # should skip all tokens except 'text', 'html_inline' or 'code_inline'
+ if not tokens[j].content:
+ continue
+
+ lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1)
+ break
+
+ # Find next character,
+ # default to space if it's the end of the line
+ nextChar: None | int = 0x20
+
+ if pos < maximum:
+ nextChar = charCodeAt(text, pos)
+ else:
+ for j in range(i + 1, len(tokens)):
+ # nextChar defaults to 0x20
+ if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
+ break
+ # should skip all tokens except 'text', 'html_inline' or 'code_inline'
+ if not tokens[j].content:
+ continue
+
+ nextChar = charCodeAt(tokens[j].content, 0)
+ break
+
+ isLastPunctChar = lastChar is not None and (
+ isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar))
+ )
+ isNextPunctChar = nextChar is not None and (
+ isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar))
+ )
+
+ isLastWhiteSpace = lastChar is not None and isWhiteSpace(lastChar)
+ isNextWhiteSpace = nextChar is not None and isWhiteSpace(nextChar)
+
+ if isNextWhiteSpace: # noqa: SIM114
+ canOpen = False
+ elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar):
+ canOpen = False
+
+ if isLastWhiteSpace: # noqa: SIM114
+ canClose = False
+ elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar):
+ canClose = False
+
+ if nextChar == 0x22 and t.group(0) == '"': # 0x22: " # noqa: SIM102
+ if (
+ lastChar is not None and lastChar >= 0x30 and lastChar <= 0x39
+ ): # 0x30: 0, 0x39: 9
+ # special case: 1"" - count first quote as an inch
+ canClose = canOpen = False
+
+ if canOpen and canClose:
+ # Replace quotes in the middle of punctuation sequence, but not
+ # in the middle of the words, i.e.:
+ #
+ # 1. foo " bar " baz - not replaced
+ # 2. foo-"-bar-"-baz - replaced
+ # 3. foo"bar"baz - not replaced
+ canOpen = isLastPunctChar
+ canClose = isNextPunctChar
+
+ if not canOpen and not canClose:
+ # middle of word
+ if isSingle:
+ token.content = replaceAt(
+ token.content, t.start(0) + lastIndex, APOSTROPHE
+ )
+ continue
+
+ if canClose:
+ # this could be a closing quote, rewind the stack to get a match
+ for j in range(len(stack))[::-1]:
+ item = stack[j]
+ if stack[j]["level"] < thisLevel:
+ break
+ if item["single"] == isSingle and stack[j]["level"] == thisLevel:
+ item = stack[j]
+
+ if isSingle:
+ openQuote = state.md.options.quotes[2]
+ closeQuote = state.md.options.quotes[3]
+ else:
+ openQuote = state.md.options.quotes[0]
+ closeQuote = state.md.options.quotes[1]
+
+ # replace token.content *before* tokens[item.token].content,
+ # because, if they are pointing at the same token, replaceAt
+ # could mess up indices when quote length != 1
+ token.content = replaceAt(
+ token.content, t.start(0) + lastIndex, closeQuote
+ )
+ tokens[item["token"]].content = replaceAt(
+ tokens[item["token"]].content, item["pos"], openQuote
+ )
+
+ pos += len(closeQuote) - 1
+ if item["token"] == i:
+ pos += len(openQuote) - 1
+
+ text = token.content
+ maximum = len(text)
+
+ stack = stack[:j]
+ goto_outer = True
+ break
+ if goto_outer:
+ goto_outer = False
+ continue
+
+ if canOpen:
+ stack.append(
+ {
+ "token": i,
+ "pos": t.start(0) + lastIndex,
+ "single": isSingle,
+ "level": thisLevel,
+ }
+ )
+ elif canClose and isSingle:
+ token.content = replaceAt(
+ token.content, t.start(0) + lastIndex, APOSTROPHE
+ )
+
+
+def smartquotes(state: StateCore) -> None:
+ if not state.md.options.typographer:
+ return
+
+ for token in state.tokens:
+ if token.type != "inline" or not QUOTE_RE.search(token.content):
+ continue
+ if token.children is not None:
+ process_inlines(token.children, state)
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/state_core.py b/Scripts/Lib/site-packages/markdown_it/rules_core/state_core.py
new file mode 100644
index 0000000..a938041
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/state_core.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+class StateCore(StateBase):
+ def __init__(
+ self,
+ src: str,
+ md: MarkdownIt,
+ env: EnvType,
+ tokens: list[Token] | None = None,
+ ) -> None:
+ self.src = src
+ self.md = md # link to parser instance
+ self.env = env
+ self.tokens: list[Token] = tokens or []
+ self.inlineMode = False
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_core/text_join.py b/Scripts/Lib/site-packages/markdown_it/rules_core/text_join.py
new file mode 100644
index 0000000..5379f6d
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_core/text_join.py
@@ -0,0 +1,35 @@
+"""Join raw text tokens with the rest of the text
+
+This is set as a separate rule to provide an opportunity for plugins
+to run text replacements after text join, but before escape join.
+
+For example, `\\:)` shouldn't be replaced with an emoji.
+"""
+
+from __future__ import annotations
+
+from ..token import Token
+from .state_core import StateCore
+
+
+def text_join(state: StateCore) -> None:
+ """Join raw text for escape sequences (`text_special`) tokens with the rest of the text"""
+
+ for inline_token in state.tokens[:]:
+ if inline_token.type != "inline":
+ continue
+
+ # convert text_special to text and join all adjacent text nodes
+ new_tokens: list[Token] = []
+ for child_token in inline_token.children or []:
+ if child_token.type == "text_special":
+ child_token.type = "text"
+ if (
+ child_token.type == "text"
+ and new_tokens
+ and new_tokens[-1].type == "text"
+ ):
+ new_tokens[-1].content += child_token.content
+ else:
+ new_tokens.append(child_token)
+ inline_token.children = new_tokens
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__init__.py b/Scripts/Lib/site-packages/markdown_it/rules_inline/__init__.py
new file mode 100644
index 0000000..d82ef8f
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_inline/__init__.py
@@ -0,0 +1,31 @@
+__all__ = (
+ "StateInline",
+ "autolink",
+ "backtick",
+ "emphasis",
+ "entity",
+ "escape",
+ "fragments_join",
+ "html_inline",
+ "image",
+ "link",
+ "link_pairs",
+ "linkify",
+ "newline",
+ "strikethrough",
+ "text",
+)
+from . import emphasis, strikethrough
+from .autolink import autolink
+from .backticks import backtick
+from .balance_pairs import link_pairs
+from .entity import entity
+from .escape import escape
+from .fragments_join import fragments_join
+from .html_inline import html_inline
+from .image import image
+from .link import link
+from .linkify import linkify
+from .newline import newline
+from .state_inline import StateInline
+from .text import text
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..2739fd0
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc
new file mode 100644
index 0000000..beeac81
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc
new file mode 100644
index 0000000..ca1bbf2
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc
new file mode 100644
index 0000000..75b8e52
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc
new file mode 100644
index 0000000..fdb4b74
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc
new file mode 100644
index 0000000..e487923
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc
new file mode 100644
index 0000000..376f877
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc
new file mode 100644
index 0000000..f9e2acf
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc
new file mode 100644
index 0000000..9298365
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc
new file mode 100644
index 0000000..7f15f3c
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc
new file mode 100644
index 0000000..02d4b74
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc
new file mode 100644
index 0000000..9fe7a54
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc
new file mode 100644
index 0000000..53b19a4
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc
new file mode 100644
index 0000000..7adc14b
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc
new file mode 100644
index 0000000..1702304
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000..17f7b73
Binary files /dev/null and b/Scripts/Lib/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc differ
diff --git a/Scripts/Lib/site-packages/markdown_it/rules_inline/autolink.py b/Scripts/Lib/site-packages/markdown_it/rules_inline/autolink.py
new file mode 100644
index 0000000..6546e25
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it/rules_inline/autolink.py
@@ -0,0 +1,77 @@
+# Process autolinks '
)."""
+ breaks: bool
+ """Convert newlines in paragraphs into
."""
+ langPrefix: str
+ """CSS language prefix for fenced blocks."""
+ highlight: Callable[[str, str, str], str] | None
+ """Highlighter function: (content, lang, attrs) -> str."""
+ store_labels: NotRequired[bool]
+ """Store link label in link/image token's metadata (under Token.meta['label']).
+
+ This is a Python only option, and is intended for the use of round-trip parsing.
+ """
+
+
+class PresetType(TypedDict):
+ """Preset configuration for markdown-it."""
+
+ options: OptionsType
+ """Options for parsing."""
+ components: MutableMapping[str, MutableMapping[str, list[str]]]
+ """Components for parsing and rendering."""
+
+
+class OptionsDict(MutableMappingABC): # type: ignore
+ """A dictionary, with attribute access to core markdownit configuration options."""
+
+ # Note: ideally we would probably just remove attribute access entirely,
+ # but we keep it for backwards compatibility.
+
+ def __init__(self, options: OptionsType) -> None:
+ self._options = cast(OptionsType, dict(options))
+
+ def __getitem__(self, key: str) -> Any:
+ return self._options[key] # type: ignore[literal-required]
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ self._options[key] = value # type: ignore[literal-required]
+
+ def __delitem__(self, key: str) -> None:
+ del self._options[key] # type: ignore
+
+ def __iter__(self) -> Iterable[str]: # type: ignore
+ return iter(self._options)
+
+ def __len__(self) -> int:
+ return len(self._options)
+
+ def __repr__(self) -> str:
+ return repr(self._options)
+
+ def __str__(self) -> str:
+ return str(self._options)
+
+ @property
+ def maxNesting(self) -> int:
+ """Internal protection, recursion limit."""
+ return self._options["maxNesting"]
+
+ @maxNesting.setter
+ def maxNesting(self, value: int) -> None:
+ self._options["maxNesting"] = value
+
+ @property
+ def html(self) -> bool:
+ """Enable HTML tags in source."""
+ return self._options["html"]
+
+ @html.setter
+ def html(self, value: bool) -> None:
+ self._options["html"] = value
+
+ @property
+ def linkify(self) -> bool:
+ """Enable autoconversion of URL-like texts to links."""
+ return self._options["linkify"]
+
+ @linkify.setter
+ def linkify(self, value: bool) -> None:
+ self._options["linkify"] = value
+
+ @property
+ def typographer(self) -> bool:
+ """Enable smartquotes and replacements."""
+ return self._options["typographer"]
+
+ @typographer.setter
+ def typographer(self, value: bool) -> None:
+ self._options["typographer"] = value
+
+ @property
+ def quotes(self) -> str:
+ """Quote characters."""
+ return self._options["quotes"]
+
+ @quotes.setter
+ def quotes(self, value: str) -> None:
+ self._options["quotes"] = value
+
+ @property
+ def xhtmlOut(self) -> bool:
+ """Use '/' to close single tags (
)."""
+ return self._options["xhtmlOut"]
+
+ @xhtmlOut.setter
+ def xhtmlOut(self, value: bool) -> None:
+ self._options["xhtmlOut"] = value
+
+ @property
+ def breaks(self) -> bool:
+ """Convert newlines in paragraphs into
."""
+ return self._options["breaks"]
+
+ @breaks.setter
+ def breaks(self, value: bool) -> None:
+ self._options["breaks"] = value
+
+ @property
+ def langPrefix(self) -> str:
+ """CSS language prefix for fenced blocks."""
+ return self._options["langPrefix"]
+
+ @langPrefix.setter
+ def langPrefix(self, value: str) -> None:
+ self._options["langPrefix"] = value
+
+ @property
+ def highlight(self) -> Callable[[str, str, str], str] | None:
+ """Highlighter function: (content, langName, langAttrs) -> escaped HTML."""
+ return self._options["highlight"]
+
+ @highlight.setter
+ def highlight(self, value: Callable[[str, str, str], str] | None) -> None:
+ self._options["highlight"] = value
+
+
+def read_fixture_file(path: str | Path) -> list[list[Any]]:
+ text = Path(path).read_text(encoding="utf-8")
+ tests = []
+ section = 0
+ last_pos = 0
+ lines = text.splitlines(keepends=True)
+ for i in range(len(lines)):
+ if lines[i].rstrip() == ".":
+ if section == 0:
+ tests.append([i, lines[i - 1].strip()])
+ section = 1
+ elif section == 1:
+ tests[-1].append("".join(lines[last_pos + 1 : i]))
+ section = 2
+ elif section == 2:
+ tests[-1].append("".join(lines[last_pos + 1 : i]))
+ section = 0
+
+ last_pos = i
+ return tests
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/METADATA b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/METADATA
new file mode 100644
index 0000000..0f2b466
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/METADATA
@@ -0,0 +1,219 @@
+Metadata-Version: 2.4
+Name: markdown-it-py
+Version: 4.0.0
+Summary: Python port of markdown-it. Markdown parsing, done right!
+Keywords: markdown,lexer,parser,commonmark,markdown-it
+Author-email: Chris Sewell
+
Example
+
+
+
+Batch:
+
+ $ markdown-it README.md README.footer.md > index.html
+
+```
+
+## References / Thanks
+
+Big thanks to the authors of [markdown-it]:
+
+- Alex Kocharin [github/rlidwka](https://github.com/rlidwka)
+- Vitaly Puzrin [github/puzrin](https://github.com/puzrin)
+
+Also [John MacFarlane](https://github.com/jgm) for his work on the CommonMark spec and reference implementations.
+
+[github-ci]: https://github.com/executablebooks/markdown-it-py/actions/workflows/tests.yml/badge.svg?branch=master
+[github-link]: https://github.com/executablebooks/markdown-it-py
+[pypi-badge]: https://img.shields.io/pypi/v/markdown-it-py.svg
+[pypi-link]: https://pypi.org/project/markdown-it-py
+[conda-badge]: https://anaconda.org/conda-forge/markdown-it-py/badges/version.svg
+[conda-link]: https://anaconda.org/conda-forge/markdown-it-py
+[codecov-badge]: https://codecov.io/gh/executablebooks/markdown-it-py/branch/master/graph/badge.svg
+[codecov-link]: https://codecov.io/gh/executablebooks/markdown-it-py
+[install-badge]: https://img.shields.io/pypi/dw/markdown-it-py?label=pypi%20installs
+[install-link]: https://pypistats.org/packages/markdown-it-py
+
+[CommonMark spec]: http://spec.commonmark.org/
+[markdown-it]: https://github.com/markdown-it/markdown-it
+[markdown-it-readme]: https://github.com/markdown-it/markdown-it/blob/master/README.md
+[md-security]: https://markdown-it-py.readthedocs.io/en/latest/security.html
+[md-performance]: https://markdown-it-py.readthedocs.io/en/latest/performance.html
+[md-plugins]: https://markdown-it-py.readthedocs.io/en/latest/plugins.html
+
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/RECORD b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/RECORD
new file mode 100644
index 0000000..2e54ade
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/RECORD
@@ -0,0 +1,142 @@
+../../Scripts/markdown-it.exe,sha256=1YhP5O2owmiW1J-uaeKDxMexuBy6SdXo7d1wATnwkjU,106400
+markdown_it/__init__.py,sha256=R7fMvDxageYJ4Q6doBcimogy1ctcV1eBuCFu5Pr8bbA,114
+markdown_it/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/__pycache__/_compat.cpython-310.pyc,,
+markdown_it/__pycache__/_punycode.cpython-310.pyc,,
+markdown_it/__pycache__/main.cpython-310.pyc,,
+markdown_it/__pycache__/parser_block.cpython-310.pyc,,
+markdown_it/__pycache__/parser_core.cpython-310.pyc,,
+markdown_it/__pycache__/parser_inline.cpython-310.pyc,,
+markdown_it/__pycache__/renderer.cpython-310.pyc,,
+markdown_it/__pycache__/ruler.cpython-310.pyc,,
+markdown_it/__pycache__/token.cpython-310.pyc,,
+markdown_it/__pycache__/tree.cpython-310.pyc,,
+markdown_it/__pycache__/utils.cpython-310.pyc,,
+markdown_it/_compat.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
+markdown_it/_punycode.py,sha256=JvSOZJ4VKr58z7unFGM0KhfTxqHMk2w8gglxae2QszM,2373
+markdown_it/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+markdown_it/cli/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/cli/__pycache__/parse.cpython-310.pyc,,
+markdown_it/cli/parse.py,sha256=Un3N7fyGHhZAQouGVnRx-WZcpKwEK2OF08rzVAEBie8,2881
+markdown_it/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+markdown_it/common/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/common/__pycache__/entities.cpython-310.pyc,,
+markdown_it/common/__pycache__/html_blocks.cpython-310.pyc,,
+markdown_it/common/__pycache__/html_re.cpython-310.pyc,,
+markdown_it/common/__pycache__/normalize_url.cpython-310.pyc,,
+markdown_it/common/__pycache__/utils.cpython-310.pyc,,
+markdown_it/common/entities.py,sha256=EYRCmUL7ZU1FRGLSXQlPx356lY8EUBdFyx96eSGc6d0,157
+markdown_it/common/html_blocks.py,sha256=QXbUDMoN9lXLgYFk2DBYllnLiFukL6dHn2X98Y6Wews,986
+markdown_it/common/html_re.py,sha256=FggAEv9IL8gHQqsGTkHcf333rTojwG0DQJMH9oVu0fU,926
+markdown_it/common/normalize_url.py,sha256=avOXnLd9xw5jU1q5PLftjAM9pvGx8l9QDEkmZSyrMgg,2568
+markdown_it/common/utils.py,sha256=pMgvMOE3ZW-BdJ7HfuzlXNKyD1Ivk7jHErc2J_B8J5M,8734
+markdown_it/helpers/__init__.py,sha256=YH2z7dS0WUc_9l51MWPvrLtFoBPh4JLGw58OuhGRCK0,253
+markdown_it/helpers/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/helpers/__pycache__/parse_link_destination.cpython-310.pyc,,
+markdown_it/helpers/__pycache__/parse_link_label.cpython-310.pyc,,
+markdown_it/helpers/__pycache__/parse_link_title.cpython-310.pyc,,
+markdown_it/helpers/parse_link_destination.py,sha256=u-xxWVP3g1s7C1bQuQItiYyDrYoYHJzXaZXPgr-o6mY,1906
+markdown_it/helpers/parse_link_label.py,sha256=PIHG6ZMm3BUw0a2m17lCGqNrl3vaz911tuoGviWD3I4,1037
+markdown_it/helpers/parse_link_title.py,sha256=jkLoYQMKNeX9bvWQHkaSroiEo27HylkEUNmj8xBRlp4,2273
+markdown_it/main.py,sha256=vzuT23LJyKrPKNyHKKAbOHkNWpwIldOGUM-IGsv2DHM,12732
+markdown_it/parser_block.py,sha256=-MyugXB63Te71s4NcSQZiK5bE6BHkdFyZv_bviuatdI,3939
+markdown_it/parser_core.py,sha256=SRmJjqe8dC6GWzEARpWba59cBmxjCr3Gsg8h29O8sQk,1016
+markdown_it/parser_inline.py,sha256=y0jCig8CJxQO7hBz0ZY3sGvPlAKTohOwIgaqnlSaS5A,5024
+markdown_it/port.yaml,sha256=jt_rdwOnfocOV5nc35revTybAAQMIp_-1fla_527sVE,2447
+markdown_it/presets/__init__.py,sha256=22vFtwJEY7iqFRtgVZ-pJthcetfpr1Oig8XOF9x1328,970
+markdown_it/presets/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/presets/__pycache__/commonmark.cpython-310.pyc,,
+markdown_it/presets/__pycache__/default.cpython-310.pyc,,
+markdown_it/presets/__pycache__/zero.cpython-310.pyc,,
+markdown_it/presets/commonmark.py,sha256=ygfb0R7WQ_ZoyQP3df-B0EnYMqNXCVOSw9SAdMjsGow,2869
+markdown_it/presets/default.py,sha256=FfKVUI0HH3M-_qy6RwotLStdC4PAaAxE7Dq0_KQtRtc,1811
+markdown_it/presets/zero.py,sha256=okXWTBEI-2nmwx5XKeCjxInRf65oC11gahtRl-QNtHM,2113
+markdown_it/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
+markdown_it/renderer.py,sha256=Lzr0glqd5oxFL10DOfjjW8kg4Gp41idQ4viEQaE47oA,9947
+markdown_it/ruler.py,sha256=eMAtWGRAfSM33aiJed0k5923BEkuMVsMq1ct8vU-ql4,9142
+markdown_it/rules_block/__init__.py,sha256=SQpg0ocmsHeILPAWRHhzgLgJMKIcNkQyELH13o_6Ktc,553
+markdown_it/rules_block/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/code.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/fence.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/heading.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/hr.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/html_block.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/list.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/reference.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/state_block.cpython-310.pyc,,
+markdown_it/rules_block/__pycache__/table.cpython-310.pyc,,
+markdown_it/rules_block/blockquote.py,sha256=7uymS36dcrned3DsIaRcqcbFU1NlymhvsZpEXTD3_n8,8887
+markdown_it/rules_block/code.py,sha256=iTAxv0U1-MDhz88M1m1pi2vzOhEMSEROsXMo2Qq--kU,860
+markdown_it/rules_block/fence.py,sha256=BJgU-PqZ4vAlCqGcrc8UtdLpJJyMeRWN-G-Op-zxrMc,2537
+markdown_it/rules_block/heading.py,sha256=4Lh15rwoVsQjE1hVhpbhidQ0k9xKHihgjAeYSbwgO5k,1745
+markdown_it/rules_block/hr.py,sha256=QCoY5kImaQRvF7PyP8OoWft6A8JVH1v6MN-0HR9Ikpg,1227
+markdown_it/rules_block/html_block.py,sha256=wA8pb34LtZr1BkIATgGKQBIGX5jQNOkwZl9UGEqvb5M,2721
+markdown_it/rules_block/lheading.py,sha256=fWoEuUo7S2svr5UMKmyQMkh0hheYAHg2gMM266Mogs4,2625
+markdown_it/rules_block/list.py,sha256=gIodkAJFyOIyKCZCj5lAlL7jIj5kAzrDb-K-2MFNplY,9668
+markdown_it/rules_block/paragraph.py,sha256=9pmCwA7eMu4LBdV4fWKzC4EdwaOoaGw2kfeYSQiLye8,1819
+markdown_it/rules_block/reference.py,sha256=ue1qZbUaUP0GIvwTjh6nD1UtCij8uwsIMuYW1xBkckc,6983
+markdown_it/rules_block/state_block.py,sha256=HowsQyy5hGUibH4HRZWKfLIlXeDUnuWL7kpF0-rSwoM,8422
+markdown_it/rules_block/table.py,sha256=8nMd9ONGOffER7BXmc9kbbhxkLjtpX79dVLR0iatGnM,7682
+markdown_it/rules_core/__init__.py,sha256=QFGBe9TUjnRQJDU7xY4SQYpxyTHNwg8beTSwXpNGRjE,394
+markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/block.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/inline.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc,,
+markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc,,
+markdown_it/rules_core/block.py,sha256=0_JY1CUy-H2OooFtIEZAACtuoGUMohgxo4Z6A_UinSg,372
+markdown_it/rules_core/inline.py,sha256=9oWmeBhJHE7x47oJcN9yp6UsAZtrEY_A-VmfoMvKld4,325
+markdown_it/rules_core/linkify.py,sha256=mjQqpk_lHLh2Nxw4UFaLxa47Fgi-OHnmDamlgXnhmv0,5141
+markdown_it/rules_core/normalize.py,sha256=AJm4femtFJ_QBnM0dzh0UNqTTJk9K6KMtwRPaioZFqM,403
+markdown_it/rules_core/replacements.py,sha256=CH75mie-tdzdLKQtMBuCTcXAl1ijegdZGfbV_Vk7st0,3471
+markdown_it/rules_core/smartquotes.py,sha256=izK9fSyuTzA-zAUGkRkz9KwwCQWo40iRqcCKqOhFbEE,7443
+markdown_it/rules_core/state_core.py,sha256=HqWZCUr5fW7xG6jeQZDdO0hE9hxxyl3_-bawgOy57HY,570
+markdown_it/rules_core/text_join.py,sha256=rLXxNuLh_es5RvH31GsXi7en8bMNO9UJ5nbJMDBPltY,1173
+markdown_it/rules_inline/__init__.py,sha256=qqHZk6-YE8Rc12q6PxvVKBaxv2wmZeeo45H1XMR_Vxs,696
+markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/image.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/link.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc,,
+markdown_it/rules_inline/__pycache__/text.cpython-310.pyc,,
+markdown_it/rules_inline/autolink.py,sha256=pPoqJY8i99VtFn7KgUzMackMeq1hytzioVvWs-VQPRo,2065
+markdown_it/rules_inline/backticks.py,sha256=J7bezjjNxiXlKqvHc0fJkHZwH7-2nBsXVjcKydk8E4M,2037
+markdown_it/rules_inline/balance_pairs.py,sha256=5zgBiGidqdiWmt7Io_cuZOYh5EFEfXrYRce8RXg5m7o,4852
+markdown_it/rules_inline/emphasis.py,sha256=7aDLZx0Jlekuvbu3uEUTDhJp00Z0Pj6g4C3-VLhI8Co,3123
+markdown_it/rules_inline/entity.py,sha256=CE8AIGMi5isEa24RNseo0wRmTTaj5YLbgTFdDmBesAU,1651
+markdown_it/rules_inline/escape.py,sha256=KGulwrP5FnqZM7GXY8lf7pyVv0YkR59taZDeHb5cmKg,1659
+markdown_it/rules_inline/fragments_join.py,sha256=_3JbwWYJz74gRHeZk6T8edVJT2IVSsi7FfmJJlieQlA,1493
+markdown_it/rules_inline/html_inline.py,sha256=SBg6HR0HRqCdrkkec0dfOYuQdAqyfeLRFLeQggtgjvg,1130
+markdown_it/rules_inline/image.py,sha256=Wbsg7jgnOtKXIwXGNJOlG7ORThkMkBVolxItC0ph6C0,4141
+markdown_it/rules_inline/link.py,sha256=2oD-fAdB0xyxDRtZLTjzLeWbzJ1k9bbPVQmohb58RuI,4258
+markdown_it/rules_inline/linkify.py,sha256=ifH6sb5wE8PGMWEw9Sr4x0DhMVfNOEBCfFSwKll2O-s,1706
+markdown_it/rules_inline/newline.py,sha256=329r0V3aDjzNtJcvzA3lsFYjzgBrShLAV5uf9hwQL_M,1297
+markdown_it/rules_inline/state_inline.py,sha256=d-menFzbz5FDy1JNgGBF-BASasnVI-9RuOxWz9PnKn4,5003
+markdown_it/rules_inline/strikethrough.py,sha256=pwcPlyhkh5pqFVxRCSrdW5dNCIOtU4eDit7TVDTPIVA,3214
+markdown_it/rules_inline/text.py,sha256=FQqaQRUqbnMLO9ZSWPWQUMEKH6JqWSSSmlZ5Ii9P48o,1119
+markdown_it/token.py,sha256=cWrt9kodfPdizHq_tYrzyIZNtJYNMN1813DPNlunwTg,6381
+markdown_it/tree.py,sha256=56Cdbwu2Aiks7kNYqO_fQZWpPb_n48CUllzjQQfgu1Y,11111
+markdown_it/utils.py,sha256=lVLeX7Af3GaNFfxmMgUbsn5p7cXbwhLq7RSf56UWuRE,5687
+markdown_it_py-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+markdown_it_py-4.0.0.dist-info/METADATA,sha256=6fyqHi2vP5bYQKCfuqo5T-qt83o22Ip7a2tnJIfGW_s,7288
+markdown_it_py-4.0.0.dist-info/RECORD,,
+markdown_it_py-4.0.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+markdown_it_py-4.0.0.dist-info/entry_points.txt,sha256=T81l7fHQ3pllpQ4wUtQK6a8g_p6wxQbnjKVHCk2WMG4,58
+markdown_it_py-4.0.0.dist-info/licenses/LICENSE,sha256=SiJg1uLND1oVGh6G2_59PtVSseK-q_mUHBulxJy85IQ,1078
+markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it,sha256=eSxIxahJoV_fnjfovPnm0d0TsytGxkKnSKCkapkZ1HM,1073
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL
new file mode 100644
index 0000000..d8b9936
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.12.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt
new file mode 100644
index 0000000..7d829cd
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+markdown-it=markdown_it.cli.parse:main
+
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..582ddf5
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 ExecutableBookProject
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it
new file mode 100644
index 0000000..7ffa058
--- /dev/null
+++ b/Scripts/Lib/site-packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it
@@ -0,0 +1,22 @@
+Copyright (c) 2014 Vitaly Puzrin, Alex Kocharin.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/INSTALLER b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/LICENSE b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/LICENSE
new file mode 100644
index 0000000..2a920c5
--- /dev/null
+++ b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/LICENSE
@@ -0,0 +1,46 @@
+Copyright (c) 2015 Vitaly Puzrin, Alex Kocharin.
+Copyright (c) 2021 Taneli Hukkinen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+.parse() is based on Joyent's node.js `url` code:
+
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/METADATA b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/METADATA
new file mode 100644
index 0000000..b4670e8
--- /dev/null
+++ b/Scripts/Lib/site-packages/mdurl-0.1.2.dist-info/METADATA
@@ -0,0 +1,32 @@
+Metadata-Version: 2.1
+Name: mdurl
+Version: 0.1.2
+Summary: Markdown URL utilities
+Keywords: markdown,commonmark
+Author-email: Taneli Hukkinen <hukkin@users.noreply.github.com>
+            # or if it is immediately followed by an optgroup
+ # element, or if there is no more content in the parent
+ # element.
+ if type == "StartTag":
+ return next["name"] in ('option', 'optgroup')
+ else:
+ return type == "EndTag" or type is None
+ elif tagname in ('rt', 'rp'):
+ # An rt element's end tag may be omitted if the rt element is
+ # immediately followed by an rt or rp element, or if there is
+ # no more content in the parent element.
+ # An rp element's end tag may be omitted if the rp element is
+ # immediately followed by an rt or rp element, or if there is
+ # no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] in ('rt', 'rp')
+ else:
+ return type == "EndTag" or type is None
+ elif tagname == 'colgroup':
+ # A colgroup element's end tag may be omitted if the colgroup
+ # element is not immediately followed by a space character or
+ # a comment.
+ if type in ("Comment", "SpaceCharacters"):
+ return False
+ elif type == "StartTag":
+ # XXX: we also look for an immediately following colgroup
+ # element. See is_optional_start.
+ return next["name"] != 'colgroup'
+ else:
+ return True
+ elif tagname in ('thead', 'tbody'):
+ # A thead element's end tag may be omitted if the thead element
+ # is immediately followed by a tbody or tfoot element.
+ # A tbody element's end tag may be omitted if the tbody element
+ # is immediately followed by a tbody or tfoot element, or if
+ # there is no more content in the parent element.
+ # A tfoot element's end tag may be omitted if the tfoot element
+ # is immediately followed by a tbody element, or if there is no
+ # more content in the parent element.
+ # XXX: we never omit the end tag when the following element is
+ # a tbody. See is_optional_start.
+ if type == "StartTag":
+ return next["name"] in ['tbody', 'tfoot']
+ elif tagname == 'tbody':
+ return type == "EndTag" or type is None
+ else:
+ return False
+ elif tagname == 'tfoot':
+ # A tfoot element's end tag may be omitted if the tfoot element
+ # is immediately followed by a tbody element, or if there is no
+ # more content in the parent element.
+ # XXX: we never omit the end tag when the following element is
+ # a tbody. See is_optional_start.
+ if type == "StartTag":
+ return next["name"] == 'tbody'
+ else:
+ return type == "EndTag" or type is None
+ elif tagname in ('td', 'th'):
+ # A td element's end tag may be omitted if the td element is
+ # immediately followed by a td or th element, or if there is
+ # no more content in the parent element.
+ # A th element's end tag may be omitted if the th element is
+ # immediately followed by a td or th element, or if there is
+ # no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] in ('td', 'th')
+ else:
+ return type == "EndTag" or type is None
+ return False
diff --git a/Scripts/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/Scripts/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
new file mode 100644
index 0000000..aa7431d
--- /dev/null
+++ b/Scripts/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
@@ -0,0 +1,916 @@
+"""Deprecated from html5lib 1.1.
+
+See `here ,