diff --git a/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/__pycache__/axes_size.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/__pycache__/axes_size.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2114b52a5f74968c0250320e55813bcc851e7060
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/__pycache__/axes_size.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/axes_size.py b/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/axes_size.py
new file mode 100644
index 0000000000000000000000000000000000000000..e417c1a899ac9823e757f2b490aeb354223f8f2b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/mpl_toolkits/axes_grid1/axes_size.py
@@ -0,0 +1,248 @@
+"""
+Provides classes of simple units that will be used with `.AxesDivider`
+class (or others) to determine the size of each Axes. The unit
+classes define `get_size` method that returns a tuple of two floats,
+meaning relative and absolute sizes, respectively.
+
+Note that this class is nothing more than a simple tuple of two
+floats. Take a look at the Divider class to see how these two
+values are used.
+"""
+
+from numbers import Real
+
+from matplotlib import _api
+from matplotlib.axes import Axes
+
+
+class _Base:
+    # Shared base for all size units; provides the arithmetic sugar that lets
+    # callers combine units with ``*`` and ``+``.
+    def __rmul__(self, other):
+        # ``number * size`` -> a Fraction scaling this size by *other*.
+        return Fraction(other, self)
+
+    def __add__(self, other):
+        # ``size + size`` -> Add; a bare number is promoted to a Fixed size.
+        if isinstance(other, _Base):
+            return Add(self, other)
+        else:
+            return Add(self, Fixed(other))
+
+    def get_size(self, renderer):
+        """
+        Return two-float tuple with relative and absolute sizes.
+        """
+        raise NotImplementedError("Subclasses must implement")
+
+
+class Add(_Base):
+    """
+    Sum of two sizes.
+    """
+
+    def __init__(self, a, b):
+        # *a* and *b* are unit instances; their parts are summed component-wise.
+        self._a = a
+        self._b = b
+
+    def get_size(self, renderer):
+        # Relative and absolute parts add independently.
+        a_rel_size, a_abs_size = self._a.get_size(renderer)
+        b_rel_size, b_abs_size = self._b.get_size(renderer)
+        return a_rel_size + b_rel_size, a_abs_size + b_abs_size
+
+
+class Fixed(_Base):
+    """
+    Simple fixed size with absolute part = *fixed_size* and relative part = 0.
+    """
+
+    def __init__(self, fixed_size):
+        # *fixed_size* must be a real number; presumably interpreted as inches
+        # by the Divider framework -- TODO confirm.
+        _api.check_isinstance(Real, fixed_size=fixed_size)
+        self.fixed_size = fixed_size
+
+    def get_size(self, renderer):
+        # Fixed sizes never scale with the Axes: relative part is always 0.
+        rel_size = 0.
+        abs_size = self.fixed_size
+        return rel_size, abs_size
+
+
+class Scaled(_Base):
+    """
+    Simple scaled(?) size with absolute part = 0 and
+    relative part = *scalable_size*.
+    """
+
+    def __init__(self, scalable_size):
+        self._scalable_size = scalable_size
+
+    def get_size(self, renderer):
+        # Purely relative: grows with available space, no absolute component.
+        rel_size = self._scalable_size
+        abs_size = 0.
+        return rel_size, abs_size
+
+# Alias kept for backward compatibility with older call sites.
+Scalable = Scaled
+
+
+def _get_axes_aspect(ax):
+    """Return *ax*'s aspect, with the string "auto" normalized to 1."""
+    aspect = ax.get_aspect()
+    if aspect == "auto":
+        aspect = 1.
+    return aspect
+
+
+class AxesX(_Base):
+    """
+    Scaled size whose relative part corresponds to the data width
+    of the *axes* multiplied by the *aspect*.
+    """
+
+    def __init__(self, axes, aspect=1., ref_ax=None):
+        # *aspect* is either a number, or the string "axes" to derive the
+        # factor from *ref_ax*'s aspect relative to *axes*.
+        self._axes = axes
+        self._aspect = aspect
+        if aspect == "axes" and ref_ax is None:
+            raise ValueError("ref_ax must be set when aspect='axes'")
+        self._ref_ax = ref_ax
+
+    def get_size(self, renderer):
+        l1, l2 = self._axes.get_xlim()
+        if self._aspect == "axes":
+            ref_aspect = _get_axes_aspect(self._ref_ax)
+            aspect = ref_aspect / _get_axes_aspect(self._axes)
+        else:
+            aspect = self._aspect
+
+        # Relative part is the x data span scaled by the aspect factor.
+        rel_size = abs(l2-l1)*aspect
+        abs_size = 0.
+        return rel_size, abs_size
+
+
+class AxesY(_Base):
+    """
+    Scaled size whose relative part corresponds to the data height
+    of the *axes* multiplied by the *aspect*.
+    """
+
+    def __init__(self, axes, aspect=1., ref_ax=None):
+        self._axes = axes
+        self._aspect = aspect
+        if aspect == "axes" and ref_ax is None:
+            raise ValueError("ref_ax must be set when aspect='axes'")
+        self._ref_ax = ref_ax
+
+    def get_size(self, renderer):
+        l1, l2 = self._axes.get_ylim()
+
+        if self._aspect == "axes":
+            ref_aspect = _get_axes_aspect(self._ref_ax)
+            # NOTE(review): unlike AxesX, ``ref_aspect`` is computed but not
+            # used here -- confirm whether the asymmetry is intentional.
+            aspect = _get_axes_aspect(self._axes)
+        else:
+            aspect = self._aspect
+
+        # Relative part is the y data span scaled by the aspect factor.
+        rel_size = abs(l2-l1)*aspect
+        abs_size = 0.
+        return rel_size, abs_size
+
+
+class MaxExtent(_Base):
+    """
+    Size whose absolute part is either the largest width or the largest height
+    of the given *artist_list*.
+    """
+
+    def __init__(self, artist_list, w_or_h):
+        # *w_or_h* selects which bbox attribute to read: "width" or "height".
+        self._artist_list = artist_list
+        _api.check_in_list(["width", "height"], w_or_h=w_or_h)
+        self._w_or_h = w_or_h
+
+    def add_artist(self, a):
+        """Add an artist whose extent participates in the maximum."""
+        self._artist_list.append(a)
+
+    def get_size(self, renderer):
+        rel_size = 0.
+        # Each artist's pixel extent is converted to inches via its figure dpi;
+        # an empty artist list yields an absolute size of 0.
+        extent_list = [
+            getattr(a.get_window_extent(renderer), self._w_or_h) / a.figure.dpi
+            for a in self._artist_list]
+        abs_size = max(extent_list, default=0)
+        return rel_size, abs_size
+
+
+class MaxWidth(MaxExtent):
+    """
+    Size whose absolute part is the largest width of the given *artist_list*.
+    """
+
+    def __init__(self, artist_list):
+        # Convenience subclass: fixes ``w_or_h`` to "width".
+        super().__init__(artist_list, "width")
+
+
+class MaxHeight(MaxExtent):
+    """
+    Size whose absolute part is the largest height of the given *artist_list*.
+    """
+
+    def __init__(self, artist_list):
+        # Convenience subclass: fixes ``w_or_h`` to "height".
+        super().__init__(artist_list, "height")
+
+
+class Fraction(_Base):
+    """
+    An instance whose size is a *fraction* of the *ref_size*.
+
+    >>> s = Fraction(0.3, AxesX(ax))
+    """
+
+    def __init__(self, fraction, ref_size):
+        _api.check_isinstance(Real, fraction=fraction)
+        self._fraction_ref = ref_size
+        self._fraction = fraction
+
+    def get_size(self, renderer):
+        # Without a reference size the fraction itself acts as the relative
+        # part (this is what ``number * size`` via _Base.__rmul__ relies on).
+        if self._fraction_ref is None:
+            return self._fraction, 0.
+        else:
+            # Scale both parts of the reference size by the fraction.
+            r, a = self._fraction_ref.get_size(renderer)
+            rel_size = r*self._fraction
+            abs_size = a*self._fraction
+            return rel_size, abs_size
+
+
+def from_any(size, fraction_ref=None):
+ """
+ Create a Fixed unit when the first argument is a float, or a
+ Fraction unit if that is a string that ends with %. The second
+ argument is only meaningful when Fraction unit is created.
+
+ >>> from mpl_toolkits.axes_grid1.axes_size import from_any
+ >>> a = from_any(1.2) # => Fixed(1.2)
+ >>> from_any("50%", a) # => Fraction(0.5, a)
+ """
+ if isinstance(size, Real):
+ return Fixed(size)
+ elif isinstance(size, str):
+ if size[-1] == "%":
+ return Fraction(float(size[:-1]) / 100, fraction_ref)
+ raise ValueError("Unknown format")
+
+
+class _AxesDecorationsSize(_Base):
+    """
+    Fixed size, corresponding to the size of decorations on a given Axes side.
+    """
+
+    # Maps a side name to the pixel gap between the tight bbox (which includes
+    # ticks, labels, titles) and the bare Axes bbox on that side.
+    _get_size_map = {
+        "left": lambda tight_bb, axes_bb: axes_bb.xmin - tight_bb.xmin,
+        "right": lambda tight_bb, axes_bb: tight_bb.xmax - axes_bb.xmax,
+        "bottom": lambda tight_bb, axes_bb: axes_bb.ymin - tight_bb.ymin,
+        "top": lambda tight_bb, axes_bb: tight_bb.ymax - axes_bb.ymax,
+    }
+
+    def __init__(self, ax, direction):
+        _api.check_in_list(self._get_size_map, direction=direction)
+        self._direction = direction
+        # Accept a single Axes or an iterable of Axes.
+        self._ax_list = [ax] if isinstance(ax, Axes) else ax
+
+    def get_size(self, renderer):
+        # Largest decoration extent across the tracked Axes, in pixels;
+        # call_axes_locator=False presumably avoids re-entering the layout
+        # machinery while it is mid-computation -- TODO confirm.
+        sz = max([
+            self._get_size_map[self._direction](
+                ax.get_tightbbox(renderer, call_axes_locator=False), ax.bbox)
+            for ax in self._ax_list])
+        # points_to_pixels(72) yields pixels-per-inch (72 pt = 1 in), so the
+        # division converts the pixel extent to inches.
+        dpi = renderer.points_to_pixels(72)
+        abs_size = sz / dpi
+        rel_size = 0
+        return rel_size, abs_size
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/__main__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/__main__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5b1a73aa8771165b8b4e72c6d807a2027483264
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/__main__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/dipeptide.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/dipeptide.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a804d44d469e11948e38c827c60ec7a1853b4f97
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/dipeptide.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/nucleotide.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/nucleotide.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..052ff9bf61d53b6ee9f64c3c2d738c29e0e642bc
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/nucleotide.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/plot_transcript.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/plot_transcript.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fd0fcf8ef61d6363059a8e46ff115dc2fb66c35
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/plot_transcript.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/synergy.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/synergy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d06d5e06afedcb91357e4e4724c1aa3f8d33c6f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/synergy.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/__pycache__/tripeptide.cpython-310.pyc b/falcon/lib/python3.10/site-packages/RUST/__pycache__/tripeptide.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de650105baa2bfd1a875baece63b91a782c93f38
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/RUST/__pycache__/tripeptide.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/RUST/amino.py b/falcon/lib/python3.10/site-packages/RUST/amino.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbd160e0cf392f1ba0ed62d6ee5bc1abb78c3668
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/RUST/amino.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+#####################################################################################
+# rust_amino, Produces RUST metagene profile of amino acids
+# Copyright (C) 2015 Patrick O'Connor
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#####################################################################################
+
+import os, re, pysam, sys, math, argparse
+from .methods import *
+
+try:
+ import matplotlib as mpl
+
+ mpl.use("Agg")
+ import matplotlib.pyplot as plt
+ from pylab import log2, MaxNLocator
+except:
+ pass
+
+
+def RUST_metagene_plot(infileopen36, ax36):
+    """Plot the per-amino-acid RUST ratio curves (gray, log2 scale) from the
+    open RUST output file *infileopen36* onto *ax36*, plus the trailing
+    Kullback-Leibler divergence row on a twin blue axis when present.
+    """
+    infileopen36.seek(0)
+    infileopen36.readline()  # skip the header row
+    while 1:
+        line = infileopen36.readline()
+        linesplit = line.split(",")
+        # A line without commas marks the end of the per-amino-acid rows.
+        if len(linesplit) == 1:
+            break
+        codon = linesplit[0]
+        coverage = list(map(float, linesplit[1:]))
+        coverage_a = coverage[0]  # first column is the expected value
+        if coverage_a == 0:
+            continue
+        # Normalize observed columns by the expected value, then take log2.
+        coverage_n = [n / coverage_a for n in coverage[1:]]
+        log2_values = [math.log(n, 2) for n in coverage_n]
+        ax36.plot(log2_values, color="gray")
+        # print log2(coverage_n) == math.log(coverage_n,2)
+
+    line = infileopen36.readline()
+    linesplit = line.split(",")
+    # KL-divergence row is skipped when any column is "NA".
+    # NOTE(review): nucleotide.py tests ``"NA" not in linesplit`` while here
+    # the whole line string is tested -- confirm which behavior is intended.
+    if "NA" not in line:
+        coverage = list(map(float, linesplit[2:]))
+        ax2 = ax36.twinx()
+        ax2.plot(coverage, color="blue")
+        for tl in ax2.get_yticklabels():
+            tl.set_color("blue")
+            tl.set_rotation(0)
+
+        ax2.yaxis.set_major_locator(MaxNLocator(3))
+        ax2.set_ylim(0, 1.0)
+        ax2.set_ylim(-2, 1.0)
+        ax2.set_yticks([0, 1], minor=False)
+        ax2.set_yticklabels(["0", "1"])
+        ax2.set_ylabel("Kullback-Leibler divergence", color="blue")
+
+    # Codon positions 0..59 relabeled relative to the A-site at position 40.
+    ax36.set_xticks([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55])
+    ax36.set_xticklabels([-35, -30, -25, -20, -15, -10, -5, 0, 5, 10, 15])
+    ax36.set_xlabel("distance from A-site [codon]")
+    ax36.set_ylabel("Amino acid RUST ratio (observed/expected), log2")
+    ax36.axvline(40, color="red")
+
+
+def main(args):
+
+ mRNA_sequences = args.transcriptome # path to fastq file of transcripts
+ in_seq_handle = open(mRNA_sequences)
+ cds_start_dict = {}
+ cds_end_dict = {}
+ seq_dict = {}
+ for line in in_seq_handle:
+ if line[0] != ">":
+ seq_dict.setdefault(transcript, "")
+ seq_dict[transcript] += line[:-1]
+ continue
+ try:
+ transcript_split = line[:-1].split("\t")
+ transcript = transcript_split[0][1:]
+ cds_start_dict[transcript] = int(transcript_split[1])
+ cds_end_dict[transcript] = int(transcript_split[2])
+ except:
+ pass
+ in_seq_handle.close()
+
+ offset = args.offset
+ readlen_range = args.lengths
+ readlen_rangesplit = readlen_range.split(":")
+ if len(readlen_rangesplit) == 1:
+ accepted_read_lengths = [int(readlen_rangesplit[0])]
+ length_values = "%s" % int(readlen_rangesplit[0])
+ elif len(readlen_rangesplit) == 2:
+ accepted_read_lengths = [
+ readlen
+ for readlen in range(
+ int(readlen_rangesplit[0]), int(readlen_rangesplit[1]) + 1
+ )
+ ]
+ length_values = "%s_%s" % (
+ int(readlen_rangesplit[0]),
+ int(readlen_rangesplit[1]),
+ )
+ else:
+ stop_err(
+ "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
+ )
+ if len(accepted_read_lengths) == 0:
+ stop_err(
+ "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
+ )
+
+ amino_acids = [
+ "A",
+ "C",
+ "E",
+ "D",
+ "G",
+ "F",
+ "I",
+ "H",
+ "K",
+ "M",
+ "L",
+ "N",
+ "Q",
+ "P",
+ "S",
+ "R",
+ "T",
+ "W",
+ "V",
+ "Y",
+ ]
+ aligments_A1 = pysam.Samfile(
+ args.alignment, "rb"
+ ) # path to aligments in bam format
+
+ amino_enrichment_dict = {}
+ codon_enrichment_expected_dict = {}
+ for amino_acid in amino_acids:
+ amino_enrichment_dict[amino_acid] = {}
+ codon_enrichment_expected_dict[amino_acid] = []
+ for number in range(0, 60, 1):
+ amino_enrichment_dict[amino_acid][number] = [0.0, 0.0]
+
+ list_transcripts = seq_dict.keys()
+ number_transcripts = 0
+ list_10_percentile = []
+ for value in range(1, 10):
+ list_10_percentile.append((len(list_transcripts) * value) / 10)
+ for transcript in list_transcripts:
+ number_transcripts += 1
+ if number_transcripts in list_10_percentile:
+ sys.stdout.write(
+ "%s percent\n"
+ % ((list_10_percentile.index(number_transcripts) + 1) * 10)
+ )
+ try: # use supplied CDS annotation
+ cds_start = cds_start_dict[transcript]
+ cds_end = cds_end_dict[transcript]
+ if cds_end < cds_start:
+ raise Exception
+ except Exception: # find longest ORF
+ transcript_seq = seq_dict[transcript]
+ cds_start = -1
+ start_post = []
+ end_post = []
+ for match in re.finditer(r"(?=(%s))" % re.escape("ATG"), transcript_seq):
+ start_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TAG"), transcript_seq):
+ end_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TAA"), transcript_seq):
+ end_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TGA"), transcript_seq):
+ end_post.append(match.start())
+
+ end_post.sort()
+ len_max_orf = 0
+ for value in start_post:
+ for value2 in end_post:
+ if value < value2:
+ if value % 3 == value2 % 3:
+ len_orf = value2 - value
+ if len_orf > len_max_orf:
+ cds_start = value
+ cds_end = value2 + 3
+ len_max_orf = len_orf
+ break
+ if cds_start == -1:
+ # sys.stdout.write( '%s, AUG codon not found\n'%transcript )
+ continue
+
+ elongation_region_all = seq_dict[transcript][cds_start:cds_end]
+ elongation_region_part = elongation_region_all[
+ 120:-60
+ ] # first 120 and last 60 nt are not used
+
+ if len(elongation_region_part) % 3 != 0:
+ # sys.stdout.write( '%s, CDS not divisible by 3\n'%transcript )
+ continue
+ peptide_sequence = translate_dna(elongation_region_all)
+ profile_list = [
+ 0.0 for n in range(cds_start + 120, cds_end - 60)
+ ] # records ribo-seq profile
+ if len(profile_list) < 50:
+ # sys.stdout.write( '%s, ORF too short\n'%transcript )
+ continue
+ all_reads = aligments_A1.fetch(transcript)
+
+ len_elongation_region = len(profile_list)
+ for read in all_reads:
+ readlen = read.qlen
+ if readlen not in accepted_read_lengths:
+ continue # selection of read of acceptable length
+ A_site = read.pos + offset - cds_start - 120 # addition of offset
+ if len_elongation_region > A_site > -1:
+ profile_list[A_site] += 1
+
+ average_gene_density = float(sum(profile_list)) / len(
+ profile_list
+ ) # average gene density calculated
+
+ if average_gene_density != 0:
+ num_codon = len(
+ [
+ 1
+ for number88 in range(0, len(profile_list), 3)
+ if (
+ (
+ profile_list[number88]
+ + profile_list[number88 + 1]
+ + profile_list[number88 + 2]
+ )
+ / 3
+ )
+ > average_gene_density
+ ]
+ )
+ # number of codons that exceed average gene density
+ expected_codon_density = float(num_codon) / (
+ len(profile_list) / 3
+ ) # expected enrichment value
+
+ peptide_start = 0
+ for sliding_w_n in range(
+ 0, len(elongation_region_part), 3
+ ): # sliding window using increments of 3 nts
+ amino_window = str(peptide_sequence[peptide_start : peptide_start + 60])
+ if len(set(amino_window) - set(amino_acids)) != 0:
+ peptide_start += 1
+ continue
+
+ if (
+ profile_list[sliding_w_n]
+ + profile_list[sliding_w_n + 1]
+ + profile_list[sliding_w_n + 2]
+ ) / 3 > average_gene_density:
+ for number in range(0, 60):
+ amino_acid_1 = amino_window[number : number + 1]
+ amino_enrichment_dict[amino_acid_1][number][0] += 1
+ amino_enrichment_dict[amino_acid_1][number][1] += 1
+ else:
+ for number in range(0, 60):
+ amino_acid_1 = amino_window[number : number + 1]
+ amino_enrichment_dict[amino_acid_1][number][0] += 1
+
+ amino_acid_1 = amino_window[40:41]
+ codon_enrichment_expected_dict[amino_acid_1].append(
+ expected_codon_density
+ )
+
+ peptide_start += 1
+
+ alignment_filename = args.alignment.split("/")[-1]
+ if not os.path.exists(args.Path):
+ os.mkdir(args.o)
+ outfile = open(
+ "%s/RUST_amino_file_%s_%s_%s"
+ % (args.Path, alignment_filename, args.offset, length_values),
+ "w",
+ )
+ outfile.write("amino, expected value")
+ for number106 in range(-40, 20):
+ outfile.write(", %s" % number106)
+ outfile.write("\n")
+
+ list_codons = []
+ list_amino_acids = list(amino_enrichment_dict)
+ list_amino_acids.sort()
+ rust_expected = []
+ rust_observed_metafootprint = []
+ for amino2 in list_amino_acids:
+ if amino2 in list_codons:
+ continue
+ list_codons.append(amino2)
+ outfile.write("%s" % amino2)
+ if codon_enrichment_expected_dict[amino2] != []:
+ outfile.write(", %s" % mean_value(codon_enrichment_expected_dict[amino2]))
+ list_data = []
+ for number in range(0, 60):
+ if amino_enrichment_dict[amino2][number][0] != 0:
+ outfile.write(
+ ", %s"
+ % (
+ amino_enrichment_dict[amino2][number][1]
+ / amino_enrichment_dict[amino2][number][0]
+ )
+ )
+ list_data.append(
+ amino_enrichment_dict[amino2][number][1]
+ / amino_enrichment_dict[amino2][number][0]
+ )
+ else:
+ outfile.write(", 0")
+ list_data.append(0)
+ outfile.write("\n")
+ rust_expected.append(mean_value(codon_enrichment_expected_dict[amino2]))
+ rust_observed_metafootprint.append(list_data)
+
+ rust_expected_sum = sum(rust_expected)
+ q_values = [n / rust_expected_sum for n in rust_expected]
+ shannon_values = []
+ for loc_i in range(60):
+ rust_observed = [n[loc_i] for n in rust_observed_metafootprint]
+ rust_observed_sum = sum(rust_observed)
+ rust_observed_min = min(rust_observed)
+ if rust_observed_min == 0:
+ shannon_values.append("NA")
+ else:
+ p_values = [n / rust_observed_sum for n in rust_observed]
+ shannon = []
+ list_normalised = [] ####
+ for p_value, q_value in zip(p_values, q_values):
+ shannon.append(abs(p_value * math.log((p_value / q_value), 2)))
+ list_normalised.append(p_value / q_value) ####
+ shannon_values.append(sum(shannon))
+
+ outfile.write("\nKullback Leibler divergence ,")
+ for value in shannon_values:
+ outfile.write(", %s" % value)
+ outfile.close()
+
+ try:
+ mpl.rcParams["xtick.direction"] = "out"
+ mpl.rcParams["ytick.direction"] = "out"
+ mpl.rcParams["legend.fontsize"] = 10
+ mpl.rcParams["ytick.labelsize"] = 10
+ mpl.rcParams["xtick.labelsize"] = 10
+ mpl.rcParams["font.size"] = 10
+ mpl.rcParams["axes.titlesize"] = 10
+ mpl.rcParams["legend.frameon"] = 0
+ mpl.rcParams["axes.axisbelow"] = False
+ mpl.rcParams["xtick.major.pad"] = 2.0
+ mpl.rcParams["ytick.major.pad"] = 2
+ mpl.rcParams["xtick.major.size"] = 2.0
+ mpl.rcParams["ytick.major.size"] = 2
+ mpl.rcParams["axes.linewidth"] = 0.5
+ mpl.rcParams["ytick.major.width"] = 0.25
+ mpl.rcParams["xtick.major.width"] = 0.25
+ mpl.rcParams["lines.linewidth"] = 1
+ mpl.rcParams["legend.borderpad"] = 0.01
+ mpl.rcParams["legend.labelspacing"] = 0.05
+ mpl.rcParams["legend.columnspacing"] = 0.5
+ mpl.rcParams["legend.borderaxespad"] = 0.15
+ mpl.rcParams["legend.handlelength"] = 1
+
+ fig = plt.figure(figsize=(6.69, 6.0))
+ infileopen = open(
+ "%s/RUST_amino_file_%s_%s_%s"
+ % (args.Path, alignment_filename, args.offset, length_values)
+ )
+ ax1_metafootprint = fig.add_subplot(111)
+ RUST_metagene_plot(infileopen, ax1_metafootprint)
+ plt.savefig(
+ "%s/RUST_amino_metafootprint_%s_%s_%s.png"
+ % (args.Path, alignment_filename, args.offset, length_values)
+ )
+ except:
+ sys.stdout.write("Error producing images\n")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Produces RUST metagene profile of amino acids"
+ )
+ parser.add_argument("--version", action="version", version="%(prog)s 1.2")
+
+ parser.add_argument(
+ "-t",
+ "--transcriptome",
+ help="fasta file of transcripts, CDS start and end may be provided on description line using tab separation e.g. >NM_0001 10 5000, otherwise it searches for longest ORF"
+ ", required=True",
+ )
+ parser.add_argument(
+ "-a",
+ "--alignment",
+ help="sorted bam file of transcriptome alignments",
+ required=True,
+ )
+ parser.add_argument("-o", "--offset", help="nucleotide offset to A-site", type=int)
+ parser.add_argument(
+ "-l",
+ "--lengths",
+ help="lengths of footprints included, for example 28:32 is 28,29,30,31,32",
+ )
+ parser.add_argument(
+ "-P", "--Path", help='path to outputfile, default is "amino"', default="amino"
+ )
+ args = parser.parse_args(None)
+ main(args)
diff --git a/falcon/lib/python3.10/site-packages/RUST/methods.py b/falcon/lib/python3.10/site-packages/RUST/methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..1864f8beeb67765df739d3f094c5dd4f0f78416f
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/RUST/methods.py
@@ -0,0 +1,94 @@
+"""
+This module contains methods for the RUST tool
+These are general purpose functions that are used in the RUST tool
+
+"""
+import sys
+
+
+def stop_err(msg):
+    """Write *msg* to stderr and terminate the program via SystemExit."""
+    sys.stderr.write("%s\n" % msg)
+    sys.exit()
+
+
+def mean_value(input_list):
+    """Arithmetic mean of *input_list* (coerced to float); 0 for empty input."""
+    if len(input_list) == 0:
+        return 0
+    else:
+        return sum(map(float, input_list)) / len(input_list)
+
+
+def translate_dna(sequence):
+ codontable = {
+ "ATA": "I",
+ "ATC": "I",
+ "ATT": "I",
+ "ATG": "M",
+ "ACA": "T",
+ "ACC": "T",
+ "ACG": "T",
+ "ACT": "T",
+ "AAC": "N",
+ "AAT": "N",
+ "AAA": "K",
+ "AAG": "K",
+ "AGC": "S",
+ "AGT": "S",
+ "AGA": "R",
+ "AGG": "R",
+ "CTA": "L",
+ "CTC": "L",
+ "CTG": "L",
+ "CTT": "L",
+ "CCA": "P",
+ "CCC": "P",
+ "CCG": "P",
+ "CCT": "P",
+ "CAC": "H",
+ "CAT": "H",
+ "CAA": "Q",
+ "CAG": "Q",
+ "CGA": "R",
+ "CGC": "R",
+ "CGG": "R",
+ "CGT": "R",
+ "GTA": "V",
+ "GTC": "V",
+ "GTG": "V",
+ "GTT": "V",
+ "GCA": "A",
+ "GCC": "A",
+ "GCG": "A",
+ "GCT": "A",
+ "GAC": "D",
+ "GAT": "D",
+ "GAA": "E",
+ "GAG": "E",
+ "GGA": "G",
+ "GGC": "G",
+ "GGG": "G",
+ "GGT": "G",
+ "TCA": "S",
+ "TCC": "S",
+ "TCG": "S",
+ "TCT": "S",
+ "TTC": "F",
+ "TTT": "F",
+ "TTA": "L",
+ "TTG": "L",
+ "TAC": "Y",
+ "TAT": "Y",
+ "TAA": "*",
+ "TAG": "*",
+ "TGC": "C",
+ "TGT": "C",
+ "TGA": "*",
+ "TGG": "W",
+ }
+ proteinsequence = ""
+ for n in range(0, len(sequence), 3):
+ try:
+ proteinsequence += codontable[sequence[n : n + 3]]
+ except:
+ proteinsequence += "X"
+ return proteinsequence
diff --git a/falcon/lib/python3.10/site-packages/RUST/nucleotide.py b/falcon/lib/python3.10/site-packages/RUST/nucleotide.py
new file mode 100644
index 0000000000000000000000000000000000000000..47873db5fc2a386587691e69c35a9a4c840243fc
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/RUST/nucleotide.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+#####################################################################################
+# rust_nucleotide, Produces RUST metagene profile of nucleotides
+# Copyright (C) 2015 Patrick O'Connor
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#####################################################################################
+
+import os, re, pysam, sys, math, argparse
+from RUST.methods import *
+
+try:
+ import matplotlib as mpl
+
+ mpl.use("Agg")
+ import matplotlib.pyplot as plt
+ from pylab import MaxNLocator
+except:
+ pass
+
+
+def RUST_metagene_plot(infileopen36, ax36):
+    """Plot the per-nucleotide RUST ratio curves (log2, color-coded A/T/G/C)
+    from the open RUST output file *infileopen36* onto *ax36*, plus the
+    trailing Kullback-Leibler divergence row on a twin blue axis if present.
+    """
+    infileopen36.seek(0)
+    infileopen36.readline()  # skip the header row
+    while 1:
+        line = infileopen36.readline()
+        linesplit = line.split(",")
+        # A line without commas marks the end of the per-nucleotide rows.
+        if len(linesplit) == 1:
+            break
+        nucleotide_type = linesplit[0]
+        coverage = list(map(float, linesplit[1:]))
+        coverage_a = coverage[0]  # first column is the expected value
+        if coverage_a == 0:
+            continue
+        # Normalize observed columns by the expected value, then take log2.
+        coverage_n = [n / coverage_a for n in coverage[1:]]
+        # ax36.plot(log2(coverage_n[:-2]),color = "gray")
+        log2_values = [math.log(n, 2) for n in coverage_n]
+        if nucleotide_type == "A":
+            ax36.plot(log2_values, color="firebrick", label=nucleotide_type)
+        elif nucleotide_type == "T":
+            ax36.plot(log2_values, color="seagreen", label=nucleotide_type)
+        elif nucleotide_type == "G":
+            ax36.plot(log2_values, color="Orange", label=nucleotide_type)
+        else:
+            ax36.plot(log2_values, color="MediumPurple", label=nucleotide_type)
+
+    line = infileopen36.readline()
+    linesplit = line.split(",")
+    # KL-divergence row is skipped when any column is "NA".
+    if "NA" not in linesplit:
+        coverage = list(map(float, linesplit[2:]))
+        ax2 = ax36.twinx()
+        ax2.plot(coverage, color="blue")
+        for tl in ax2.get_yticklabels():
+            tl.set_color("blue")
+            tl.set_rotation(0)
+
+        ax2.yaxis.set_major_locator(MaxNLocator(3))
+        ax2.set_ylim(0, 1.0)
+        ax2.set_ylim(-2, 1.0)
+        ax2.set_yticks([0, 1], minor=False)
+        ax2.set_yticklabels(["0", "1"])
+        ax2.set_ylabel("Kullback-Leibler divergence", color="blue")
+
+    # Nucleotide positions 0..179 relabeled relative to the A-site at 120.
+    ax36.set_xticks([15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165])
+    ax36.set_xticklabels([-105, -90, -75, -60, -45, -30, -15, 0, 15, 30, 45])
+    ax36.set_xlabel("distance from A-site [codon]")
+    ax36.set_ylabel("Nucleotide RUST ratio (observed/expected), log2")
+    ax36.axvline(120, color="red")
+    ax36.legend(bbox_to_anchor=(0, 0, 0.2, 0.9))
+
+
+def main(args):
+
+ mRNA_sequences = args.transcriptome # path to fastq file of transcripts
+ in_seq_handle = open(mRNA_sequences)
+ cds_start_dict = {}
+ cds_end_dict = {}
+ seq_dict = {}
+ for line in in_seq_handle:
+ if line[0] != ">":
+ seq_dict.setdefault(transcript, "")
+ seq_dict[transcript] += line[:-1]
+ continue
+ try:
+ transcript_split = line[:-1].split("\t")
+ transcript = transcript_split[0][1:]
+ cds_start_dict[transcript] = int(transcript_split[1])
+ cds_end_dict[transcript] = int(transcript_split[2])
+ except:
+ pass
+ in_seq_handle.close()
+
+ offset = args.offset
+ readlen_range = args.lengths
+ readlen_rangesplit = readlen_range.split(":")
+ if len(readlen_rangesplit) == 1:
+ accepted_read_lengths = [int(readlen_rangesplit[0])]
+ length_values = "%s" % int(readlen_rangesplit[0])
+ elif len(readlen_rangesplit) == 2:
+ accepted_read_lengths = [
+ readlen
+ for readlen in range(
+ int(readlen_rangesplit[0]), int(readlen_rangesplit[1]) + 1
+ )
+ ]
+ length_values = "%s_%s" % (
+ int(readlen_rangesplit[0]),
+ int(readlen_rangesplit[1]),
+ )
+ else:
+ stop_err(
+ "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
+ )
+ if len(accepted_read_lengths) == 0:
+ stop_err(
+ "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
+ )
+
+ nts = ["A", "G", "C", "T"]
+ aligments_A1 = pysam.Samfile(args.alignment, "rb")
+
+ nucleotide_enrichment_dict = {}
+ nucleotide_enrichment_expected_dict = {}
+ for nt in nts:
+ nucleotide_enrichment_dict[nt] = {}
+ nucleotide_enrichment_expected_dict[nt] = []
+ for number in range(0, 180, 1):
+ nucleotide_enrichment_dict[nt][number] = [0.0, 0.0]
+
+ list_transcripts = seq_dict.keys()
+ number_transcripts = 0
+ list_10_percentile = []
+ for value in range(1, 10):
+ list_10_percentile.append((len(list_transcripts) * value) / 10)
+ for transcript in list_transcripts:
+ number_transcripts += 1
+ if number_transcripts in list_10_percentile:
+ sys.stdout.write(
+ "%s percent\n"
+ % ((list_10_percentile.index(number_transcripts) + 1) * 10)
+ )
+
+ try: # use supplied CDS annotation
+ cds_start = cds_start_dict[transcript]
+ cds_end = cds_end_dict[transcript]
+ if cds_end < cds_start:
+ raise Exception
+ except Exception: # find longest ORF
+ transcript_seq = seq_dict[transcript]
+ cds_start = -1
+ start_post = []
+ end_post = []
+ for match in re.finditer(r"(?=(%s))" % re.escape("ATG"), transcript_seq):
+ start_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TAG"), transcript_seq):
+ end_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TAA"), transcript_seq):
+ end_post.append(match.start())
+ for match in re.finditer(r"(?=(%s))" % re.escape("TGA"), transcript_seq):
+ end_post.append(match.start())
+
+ end_post.sort()
+ len_max_orf = 0
+ for value in start_post:
+ for value2 in end_post:
+ if value < value2:
+ if value % 3 == value2 % 3:
+ len_orf = value2 - value
+ if len_orf > len_max_orf:
+ cds_start = value
+ cds_end = value2 + 3
+ len_max_orf = len_orf
+ break
+ if cds_start == -1:
+ # sys.stdout.write( '%s, AUG codon not found\n'%transcript )
+ continue
+ elongation_region_all = seq_dict[transcript][cds_start:cds_end]
+ elongation_region_part = elongation_region_all[
+ 120:-60
+ ] # first 120 and last 60 nt are not used
+ # peptide_sequence = elongation_region_all.translate()
+
+ if len(elongation_region_part) % 3 != 0:
+ # sys.stdout.write( '%s, CDS not divisible by 3\n'%transcript )
+ continue
+ profile_list = [
+ 0.0 for n in range(cds_start + 120, cds_end - 60)
+ ] # records ribo-seq profile
+ if len(profile_list) < 50:
+ # sys.stdout.write( '%s, ORF too short\n'%transcript )
+ continue
+ all_reads = aligments_A1.fetch(transcript)
+
+ len_elongation_region = len(profile_list)
+ for read in all_reads:
+ readlen = read.qlen
+ if readlen not in accepted_read_lengths:
+ continue # selection of read of acceptable length
+ A_site = read.pos + offset - cds_start - 120 # addition of offset
+ if len_elongation_region > A_site > -1:
+ profile_list[A_site] += 1
+
+ average_gene_density = float(sum(profile_list)) / len(profile_list)
+
+ if average_gene_density != 0:
+ num_codon = len(
+ [
+ 1
+ for number88 in range(0, len(profile_list), 3)
+ if (
+ (
+ profile_list[number88]
+ + profile_list[number88 + 1]
+ + profile_list[number88 + 2]
+ )
+ / 3
+ )
+ > average_gene_density
+ ]
+ )
+ # number of codons that exceed average gene density
+ expected_codon_density = float(num_codon) / (len(profile_list) / 3)
+
+ codon_start = 0
+ for sliding_w_n in range(
+ 0, len(elongation_region_part), 3
+ ): # sliding window using increments of 3 nts
+ codon_window = str(
+ elongation_region_all[codon_start : codon_start + 180]
+ )
+ if len(set(codon_window) - set(["A", "T", "G", "C"])) != 0:
+ codon_start += 3
+ continue
+
+ if (
+ profile_list[sliding_w_n]
+ + profile_list[sliding_w_n + 1]
+ + profile_list[sliding_w_n + 2]
+ ) / 3 > average_gene_density:
+ for number in range(0, 180):
+ nucleotide3 = codon_window[number]
+ nucleotide_enrichment_dict[nucleotide3][number][0] += 1
+ nucleotide_enrichment_dict[nucleotide3][number][1] += 1
+ else:
+ for number in range(0, 180):
+ nucleotide3 = codon_window[number]
+ nucleotide_enrichment_dict[nucleotide3][number][0] += 1
+
+ nucleotide3 = codon_window[120:121]
+ nucleotide_enrichment_expected_dict[nucleotide3].append(
+ expected_codon_density
+ )
+ nucleotide3 = codon_window[121:122]
+ nucleotide_enrichment_expected_dict[nucleotide3].append(
+ expected_codon_density
+ )
+ nucleotide3 = codon_window[122:123]
+ nucleotide_enrichment_expected_dict[nucleotide3].append(
+ expected_codon_density
+ )
+ codon_start += 3
+
+ if not os.path.exists(args.Path):
+ os.mkdir(args.Path)
+ alignment_filename = args.alignment.split("/")[-1]
+ outfile = open(
+ "%s/RUST_nucleotide_file_%s_%s_%s"
+ % (args.Path, alignment_filename, args.offset, length_values),
+ "w",
+ )
+ outfile.write("nucleotide, expected value")
+ for number106 in range(-120, 60):
+ outfile.write(", %s" % number106)
+ outfile.write("\n")
+
+ list_nucleotide2 = []
+ nucleotides = list(nucleotide_enrichment_expected_dict.keys())
+ nucleotides.sort()
+ rust_expected = []
+ rust_observed_metafootprint = []
+ for nucleotide2 in nucleotides:
+ if nucleotide2 in list_nucleotide2:
+ continue
+ list_nucleotide2.append(nucleotide2)
+ outfile.write("%s" % nucleotide2)
+ outfile.write(
+ ", %s" % mean_value(nucleotide_enrichment_expected_dict[nucleotide2])
+ )
+ list_data = []
+ for number in range(0, 180):
+ if nucleotide_enrichment_dict[nucleotide2][number][0] != 0:
+ outfile.write(
+ ", %s"
+ % (
+ nucleotide_enrichment_dict[nucleotide2][number][1]
+ / nucleotide_enrichment_dict[nucleotide2][number][0]
+ )
+ )
+ list_data.append(
+ nucleotide_enrichment_dict[nucleotide2][number][1]
+ / nucleotide_enrichment_dict[nucleotide2][number][0]
+ )
+ else:
+ outfile.write(", 0")
+ list_data.append(0)
+ rust_expected.append(
+ mean_value(nucleotide_enrichment_expected_dict[nucleotide2])
+ )
+ rust_observed_metafootprint.append(list_data)
+ outfile.write("\n")
+
+ rust_expected_sum = sum(rust_expected)
+ q_values = [n / rust_expected_sum for n in rust_expected]
+ shannon_values = []
+ for loc_i in range(180):
+ rust_observed = [n[loc_i] for n in rust_observed_metafootprint]
+ rust_observed_sum = sum(rust_observed)
+ rust_observed_min = min(rust_observed)
+ if rust_observed_min == 0:
+ shannon_values.append("NA")
+ else:
+ p_values = [n / rust_observed_sum for n in rust_observed]
+ shannon = []
+ list_normalised = [] ####
+ for p_value, q_value in zip(p_values, q_values):
+ shannon.append(abs(p_value * math.log((p_value / q_value), 2)))
+ list_normalised.append(p_value / q_value) ####
+ shannon_values.append(sum(shannon))
+
+ outfile.write("\nKullback Leibler divergence,")
+ for value in shannon_values:
+ outfile.write(", %s" % value)
+ outfile.close()
+
+ try:
+ mpl.rcParams["xtick.direction"] = "out"
+ mpl.rcParams["ytick.direction"] = "out"
+ mpl.rcParams["legend.fontsize"] = 10
+ mpl.rcParams["ytick.labelsize"] = 10
+ mpl.rcParams["xtick.labelsize"] = 10
+ mpl.rcParams["font.size"] = 10
+ mpl.rcParams["axes.titlesize"] = 10
+ mpl.rcParams["legend.frameon"] = 0
+ mpl.rcParams["axes.axisbelow"] = False
+ mpl.rcParams["xtick.major.pad"] = 2.0
+ mpl.rcParams["ytick.major.pad"] = 2
+ mpl.rcParams["xtick.major.size"] = 2.0
+ mpl.rcParams["ytick.major.size"] = 2
+ mpl.rcParams["axes.linewidth"] = 0.5
+ mpl.rcParams["ytick.major.width"] = 0.25
+ mpl.rcParams["xtick.major.width"] = 0.25
+ mpl.rcParams["lines.linewidth"] = 1
+ mpl.rcParams["legend.borderpad"] = 0.01
+ mpl.rcParams["legend.labelspacing"] = 0.05
+ mpl.rcParams["legend.columnspacing"] = 0.5
+ mpl.rcParams["legend.borderaxespad"] = 0.15
+ mpl.rcParams["legend.handlelength"] = 1
+
+ fig = plt.figure(figsize=(6.69, 6.0))
+ infileopen = open(
+ "%s/RUST_nucleotide_file_%s_%s_%s"
+ % (args.Path, alignment_filename, args.offset, length_values)
+ )
+ ax1_metafootprint = fig.add_subplot(111)
+ RUST_metagene_plot(infileopen, ax1_metafootprint)
+ plt.savefig(
+ "%s/RUST_nucleotide_metafootprint_%s_%s_%s.png"
+ % (args.Path, alignment_filename, args.offset, length_values)
+ )
+ except:
+ sys.stdout.write("Error producing images\n")
+
+
if __name__ == "__main__":
    # Command-line entry point: build the CLI, parse argv, and run main().
    parser = argparse.ArgumentParser(
        description="Produces RUST metagene profile of nucleotides"
    )
    # BUG FIX: ", required=True" was previously concatenated into the help
    # string (so argparse never enforced -t); it is a real keyword now.
    parser.add_argument(
        "-t",
        "--transcriptome",
        help="fasta file of transcripts, CDS start and end may be provided on description line using tab separation e.g. >NM_0001 10 5000, otherwise it searches for longest ORF",
        required=True,
    )
    parser.add_argument(
        "-a",
        "--alignment",
        help="sorted bam file of transcriptome alignments",
        required=True,
    )
    parser.add_argument("-o", "--offset", help="nucleotide offset to A-site", type=int)
    parser.add_argument(
        "-l",
        "--lengths",
        help="lengths of footprints included, for example 28:32 is 28,29,30,31,32",
    )
    parser.add_argument(
        "-P",
        "--Path",
        help='path to outputfile, default is "nucleotide"',
        default="nucleotide",
    )
    parser.add_argument("--version", action="version", version="%(prog)s 1.2")
    args = parser.parse_args(None)

    main(args)
diff --git a/falcon/lib/python3.10/site-packages/RUST/predict_profiles.py b/falcon/lib/python3.10/site-packages/RUST/predict_profiles.py
new file mode 100644
index 0000000000000000000000000000000000000000..868bedec6eb9504e323c18543e7d806f44e99ae6
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/RUST/predict_profiles.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#####################################################################################
+# rust_predict_profiles, Correlation between observed and predicted profiles from CDS start + 120 to CDS stop - 60
+# Copyright (C) 2015 Patrick O'Connor
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#####################################################################################
+
+import os, pysam, sys, numpy, argparse, re
+from RUST.methods import *
+
+
def rank(lsit1):
    """Return the zero-based rank of each element of *lsit1*.

    Ties receive the mean of the positions they occupy in sorted order
    (fractional ranking), as used for Spearman correlation below.
    """
    positions_by_value = {}
    for position, value in enumerate(sorted(lsit1)):
        positions_by_value.setdefault(value, []).append(position)
    return [mean_value(positions_by_value[value]) for value in lsit1]
+
+
def main(args):
    """Correlate observed ribosome profiles with profiles predicted from
    RUST codon enrichment values over CDS start + 120 .. CDS stop - 60.

    Writes one correlations CSV under ``args.Path`` and, when
    ``args.profiles`` is set, one observed/predicted CSV per transcript.
    """

    # --- Load the RUST codon metafootprint (output of RUST_script.py) ---
    RUST_file = open(args.rustfile)  # file output of RUST_script.py
    RUST_file.readline()
    codon_rust_dict = {}
    for line in RUST_file:
        linesplit = line.split(",")
        if len(linesplit) == 1:
            # A line without commas marks the end of the per-codon table.
            break
        codon = linesplit[0]
        if len(codon) != 3 or len(set(codon) - set(["A", "T", "G", "C"])) != 0:
            stop_err("Codon metafootprint file not correct, check input file")
        codon_rust_dict[codon] = {}
        rust_values = list(map(float, linesplit[1:]))
        # First column is the expected value; remaining columns are the
        # observed metafootprint, normalised by that expectation.
        expected = rust_values[0]
        rust_metafootprint = [ro_value / expected for ro_value in rust_values[1:]]
        for n in range(34, 46):
            codon_rust_dict[codon][n - 40] = rust_metafootprint[
                n
            ]  # for 12 codons positions near A-site
    RUST_file.close()

    # --- Parse the transcript fasta; CDS bounds may be tab-separated on
    # the description line (">name\tstart\tend").  NOTE(review): sequence
    # lines rely on `transcript` still holding the last header's name.
    mRNA_sequences = args.transcriptome  # path to fastq file of transcripts
    in_seq_handle = open(mRNA_sequences)
    cds_start_dict = {}
    cds_end_dict = {}
    seq_dict = {}
    for line in in_seq_handle:
        if line[0] != ">":
            seq_dict.setdefault(transcript, "")
            seq_dict[transcript] += line[:-1]
            continue
        try:
            transcript_split = line[:-1].split("\t")
            transcript = transcript_split[0][1:]
            cds_start_dict[transcript] = int(transcript_split[1])
            cds_end_dict[transcript] = int(transcript_split[2])
        except:
            # Header without numeric CDS columns: keep the name, skip bounds.
            pass
    in_seq_handle.close()

    # --- Parse the accepted footprint length range ("31" or "28:32") ---
    offset = args.offset
    readlen_range = args.lengths
    readlen_rangesplit = readlen_range.split(":")
    if len(readlen_rangesplit) == 1:
        accepted_read_lengths = [int(readlen_rangesplit[0])]
        length_values = "%s" % int(readlen_rangesplit[0])
    elif len(readlen_rangesplit) == 2:
        accepted_read_lengths = [
            readlen
            for readlen in range(
                int(readlen_rangesplit[0]), int(readlen_rangesplit[1]) + 1
            )
        ]
        length_values = "%s_%s" % (
            int(readlen_rangesplit[0]),
            int(readlen_rangesplit[1]),
        )
    else:
        stop_err(
            "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
        )
    if len(accepted_read_lengths) == 0:
        stop_err(
            "Lengths of footprints parameter not in correct format, it should be either colon seperated with the second value greater or equal to the first, (28:32) or a single interger (31)"
        )

    # NOTE(review): this list is never used below — possibly left over from
    # the related amino-acid script; verify before removing.
    amino_acids = [
        "A", "C", "E", "D", "G", "F", "I", "H", "K", "M",
        "L", "N", "Q", "P", "S", "R", "T", "W", "V", "Y",
    ]
    aligments_A1 = pysam.Samfile(args.alignment, "rb")

    # --- Prepare output locations ---
    if not os.path.exists(args.Path):
        os.mkdir(args.Path)
    if args.profiles:
        if not os.path.exists("%s/rust_profile_predictions" % args.Path):
            os.mkdir("%s/rust_profile_predictions" % args.Path)

    if "/" in args.rustfile:
        rustfile_split = args.rustfile.split("/")[-1]
    # elif "\\" in args.rustfile:
    # rustfile_split= args.rustfile.split("\\")[-1]
    else:
        rustfile_split = args.rustfile

    # Strip the "RUST_codon_file_" prefix (16 chars) to recover the
    # original alignment filename for the output name.
    if "RUST_codon_file_" in rustfile_split:
        alignment_filename = rustfile_split[16:]
    else:
        alignment_filename = rustfile_split

    correlations_file = open(
        "%s/predict_profiles_%s_%s_%s"
        % (args.Path, alignment_filename, args.offset, length_values),
        "w",
    )
    correlations_file.write(
        "transcript,average read density,Spearman's coefficient,Pearson's coefficient\n"
    )

    list_transcripts = seq_dict.keys()
    number_transcripts = 0
    list_10_percentile = []
    for value in range(1, 10):
        # NOTE(review): true division yields floats here, so the progress
        # report below fires only when a decile boundary is a whole number.
        list_10_percentile.append((len(list_transcripts) * value) / 10)
    for transcript in list_transcripts:
        number_transcripts += 1
        if number_transcripts in list_10_percentile:
            sys.stdout.write(
                "%s percent\n"
                % ((list_10_percentile.index(number_transcripts) + 1) * 10)
            )

        # Use annotated CDS bounds when present and sane; otherwise fall
        # back to a longest-ORF search over the whole transcript.
        try:
            cds_start = cds_start_dict[transcript]
            cds_end = cds_end_dict[transcript]
            if cds_end < cds_start:
                raise Exception
        except Exception:
            transcript_seq = seq_dict[transcript]
            cds_start = -1
            start_post = []
            end_post = []
            for match in re.finditer(r"(?=(%s))" % re.escape("ATG"), transcript_seq):
                start_post.append(match.start())
            for match in re.finditer(r"(?=(%s))" % re.escape("TAG"), transcript_seq):
                end_post.append(match.start())
            for match in re.finditer(r"(?=(%s))" % re.escape("TAA"), transcript_seq):
                end_post.append(match.start())
            for match in re.finditer(r"(?=(%s))" % re.escape("TGA"), transcript_seq):
                end_post.append(match.start())

            end_post.sort()
            len_max_orf = 0
            for value in start_post:
                for value2 in end_post:
                    if value < value2:
                        if value % 3 == value2 % 3:
                            len_orf = value2 - value
                            if len_orf > len_max_orf:
                                cds_start = value
                                cds_end = value2 + 3
                                len_max_orf = len_orf
                            # NOTE(review): break placement reconstructed —
                            # ends each candidate ORF at its first in-frame
                            # downstream stop; confirm against upstream RUST.
                            break
        if cds_start == -1:
            # sys.stdout.write( '%s, AUG codon not found\n'%transcript )
            continue

        elongation_region_all = seq_dict[transcript][cds_start:cds_end]

        if (
            len(elongation_region_all) % 3 != 0
        ):  # genes with codon region not divisible by 3 skipped
            # sys.stdout.write( '%s, CDS not divisible by 3\n'%transcript )
            continue

        # --- Predict a per-codon profile as the product of RUST enrichment
        # values over the -6..+5 codon neighbourhood of each position. ---
        profile_expect = []
        for n in range(
            0, len(elongation_region_all[120:-60]), 3
        ):  # predicts profile from 120 nts after start to 60 before stop
            minus6_plus5_footprint = elongation_region_all[
                120 + n - 18 : 120 + n + 19
            ]  # contains sequence of region used to predict profile
            value = 1.0
            amino_loc = -6
            for number in range(0, len(minus6_plus5_footprint) - 2, 3):
                codon = minus6_plus5_footprint[number : number + 3]
                # Skip ambiguous codons and stop codons; they contribute
                # nothing to the product but still advance the position.
                if len(set(codon) - set(["A", "T", "G", "C"])) != 0 or codon in [
                    "TAG",
                    "TGA",
                    "TAA",
                ]:
                    amino_loc += 1
                    continue
                value = value * codon_rust_dict[codon][amino_loc]
                amino_loc += 1
            profile_expect.append(value)
        profile_expect_sum = sum(profile_expect)
        profile_expect_probablility = [
            float(value) / profile_expect_sum for value in profile_expect
        ]

        profile_list = [
            0.0 for n in range(cds_start + 120, cds_end - 60)
        ]  # records ribo-seq profile
        if len(profile_list) < 50:
            # sys.stdout.write( '%s, ORF too short\n'%transcript )
            continue
        all_reads = aligments_A1.fetch(transcript)

        # --- Accumulate the observed A-site profile from the alignments ---
        len_elongation_region = len(profile_list)
        for read in all_reads:
            readlen = read.qlen
            if readlen not in accepted_read_lengths:
                continue  # selection of read of acceptable length
            A_site = read.pos + offset - cds_start - 120  # addition of offset
            if len_elongation_region > A_site > -1:
                profile_list[A_site] += 1

        average_gene_density = float(sum(profile_list)) / len(
            profile_list
        )  # average gene density calculated
        if average_gene_density > 0:
            # Collapse the nucleotide profile to per-codon counts.
            profiles_control_codon = [
                profile_list[codon_ind]
                + profile_list[codon_ind + 1]
                + profile_list[codon_ind + 2]
                for codon_ind in range(0, len(profile_list), 3)
            ]
            # Spearman = Pearson on fractional ranks (see rank() above).
            spearmanr_value = numpy.corrcoef(
                rank(profiles_control_codon), rank(profile_expect)
            )[0, 1]
            pearsonr_value = numpy.corrcoef(profiles_control_codon, profile_expect)[
                0, 1
            ]
            correlations_file.write(
                "%s,%s,%s,%s\n"
                % (transcript, average_gene_density, spearmanr_value, pearsonr_value)
            )
            if args.profiles:
                # Optional per-transcript dump of predicted vs observed.
                open_file = open(
                    "%s/rust_profile_predictions/observed_predicted_%s_%s_%s_%s.csv"
                    % (
                        args.Path,
                        transcript,
                        alignment_filename,
                        args.offset,
                        length_values,
                    ),
                    "w",
                )
                profile_expect_probablility_index = 0
                open_file.write("%s\n" % transcript)
                open_file.write("codon, predicted probability, alignments\n")
                for coordinate_index in range(
                    0, len(elongation_region_all[120:-60]), 3
                ):
                    codon = elongation_region_all[
                        120 + coordinate_index : 120 + coordinate_index + 3
                    ]
                    open_file.write("%s, " % (codon))
                    open_file.write(
                        "%s, "
                        % (
                            profile_expect_probablility[
                                profile_expect_probablility_index
                            ]
                        )
                    )
                    open_file.write(
                        "%s\n"
                        % (profiles_control_codon[profile_expect_probablility_index])
                    )
                    profile_expect_probablility_index += 1
                open_file.close()
    correlations_file.close()
+
+
if __name__ == "__main__":
    # Command-line entry point: build the CLI, parse argv, and run main().
    parser = argparse.ArgumentParser(
        description="Correlation between observed and predicted profiles from CDS start + 120 to CDS stop - 60"
    )
    # BUG FIX: ", required=True" was previously concatenated into the help
    # string (so argparse never enforced -t); it is a real keyword now.
    parser.add_argument(
        "-t",
        "--transcriptome",
        help="fasta file of transcripts, CDS start and end may be provided on description line using tab separation e.g. >NM_0001 10 5000, otherwise it searches for longest ORF",
        required=True,
    )
    parser.add_argument(
        "-a",
        "--alignment",
        help="sorted bam file of transcriptome alignments",
        required=True,
    )
    parser.add_argument("-o", "--offset", help="nucleotide offset to A-site", type=int)
    parser.add_argument(
        "-l",
        "--lengths",
        help="lengths of footprints included, for example 28:32 is 28,29,30,31,32",
    )
    parser.add_argument(
        "-P",
        "--Path",
        help='path to outputfile, default is "predict_profiles"',
        default="predict_profiles",
    )
    # BUG FIX: main() opens args.rustfile unconditionally, so make it
    # required instead of silently passing None to open().
    parser.add_argument(
        "-r",
        "--rustfile",
        help="path to rust file produced by codon",
        required=True,
    )
    # BUG FIX: a second "-o" option used to be registered here, which made
    # argparse raise a conflict error at startup; it duplicated -P/--Path
    # and has been removed.
    # BUG FIX: the flag was previously plain "-p", storing to args.p while
    # main() reads args.profiles; the long option fixes the destination.
    parser.add_argument(
        "-p",
        "--profiles",
        action="store_true",
        help="writes all profiles in csv files, may produce >10,000 files",
        default=False,
    )
    parser.add_argument("--version", action="version", version="%(prog)s 1.2")
    args = parser.parse_args(None)
    main(args)
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/alias.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/alias.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fd5e9ab7659a74ab2a6025a68d0d58c7c7dbbe6
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/alias.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_wheel.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_wheel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fb03fde63e2eb15598e95df90fea8a2c3797fa1
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_wheel.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/build_clib.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/build_clib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef4654dc5fd3c87bf2cae941d4cf589382f82e55
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/build_clib.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/egg_info.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/egg_info.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7ea9720dc99cfa1a2630a291b012536f115281c
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/egg_info.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..605319d9d7000ff7a5b3e7d4d9cf70cd5f056d1f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_lib.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_lib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..134346fa4b2d41535af1dd6fee1f3db75b461729
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_lib.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_scripts.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_scripts.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..416217c6f65fc436f89c0423a79131b7ba80a79b
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/install_scripts.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/rotate.cpython-310.pyc b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/rotate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8007f252093cb0d3b830c4ce9c1cffd0e59fbb1
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/setuptools/command/__pycache__/rotate.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/build_clib.py b/falcon/lib/python3.10/site-packages/setuptools/command/build_clib.py
new file mode 100644
index 0000000000000000000000000000000000000000..f376f4ce4d2afc4a58f1fa0e85624136edc93835
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/setuptools/command/build_clib.py
@@ -0,0 +1,103 @@
+from ..dist import Distribution
+from ..modified import newer_pairwise_group
+
+import distutils.command.build_clib as orig
+from distutils import log
+from distutils.errors import DistutilsSetupError
+
+
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
        * obj_deps - specify dependencies for each object compiled.
                     this should be a dictionary mapping a key
                     with the source filename to a list of
                     dependencies. Use an empty string for global
                     dependencies.
        * cflags - specify a list of additional flags to pass to
                   the compiler.
    """

    distribution: Distribution  # override distutils.dist.Distribution with setuptools.dist.Distribution

    def build_libraries(self, libraries) -> None:
        """Compile and archive each ``(lib_name, build_info)`` pair,
        recompiling only when some object is out of date w.r.t. its
        declared dependencies."""
        for lib_name, build_info in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'sources' must be present and must be "
                    "a list of source filenames"
                )
            # Sorted for a deterministic build order.
            sources = sorted(list(sources))

            log.info("building '%s' library", lib_name)

            # Make sure everything is the correct type.
            # obj_deps should be a dictionary of keys as sources
            # and a list/tuple of files that are its dependencies.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'"
                )
            dependencies = []

            # Get the global dependencies that are specified by the '' key.
            # These will go into every source's dependency list.
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'"
                )

            # Build the list to be used by newer_pairwise_group
            # each source will be auto-added to its dependencies.
            for source in sources:
                src_deps = [source]
                src_deps.extend(global_deps)
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        f"in 'libraries' option (library '{lib_name}'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'"
                    )
                src_deps.extend(extra_deps)
                dependencies.append(src_deps)

            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp,
            )

            # ([], []) means every object is newer than all of its deps —
            # anything else requires a recompile.
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                # First, compile the source code to object files in the library
                # directory. (This should probably change to putting object
                # files in a temporary build directory.)
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    extra_postargs=cflags,
                    debug=self.debug,
                )

            # Now "link" the object files together into a static library.
            # (On Unix at least, this isn't really linking -- it just
            # builds an archive. Whatever.)
            self.compiler.create_static_lib(
                expected_objects, lib_name, output_dir=self.build_clib, debug=self.debug
            )
diff --git a/falcon/lib/python3.10/site-packages/setuptools/command/install.py b/falcon/lib/python3.10/site-packages/setuptools/command/install.py
new file mode 100644
index 0000000000000000000000000000000000000000..741b140c7027bf5686474101c84e3912b389813f
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/setuptools/command/install.py
@@ -0,0 +1,183 @@
+from __future__ import annotations
+
+import glob
+import inspect
+import platform
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, ClassVar, cast
+
+import setuptools
+
+from ..dist import Distribution
+from ..warnings import SetuptoolsDeprecationWarning, SetuptoolsWarning
+from .bdist_egg import bdist_egg as bdist_egg_cls
+
+import distutils.command.install as orig
+from distutils.errors import DistutilsArgError
+
+if TYPE_CHECKING:
+ # This is only used for a type-cast, don't import at runtime or it'll cause deprecation warnings
+ from .easy_install import easy_install as easy_install_cls
+else:
+ easy_install_cls = None
+
+
def __getattr__(name: str):  # pragma: no cover
    """Module-level fallback lookup: keep the deprecated ``_install`` alias
    working (with a warning) and reject everything else."""
    if name != "_install":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    SetuptoolsDeprecationWarning.emit(
        "`setuptools.command._install` was an internal implementation detail "
        + "that was left in for numpy<1.9 support.",
        due_date=(2025, 5, 2),  # Originally added on 2024-11-01
    )
    return orig.install
+
+
class install(orig.install):
    """Use easy_install to install the package, w/dependencies"""

    distribution: Distribution  # override distutils.dist.Distribution with setuptools.dist.Distribution

    user_options = orig.install.user_options + [
        ('old-and-unmanageable', None, "Try not to use this!"),
        (
            'single-version-externally-managed',
            None,
            "used by system package builders to create 'flat' eggs",
        ),
    ]
    boolean_options = orig.install.boolean_options + [
        'old-and-unmanageable',
        'single-version-externally-managed',
    ]
    # Type the same as distutils.command.install.install.sub_commands
    # Must keep the second tuple item potentially None due to invariance
    new_commands: ClassVar[list[tuple[str, Callable[[Any], bool] | None]]] = [
        ('install_egg_info', lambda self: True),
        ('install_scripts', lambda self: True),
    ]
    # Lookup table of the setuptools-added sub-commands; used at module
    # bottom to filter distutils' defaults.
    _nc = dict(new_commands)

    def initialize_options(self):
        # Warn on every use: direct ``setup.py install`` is deprecated.
        SetuptoolsDeprecationWarning.emit(
            "setup.py install is deprecated.",
            """
            Please avoid running ``setup.py`` directly.
            Instead, use pypa/build, pypa/installer or other
            standards-based tools.
            """,
            see_url="https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html",
            # TODO: Document how to bootstrap setuptools without install
            #       (e.g. by unzipping the wheel file)
            #       and then add a due_date to this warning.
        )

        super().initialize_options()
        self.old_and_unmanageable = None
        self.single_version_externally_managed = None

    def finalize_options(self) -> None:
        # --root implies single-version-externally-managed; an explicit
        # single-version flag without --root additionally requires --record.
        super().finalize_options()
        if self.root:
            self.single_version_externally_managed = True
        elif self.single_version_externally_managed:
            if not self.root and not self.record:
                raise DistutilsArgError(
                    "You must specify --record or --root when building system packages"
                )

    def handle_extra_path(self):
        if self.root or self.single_version_externally_managed:
            # explicit backward-compatibility mode, allow extra_path to work
            return orig.install.handle_extra_path(self)

        # Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed
        self.path_file = None
        self.extra_dirs = ''
        return None

    def run(self):
        # Explicit request for old-style install?  Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return super().run()

        if not self._called_from_setup(inspect.currentframe()):
            # Run in backward-compatibility mode to support bdist_* commands.
            super().run()
        else:
            self.do_egg_install()

        return None

    @staticmethod
    def _called_from_setup(run_frame):
        """
        Attempt to detect whether run() was called from setup() or by another
        command.  If called by setup(), the parent caller will be the
        'run_command' method in 'distutils.dist', and *its* caller will be
        the 'run_commands' method.  If called any other way, the
        immediate caller *might* be 'run_command', but it won't have been
        called by 'run_commands'.  Return True in that case or if a call stack
        is unavailable.  Return False otherwise.
        """
        if run_frame is None:
            msg = "Call stack not available. bdist_* commands may fail."
            SetuptoolsWarning.emit(msg)
            if platform.python_implementation() == 'IronPython':
                msg = "For best results, pass -X:Frames to enable call stack."
                SetuptoolsWarning.emit(msg)
            return True

        frames = inspect.getouterframes(run_frame)
        for frame in frames[2:4]:
            (caller,) = frame[:1]
            info = inspect.getframeinfo(caller)
            caller_module = caller.f_globals.get('__name__', '')

            if caller_module == "setuptools.dist" and info.function == "run_command":
                # Starting from v61.0.0 setuptools overwrites dist.run_command
                continue

            return caller_module == 'distutils.dist' and info.function == 'run_commands'

        return False

    def do_egg_install(self) -> None:
        # Build an egg with bdist_egg and hand it to easy_install.
        easy_install = self.distribution.get_command_class('easy_install')

        cmd = cast(
            # We'd want to cast easy_install as type[easy_install_cls] but a bug in
            # mypy makes it think easy_install() returns a Command on Python 3.12+
            # https://github.com/python/mypy/issues/18088
            easy_install_cls,
            easy_install(  # type: ignore[call-arg]
                self.distribution,
                args="x",
                root=self.root,
                record=self.record,
            ),
        )
        cmd.ensure_finalized()  # finalize before bdist_egg munges install cmd
        cmd.always_copy_from = '.'  # make sure local-dir eggs get installed

        # pick up setup-dir .egg files only: no .egg-info
        cmd.package_index.scan(glob.glob('*.egg'))

        self.run_command('bdist_egg')
        bdist_egg = cast(bdist_egg_cls, self.distribution.get_command_obj('bdist_egg'))
        args = [bdist_egg.egg_output]

        if setuptools.bootstrap_install_from:
            # Bootstrap self-installation of setuptools
            args.insert(0, setuptools.bootstrap_install_from)

        cmd.args = args
        cmd.run(show_deprecation=False)
        setuptools.bootstrap_install_from = None
+
+
# XXX Python 3.1 doesn't see _nc if this is inside the class
# Rebuild distutils' default sub-command list: drop the entries that
# setuptools replaces (those named in install._nc) and append the
# setuptools-specific ones.
install.sub_commands = [
    cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc
] + install.new_commands
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6ada8b0183087c9067e1ccdad1f85166d3b41b8
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/__init__.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/animate.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/animate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6479b141112ddc074b26073fdf9ecb367d9321f
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/animate.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/container.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/container.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1520846258a1f8440d1bdf228ef1a87cad26bb8e
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/container.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/elementfactory.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/elementfactory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2986525d75fb893ab2ef1339415805772b86dba
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/elementfactory.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/etree.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/etree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93afc4861c0929e4e7ccd386b74c19d11e99826c
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/etree.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/filters.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/filters.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2133c3a62019f1054a70ac12ecfbcac70d6591fe
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/filters.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/image.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66d8df1ac0f896365f83a488661a4c0617f1e4b4
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/image.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/masking.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/masking.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f48b5b7e51cee7c335a90814a0cdc770fa2c032
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/masking.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/mixins.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/mixins.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..963fab29ed4b883ec4963685a5f43ad0fd036659
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/mixins.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/params.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/params.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..561d5831df73604b28cb908f3f18e44d8f593f86
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/params.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/path.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/path.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dedc05541af168f64a1b0ba1a8d283c23191c48e
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/path.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/pattern.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/pattern.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c90f93a6241dce72f0210668da9029a90851b73c
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/pattern.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/utils.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59e9a85f26ac501cfa3d1f69e169163566664203
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/utils.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/version.cpython-310.pyc b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffaba14ecd24e2625aaa49f5c57fc14620b12359
Binary files /dev/null and b/falcon/lib/python3.10/site-packages/svgwrite/__pycache__/version.cpython-310.pyc differ
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/animate.py b/falcon/lib/python3.10/site-packages/svgwrite/animate.py
new file mode 100644
index 0000000000000000000000000000000000000000..5908ec1cb65cd894f8f34aacf98ab009071a3d4a
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/svgwrite/animate.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+#coding:utf-8
+# Author: mozman --
+# Purpose: animate elements
+# Created: 31.10.2010
+# Copyright (C) 2010, Manfred Moitzi
+# License: MIT License
+
+from svgwrite.base import BaseElement
+from svgwrite.mixins import XLink
+from svgwrite.utils import strlist, is_string
+
+
+class Set(BaseElement, XLink):
+ """ The **set** element provides a simple means of just setting the value
+ of an attribute for a specified duration. It supports all attribute types,
+ including those that cannot reasonably be interpolated, such as string
+ and boolean values. The **set** element is non-additive. The additive and
+ accumulate attributes are not allowed, and will be ignored if specified.
+ """
+ elementname = 'set'
+
+ def __init__(self, href=None, **extra):
+ """ Set constructor.
+
+ :param href: target svg element, if **href** is not `None`; else
+ the target SVG Element is the parent SVG Element.
+ """
+ super(Set, self).__init__(**extra)
+ if href is not None:
+ self.set_href(href)
+
+ def get_xml(self):
+ self.update_id() # if href is an object - 'id' - attribute may be changed!
+ return super(Set, self).get_xml()
+
+ def set_target(self, attributeName, attributeType=None):
+ """
+ Set animation attributes :ref:`attributeName` and :ref:`attributeType`.
+ """
+ self['attributeName'] = attributeName
+ if attributeType is not None:
+ self['attributeType'] = attributeType
+
+
+ def set_event(self, onbegin=None, onend=None, onrepeat=None, onload=None):
+ """
+ Set animation attributes :ref:`onbegin`, :ref:`onend`, :ref:`onrepeat`
+ and :ref:`onload`.
+ """
+ if onbegin is not None:
+ self['onbegin'] = onbegin
+ if onend is not None:
+ self['onend'] = onend
+ if onrepeat is not None:
+ self['onrepeat'] = onrepeat
+ if onload is not None:
+ self['onload'] = onload
+
+ def set_timing(self, begin=None, end=None, dur=None, min=None, max=None,
+ restart=None, repeatCount=None, repeatDur=None):
+ """
+ Set animation attributes :ref:`begin`, :ref:`end`, :ref:`dur`,
+ :ref:`min`, :ref:`max`, :ref:`restart`, :ref:`repeatCount` and
+ :ref:`repeatDur`.
+ """
+ if begin is not None:
+ self['begin'] = begin
+ if end is not None:
+ self['end'] = end
+ if dur is not None:
+ self['dur'] = dur
+ if min is not None:
+ self['min'] = min
+ if max is not None:
+ self['max'] = max
+ if restart is not None:
+ self['restart'] = restart
+ if repeatCount is not None:
+ self['repeatCount'] = repeatCount
+ if repeatDur is not None:
+ self['repeatDur'] = repeatDur
+
+ def freeze(self):
+ """ Freeze the animation effect. (see also :ref:`fill `)
+ """
+ self['fill'] = 'freeze'
+
+class AnimateMotion(Set):
+ """ The **animateMotion** element causes a referenced element to move
+ along a motion path.
+ """
+ elementname = 'animateMotion'
+
+ def __init__(self, path=None, href=None, **extra):
+ """
+ :param path: the motion path
+ :param href: target svg element, if **href** is not `None`; else
+ the target SVG Element is the parent SVG Element.
+ """
+ super(AnimateMotion, self).__init__(href=href, **extra)
+ if path is not None:
+ self['path'] = path
+
+ def set_value(self, path=None, calcMode=None, keyPoints=None, rotate=None):
+ """
+ Set animation attributes `path`, `calcMode`, `keyPoints` and `rotate`.
+ """
+ if path is not None:
+ self['path'] = path
+ if calcMode is not None:
+ self['calcMode'] = calcMode
+ if keyPoints is not None:
+ self['keyPoints'] = keyPoints
+ if rotate is not None:
+ self['rotate'] = rotate
+
+
+class Animate(Set):
+ """ The **animate** element allows scalar attributes and properties to be
+ assigned different values over time .
+ """
+ elementname = 'animate'
+
+ def __init__(self, attributeName=None, values=None, href=None, **extra):
+ """
+ :param attributeName: name of the SVG Attribute to animate
+ :param values: interpolation values, `string` as `` or a python `list`
+ :param href: target svg element, if **href** is not `None`; else
+ the target SVG Element is the parent SVG Element.
+ """
+ super(Animate, self).__init__(href=href, **extra)
+ if values is not None:
+ self.set_value(values)
+ if attributeName is not None:
+ self.set_target(attributeName)
+
+ def set_value(self, values, calcMode=None, keyTimes=None, keySplines=None,
+ from_=None, to=None, by=None):
+ """
+ Set animation attributes :ref:`values`, :ref:`calcMode`, :ref:`keyTimes`,
+ :ref:`keySplines`, :ref:`from`, :ref:`to` and :ref:`by`.
+ """
+ if values is not None:
+ if not is_string(values):
+ values = strlist(values, ';')
+ self['values'] = values
+
+ if calcMode is not None:
+ self['calcMode'] = calcMode
+ if keyTimes is not None:
+ self['keyTimes'] = keyTimes
+ if keySplines is not None:
+ self['keySplines'] = keySplines
+ if from_ is not None:
+ self['from'] = from_
+ if to is not None:
+ self['to'] = to
+ if by is not None:
+ self['by'] = by
+
+
+class AnimateColor(Animate):
+    """ The **animateColor** element specifies a color transformation over
+    time.
+    """
+    # inherits the full Animate interface; only the element name differs
+    elementname = 'animateColor'
+
+
+class AnimateTransform(Animate):
+ """ The **animateTransform** element animates a transformation attribute
+ on a target element, thereby allowing animations to control translation,
+ scaling, rotation and/or skewing.
+ """
+ elementname = 'animateTransform'
+ def __init__(self, transform, element=None, **extra):
+ """
+ :param element: target svg element, if element is not `None`; else
+ the target svg element is the parent svg element.
+ :param string transform: ``'translate | scale | rotate | skewX | skewY'``
+ """
+ super(AnimateTransform, self).__init__(element, **extra)
+ self['type'] = transform
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/base.py b/falcon/lib/python3.10/site-packages/svgwrite/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..f38cb38b587ea14257719a0d26778cdad18dccfc
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/svgwrite/base.py
@@ -0,0 +1,285 @@
+#!/usr/bin/env python
+#coding:utf-8
+# Author: mozman
+# Purpose: svg base element
+# Created: 08.09.2010
+# Copyright (c) 2010-2020, Manfred Moitzi
+# License: MIT License
+"""
+The **BaseElement** is the root for all SVG elements.
+"""
+
+from svgwrite.etree import etree
+
+import copy
+
+from svgwrite.params import Parameter
+from svgwrite.utils import AutoID
+
+
+class BaseElement(object):
+    """
+    The **BaseElement** is the root for all SVG elements. The SVG attributes
+    are stored in **attribs**, and the SVG subelements are stored in
+    **elements**.
+
+    """
+    elementname = 'baseElement'
+
+    def __init__(self, **extra):
+        """
+        :param extra: extra SVG attributes (keyword arguments)
+
+        * add trailing '_' to reserved keywords: ``'class_'``, ``'from_'``
+        * replace inner '-' by '_': ``'stroke_width'``
+
+
+        SVG attribute names will be checked, if **debug** is `True`.
+
+        workaround for removed **attribs** parameter in Version 0.2.2::
+
+            # replace
+            element = BaseElement(attribs=adict)
+
+            #by
+            element = BaseElement()
+            element.update(adict)
+
+        """
+        # the keyword 'factory' specifies the object creator
+        factory = extra.pop('factory', None)
+        if factory is not None:
+            # take parameter from 'factory'
+            self._parameter = factory._parameter
+        else:
+            # default parameter debug=True profile='full'
+            self._parameter = Parameter()
+
+        # override debug setting
+        debug = extra.pop('debug', None)
+        if debug is not None:
+            self._parameter.debug = debug
+
+        # override profile setting
+        profile = extra.pop('profile', None)
+        if profile is not None:
+            self._parameter.profile = profile
+
+        # remaining keyword arguments are SVG attributes; update() also
+        # validates the names when debug is enabled
+        self.attribs = dict()
+        self.update(extra)
+        self.elements = list()
+
+    def update(self, attribs):
+        """ Update SVG Attributes from `dict` attribs.
+
+        Rules for keys:
+
+        1. trailing '_' will be removed (``'class_'`` -> ``'class'``)
+        2. inner '_' will be replaced by '-' (``'stroke_width'`` -> ``'stroke-width'``)
+
+        """
+        for key, value in attribs.items():
+            # remove trailing underscores
+            # and replace inner underscores
+            key = key.rstrip('_').replace('_', '-')
+            self.__setitem__(key, value)
+
+    def copy(self):
+        # NOTE: shallow copies only -- the copy shares the contained
+        # subelement objects (not the lists) with the original
+        newobj = copy.copy(self)  # shallow copy of object
+        newobj.attribs = copy.copy(self.attribs)  # shallow copy of attributes
+        newobj.elements = copy.copy(self.elements)  # shallow copy of subelements
+        if 'id' in newobj.attribs:  # create a new 'id'
+            newobj['id'] = newobj.next_id()
+        return newobj
+
+    @property
+    def debug(self):
+        # validation flag, shared via the Parameter object
+        return self._parameter.debug
+
+    @property
+    def profile(self):
+        # SVG profile; value_to_string() rounds floats when it is 'tiny'
+        return self._parameter.profile
+
+    @property
+    def validator(self):
+        return self._parameter.validator
+
+    @validator.setter
+    def validator(self, value):
+        self._parameter.validator = value
+
+    @property
+    def version(self):
+        return self._parameter.get_version()
+
+    def set_parameter(self, parameter):
+        # share a Parameter object (debug/profile/validator) with this element
+        self._parameter = parameter
+
+    def next_id(self, value=None):
+        return AutoID.next_id(value)
+
+    def get_id(self):
+        """ Get the object `id` string, if the object does not have an `id`,
+        a new `id` will be created.
+
+        :returns: `string`
+        """
+        if 'id' not in self.attribs:
+            self.attribs['id'] = self.next_id()
+        return self.attribs['id']
+
+    def get_iri(self):
+        """
+        Get the `IRI` reference string of the object. (i.e., ``'#id'``).
+
+        :returns: `string`
+        """
+        return "#%s" % self.get_id()
+
+    def get_funciri(self):
+        """
+        Get the `FuncIRI` reference string of the object. (i.e. ``'url(#id)'``).
+
+        :returns: `string`
+        """
+        return "url(%s)" % self.get_iri()
+
+    def __getitem__(self, key):
+        """ Get SVG attribute by `key`.
+
+        :param string key: SVG attribute name
+        :return: SVG attribute value
+
+        """
+        return self.attribs[key]
+
+    def __setitem__(self, key, value):
+        """ Set SVG attribute by `key` to `value`.
+
+        :param string key: SVG attribute name
+        :param object value: SVG attribute value
+
+        """
+        # Attribute checking is only done by using the __setitem__() method or
+        # by self['attribute'] = value
+        if self.debug:
+            self.validator.check_svg_attribute_value(self.elementname, key, value)
+        self.attribs[key] = value
+
+    def add(self, element):
+        """ Add an SVG element as subelement.
+
+        :param element: append this SVG element
+        :returns: the added element
+
+        """
+        if self.debug:
+            self.validator.check_valid_children(self.elementname, element.elementname)
+        self.elements.append(element)
+        return element
+
+    def tostring(self):
+        """ Get the XML representation as unicode `string`.
+
+        :return: unicode XML string of this object and all its subelements
+
+        """
+        xml = self.get_xml()
+        # encode/decode round trip is required for Python 2 support
+        xml_utf8_str = etree.tostring(xml, encoding='utf-8')
+        return xml_utf8_str.decode('utf-8')
+        # just Python 3: return etree.tostring(xml, encoding='unicode')
+
+    def _repr_svg_(self):
+        """ Show SVG in IPython, Jupyter Notebook, and Jupyter Lab
+
+        :return: unicode XML string of this object and all its subelements
+
+        """
+        return self.tostring()
+
+    def get_xml(self):
+        """ Get the XML representation as `ElementTree` object.
+
+        :return: XML `ElementTree` of this object and all its subelements
+
+        """
+        xml = etree.Element(self.elementname)
+        if self.debug:
+            self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)
+        # attributes are emitted in sorted order for a deterministic output
+        for attribute, value in sorted(self.attribs.items()):
+            # filter 'None' values
+            if value is not None:
+                value = self.value_to_string(value)
+                if value:  # just add not empty attributes
+                    xml.set(attribute, value)
+
+        for element in self.elements:
+            xml.append(element.get_xml())
+        return xml
+
+    def value_to_string(self, value):
+        """
+        Convert *value* into a string; includes a value check, depending
+        on :attr:`self.debug` and :attr:`self.profile`.
+
+        """
+        if isinstance(value, (int, float)):
+            if self.debug:
+                self.validator.check_svg_type(value, 'number')
+            # the 'tiny' profile limits numeric precision to 4 decimal places
+            if isinstance(value, float) and self.profile == 'tiny':
+                value = round(value, 4)
+        return str(value)
+
+    def set_desc(self, title=None, desc=None):
+        """ Insert a **title** and/or a **desc** element as first subelement.
+        """
+        # insert desc first so that a title inserted afterwards ends up
+        # in front of it
+        if desc is not None:
+            self.elements.insert(0, Desc(desc))
+        if title is not None:
+            self.elements.insert(0, Title(title))
+
+    def set_metadata(self, xmldata):
+        """
+        :param xmldata: an xml.etree.ElementTree - Element() object.
+        """
+        metadata = Metadata(xmldata)
+        if len(self.elements) == 0:
+            self.elements.append(metadata)
+        else:
+            # skip over any leading 'title'/'desc' subelements
+            pos = 0
+            while self.elements[pos].elementname in ('title', 'desc'):
+                pos += 1
+                if pos == len(self.elements):
+                    self.elements.append(metadata)
+                    return
+            if self.elements[pos].elementname == 'metadata':
+                # merge into the existing metadata element
+                self.elements[pos].xml.append(xmldata)
+            else:
+                self.elements.insert(pos, metadata)
+
+
+class Title(object):
+ elementname = 'title'
+
+ def __init__(self, text):
+ self.xml = etree.Element(self.elementname)
+ self.xml.text = str(text)
+
+ def get_xml(self):
+ return self.xml
+
+
+class Desc(Title):
+    # behaves exactly like Title, serialized as a 'desc' element
+    elementname = 'desc'
+
+
+class Metadata(Title):
+ elementname = 'metadata'
+
+ def __init__(self, xmldata):
+ """
+ :param xmldata: an xml.etree.ElementTree - Element() object.
+ """
+ self.xml = etree.Element('metadata')
+ self.xml.append(xmldata)
diff --git a/falcon/lib/python3.10/site-packages/svgwrite/container.py b/falcon/lib/python3.10/site-packages/svgwrite/container.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e215dc01ae8ea25acd64812a581ea7977c9684d
--- /dev/null
+++ b/falcon/lib/python3.10/site-packages/svgwrite/container.py
@@ -0,0 +1,287 @@
+#coding:utf-8
+# Author: mozman
+# Purpose: svg container classes
+# Created: 15.09.2010
+# Copyright (C) 2010, Manfred Moitzi
+# License: MIT License
+"""
+The **container** module provides following structural objects:
+
+* :class:`svgwrite.Group`
+* :class:`svgwrite.SVG`
+* :class:`svgwrite.Defs`
+* :class:`svgwrite.Symbol`
+* :class:`svgwrite.Marker`
+* :class:`svgwrite.Use`
+* :class:`svgwrite.Hyperlink`
+* :class:`svgwrite.Script`
+* :class:`svgwrite.Style`
+
+set/get SVG attributes::
+
+ element['attribute'] = value
+ value = element['attribute']
+
+"""
+from urllib.request import urlopen
+from svgwrite.utils import font_mimetype, base64_data, find_first_url
+from svgwrite.base import BaseElement
+from svgwrite.mixins import ViewBox, Transform, XLink
+from svgwrite.mixins import Presentation, Clipping
+from svgwrite.etree import CDATA
+
+
+class Group(BaseElement, Transform, Presentation):
+    """ The **Group** (SVG **g**) element is a container element for grouping
+    together related graphics elements.
+
+    Grouping constructs, when used in conjunction with the **desc** and **title**
+    elements, provide information about document structure and semantics.
+    Documents that are rich in structure may be rendered graphically, as speech,
+    or as braille, and thus promote accessibility.
+
+    A group of elements, as well as individual objects, can be given a name using
+    the **id** attribute. Named groups are needed for several purposes such as
+    animation and re-usable objects.
+
+    """
+    # pure container: all behavior comes from BaseElement plus the
+    # Transform/Presentation attribute mixins
+    elementname = 'g'
+
+
+class Defs(Group):
+    """ The **defs** element is a container element for referenced elements. For
+    understandability and accessibility reasons, it is recommended that, whenever
+    possible, referenced elements be defined inside of a **defs**.
+    """
+    # same behavior as Group, serialized as a 'defs' element
+    elementname = 'defs'
+
+
+class Symbol(BaseElement, ViewBox, Presentation, Clipping):
+    """ The **symbol** element is used to define graphical template objects which
+    can be instantiated by a **use** element. The use of **symbol** elements for
+    graphics that are used multiple times in the same document adds structure and
+    semantics. Documents that are rich in structure may be rendered graphically,
+    as speech, or as braille, and thus promote accessibility.
+    """
+    # ITransform interface is not valid for Symbol -> do not inherit from Group
+    elementname = 'symbol'
+
+
+class Marker(BaseElement, ViewBox, Presentation):
+ """ The **marker** element defines the graphics that is to be used for
+ drawing arrowheads or polymarkers on a given **path**, **line**, **polyline**
+ or **polygon** element.
+
+ Add Marker definitions to a **defs** section, preferred to the **defs** section
+ of the **main drawing**.
+
+ """
+ elementname = 'marker'
+
+ def __init__(self, insert=None, size=None, orient=None, **extra):
+ """
+ :param 2-tuple insert: reference point (**refX**, **refY**)
+ :param 2-tuple size: (**markerWidth**, **markerHeight**)
+ :param orient: ``'auto'`` | `angle`
+ :param extra: additional SVG attributes as keyword-arguments
+ """
+ super(Marker, self).__init__(**extra)
+ if insert is not None:
+ self['refX'] = insert[0]
+ self['refY'] = insert[1]
+ if size is not None:
+ self['markerWidth'] = size[0]
+ self['markerHeight'] = size[1]
+ if orient is not None:
+ self['orient'] = orient
+ if 'id' not in self.attribs: # an 'id' is necessary
+ self['id'] = self.next_id()
+
+
+# str.format() template (hence the doubled braces) producing a CSS
+# @font-face rule that embeds a font by family name and source URL.
+FONT_TEMPLATE = """@font-face{{
+    font-family: "{name}";
+    src: url("{data}");
+}}
+"""
+
+
+class SVG(Symbol):
+ """ A SVG document fragment consists of any number of SVG elements contained
+ within an **svg** element.
+
+ An SVG document fragment can range from an empty fragment (i.e., no content
+ inside of the **svg** element), to a very simple SVG document fragment containing
+ a single SVG graphics element such as a **rect**, to a complex, deeply nested
+ collection of container elements and graphics elements.
+ """
+ elementname = 'svg'
+
+ def __init__(self, insert=None, size=None, **extra):
+ """
+ :param 2-tuple insert: insert position (**x**, **y**)
+ :param 2-tuple size: (**width**, **height**)
+ :param extra: additional SVG attributes as keyword-arguments
+ """
+ super(SVG, self).__init__(**extra)
+ if insert is not None:
+ self['x'] = insert[0]
+ self['y'] = insert[1]
+ if size is not None:
+ self['width'] = size[0]
+ self['height'] = size[1]
+
+ self.defs = Defs(factory=self) # defs container
+ self.add(self.defs) # add defs as first element
+
+ def embed_stylesheet(self, content):
+ """ Add