id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
/MDSuite-0.2.0-py3-none-any.whl/mdsuite/project/project.py | from __future__ import annotations
import logging
import pathlib
from datetime import datetime
from pathlib import Path
from typing import Dict, Union
from dot4dict import dotdict
import mdsuite.database.scheme as db
import mdsuite.file_io.file_read
from mdsuite.database.project_database import ProjectDatabase
from mdsuite.experiment import Experiment
from mdsuite.experiment.run import RunComputation
from mdsuite.utils import Units
from mdsuite.utils.helpers import NoneType
log = logging.getLogger(__name__)
class Project(ProjectDatabase):
    """Class for the main container of all experiments.

    The Project class acts as the encompassing class for analysis with MDSuite.
    It contains all method required to add and analyze new experiments. These
    experiments may then be compared with one another quickly. The state of the
    class is saved and updated after each operation in order to retain the
    most current state of the analysis.

    .. code-block:: python

        project = mdsuite.Project()
        project.add_experiment(
            name="NaCl",
            timestep=0.002,
            temperature=1400.0,
            units="metal",
            simulation_data="NaCl_gk_i_q.lammpstraj",
            active=False  # calculations are only performed on active experiments
        )
        project.activate_experiments("NaCl")  # set experiment state to active
        project.run.RadialDistributionFunction(number_of_configurations=500)
        project.disable_experiments("NaCl")  # set experiment state to inactive

    Attributes
    ----------
    name : str
        The name of the project
    description : str
        A short description of the project
    storage_path : str
        Where to store the tensor_values and databases. This may not simply
        be the current directory if the databases are expected to be
        quite large.
    experiments : dict
        A dict of class objects. Class objects are instances of the experiment
        class for different experiments.
    """

    def __init__(
        self,
        name: str = None,
        storage_path: Union[str, Path] = "./",
        description: str = None,
    ):
        """Project class constructor

        The constructor will check to see if the project already exists, if so,
        it will load the state of each of the classes so that they can be used
        again. If the project is new, the constructor will build the necessary
        file structure for the project.

        Parameters
        ----------
        name : str
            The name of the project.
        storage_path : str
            Where to store the tensor_values and databases. This should be
            a place with sufficient storage space for the full analysis.
        description : str
            A short description stored in the project database.
        """
        super().__init__()
        if name is None:
            self.name = "MDSuite_Project"
        else:
            self.name = name
        # Normalise to a POSIX-style string so the stored path is portable.
        self.storage_path = Path(storage_path).as_posix()

        # Properties
        # Cache of instantiated Experiment objects, keyed by experiment name.
        self._experiments = {}

        # Check for project directory, if none exist, create a new one
        self.project_dir = Path(f"{self.storage_path}/{self.name}")
        if self.project_dir.exists():
            self.attach_file_logger()
            log.info("Loading the class state")
            log.info(f"Available experiments are: {self.db_experiments}")
        else:
            self.project_dir.mkdir(parents=True, exist_ok=True)
            self.attach_file_logger()
            log.info(f"Creating new project {self.name}")
            self.build_database()

        # Database Properties
        self.description = description

    def attach_file_logger(self):
        """Attach a file logger for this project.

        Adds a DEBUG-level FileHandler writing to ``<project_dir>/mdsuite.log``
        on the package-wide "mdsuite" logger.
        """
        logger = logging.getLogger("mdsuite")
        formatter = logging.Formatter(
            "%(asctime)s %(levelname)s (%(module)s): %(message)s"
        )
        # TODO this will potentially log two mds.Projects into the same file
        #  maybe there are some conditional logging Handlers that can check
        #  project.name, but for now this should be fine.
        channel = logging.FileHandler(self.project_dir / "mdsuite.log")
        channel.setLevel(logging.DEBUG)
        channel.setFormatter(formatter)
        logger.addHandler(channel)

    def __str__(self):
        """
        Returns
        -------
        str:
            A list of all available experiments like
            "1.) Exp01\n2.) Exp02\n3.) Exp03"
        """
        return "\n".join([f"{exp.id}.) {exp.name}" for exp in self.db_experiments])

    def add_experiment(
        self,
        name: str = NoneType,
        timestep: float = None,
        temperature: float = None,
        units: Union[str, Units] = None,
        cluster_mode: bool = None,
        active: bool = True,
        simulation_data: Union[
            str, pathlib.Path, mdsuite.file_io.file_read.FileProcessor, list
        ] = None,  # TODO make this the second argument, (name, data, ...)
    ) -> Experiment:
        """Add an experiment to the project

        Parameters
        ----------
        active: bool, default = True
            Activate the experiment when added
        cluster_mode : bool
            If true, cluster mode is parsed to the experiment class.
        name : str
            Name to use for the experiment.
        timestep : float
            Timestep used during the simulation.
        temperature : float
            Temperature the simulation was performed at and is to be used
            in calculation.
        units : str
            units used
        simulation_data:
            data that should be added to the experiment.
            see mdsuite.experiment.add_data() for details of the file
            specification. you can also create the experiment with
            simulation_data == None and add data later

        Notes
        ------
        Using custom NoneType to raise a custom ValueError message with
        useful info.

        Returns
        --------
        Experiment:
            The experiment object that was added to the project
        """
        # NoneType sentinel distinguishes "argument omitted" from an explicit
        # None (which requests an auto-generated name).
        if name is NoneType:
            raise ValueError(
                "Experiment name can not be empty! "
                "Use None to automatically generate a unique name."
            )
        if name is None:
            name = f"Experiment_{datetime.now().strftime('%Y%m%d-%H%M%S')}"
            # set the experiment name to the current date and time if None
            # is provided

        # Run a query to see if that experiment already exists
        with self.session as ses:
            experiments = (
                ses.query(db.Experiment).filter(db.Experiment.name == name).all()
            )
        if len(experiments) > 0:
            log.info("This experiment already exists")
            self.load_experiments(name)
            return self.experiments[name]

        # If the experiment does not exists, instantiate a new Experiment
        new_experiment = Experiment(
            project=self,
            name=name,
            time_step=timestep,
            temperature=temperature,
            units=units,
            cluster_mode=cluster_mode,
        )
        new_experiment.active = active

        # Update the internal experiment dictionary for self.experiment property
        self._experiments[name] = new_experiment
        if simulation_data is not None:
            self.experiments[name].add_data(simulation_data)
        return self.experiments[name]

    def load_experiments(self, names: Union[str, list]):
        """Alias for activate_experiments"""
        self.activate_experiments(names)

    def activate_experiments(self, names: Union[str, list]):
        """Load experiments, such that they are used for the computations

        Parameters
        ----------
        names: Name or list of names of experiments that should be instantiated
            and loaded into self.experiments.

        Returns
        -------
        Updates the class state.
        """
        if isinstance(names, str):
            names = [names]
        for name in names:
            self.experiments[name].active = True

    def disable_experiments(self, names: Union[str, list]):
        """Disable experiments

        Parameters
        ----------
        names: Name or list of names of experiments that should be instantiated
            and loaded into self.experiments

        Returns
        -------
        """
        if isinstance(names, str):
            names = [names]
        for name in names:
            self.experiments[name].active = False

    def add_data(self, data_sets: dict):
        """Add simulation_data to a experiments.

        This is a method so that parallelization is possible amongst
        simulation_data addition to different experiments at the same time.

        Parameters
        ----------
        data_sets: dict
            keys: the names of the experiments
            values: str or mdsuite.file_io.file_read.FileProcessor
            refer to mdsuite.experiment.add_data() for an explanation of the
            file specification options

        Returns
        -------
        Updates the experiment classes.
        """
        for key, val in data_sets.items():
            self.experiments[key].add_data(val)

    @property
    def run(self) -> RunComputation:
        """Method to access the available calculators

        Returns
        -------
        RunComputation:
            class that has all available calculators as properties
        """
        # Only active experiments take part in computations.
        return RunComputation(experiments=[x for x in self.active_experiments.values()])

    @property
    def experiments(self) -> Dict[str, Experiment]:
        """Get a DotDict of instantiated experiments!"""
        # Instantiate any experiment known to the database that has not been
        # loaded into the in-memory cache yet.
        with self.session as ses:
            db_experiments = ses.query(db.Experiment).all()
        for exp in db_experiments:
            exp: db.Experiment
            if exp.name not in self._experiments:
                self._experiments[exp.name] = Experiment(project=self, name=exp.name)
        return dotdict(self._experiments)

    @property
    def active_experiments(self) -> Dict[str, Experiment]:
        """Get a DotDict of instantiated experiments that are currently selected!"""
        active_experiment = {
            key: val for key, val in self.experiments.items() if val.active
        }
        return dotdict(active_experiment)
/ATAX-1.1.3.tar.gz/ATAX-1.1.3/anlagenverzeichnis.py |
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from _ATAX.accounting import *
from _ATAX.accounting import _Base_, _Entry_
from _TFL.pyk import pyk
from _TFL.Regexp import *
import _TFL.CAO
import _TFL.r_eval
class _Mixin_ (TFL.Meta.Object) :
    """Mixin providing the per-year reference dates used by the
       depreciation calculations.
    """

    def _setup_dates (self, target_year) :
        ### For the year of interest, remember the first day, mid-year day,
        ### and last day both as display strings and as `Date` objects.
        for stem, day in (("head", "1.1"), ("midd", "30.6"), ("tail", "31.12")) :
            date_string = "%s.%s" % (day, target_year)
            setattr (self, stem + "_date", date_string)
            setattr (self, stem + "_time", Date (date_string))
        self.target_year = int (target_year)
    # end def _setup_dates

# end class _Mixin_
@pyk.adapt__bool__
class _IFB_ (TFL.Meta.Object) :
    """Base class for FBiG and IFB."""

    def __init__ (self, entry) :
        self.entry  = entry
        birth_year  = entry.birth_time.year
        ### the allowance stays alive for the birth year plus four more years
        self.alive  = entry.target_year < birth_year + 4
        self.is_new = birth_year == entry.target_year
    # end def __init__

    def round (self) :
        ### round the carried value to the target currency's precision
        self.value = self.value.rounded_as_target ()
    # end def round

    def __bool__ (self) :
        ### truthy only while still alive *and* carrying a non-zero value
        return self.alive and bool (self.value)
    # end def __bool__

# end class _IFB_
class FBiG (_IFB_) :
    """Model a FBiG (Freibetrag für investierte Gewinne)."""

    abbr    = "FBiG"
    account = None
    name    = "Freibetrag für investierte Gewinne"

    def __init__ (self, entry, ifb, source_currency) :
        self.__super.__init__ (entry)
        ### default to the entry's acquisition value when no explicit
        ### allowance amount was given
        amount     = ifb or entry.birth_value
        self.value = source_currency (float (amount))
    # end def __init__

# end class FBiG
class IFB (_IFB_) :
    """Model a IFB (Investitionsfreibetrag)."""

    abbr    = "IFB"
    account = 7802
    name    = "Investitionsfreibetrag"

    def __init__ (self, entry, ifb, source_currency) :
        self.__super.__init__ (entry)
        ### `ifb` is a percentage; an empty/missing spec means 0%
        percent    = int (ifb) if ifb else 0
        self.rate  = percent / 100.0
        self.value = entry.birth_value * self.rate
    # end def __init__

# end class IFB
class Anlagen_Entry (_Mixin_, _Entry_) :
    """One line of the depreciation register: a single asset with its
       acquisition data, depreciation (Afa) schedule, and optional
       investment allowance (IFB/FBiG).
    """

    cat             = "Normal"
    ### a depreciation rate is an arithmetic expression over digits and
    ### the operators +-*/() (evaluated via TFL.r_eval)
    rate_pattern    = r"(?P<rate> [-+*/().0-9\s]+)"
    first_rate_pat  = Regexp (rate_pattern, re.X)
    ### later rates are prefixed by a 2- or 4-digit year and a colon
    later_rate_pat  = Regexp \
        ( r"(?P<year> \d\d (?: \d\d)?) \s* : \s* " + rate_pattern
        , re.X
        )
    ### optional category override embedded in the flags field, e.g. "C[Kfz]"
    _cat_pat        = Regexp (r"C\[(?P<cat> [^]]+)\]", re.VERBOSE)

    def __init__ (self, line, anlagenverzeichnis) :
        ### `line` is one record of the register file; fields are separated
        ### by the module-level `split_pat`
        try :
            ( self.desc, self.supplier, self.flags
            , self.birth_date, self.a_value, self.afa_spec, ifb
            , self.death_date
            ) = split_pat.split (line, 8)
        except ValueError as exc :
            ### echo the offending line before re-raising to ease debugging
            print (line)
            raise
        ### open-ended assets get a far-future disposal date
        final             = "31.12.2037"
        self.p_konto      = self._get_p_konto (self.flags)
        self.birth_time   = Date (self.birth_date)
        self.death_time   = Date (self.death_date or final)
        ### alive: not yet disposed of by the end of the target year
        self.alive        = self.death_time >  anlagenverzeichnis.tail_time
        ### contemporary: overlaps the target year at all
        self.contemporary = \
            (     self.birth_time <= anlagenverzeichnis.tail_time
              and self.death_time >= anlagenverzeichnis.head_time
            )
        ### if the asset was disposed of in an earlier year, anchor all
        ### reference dates to its disposal year instead
        if int (self.death_time.year) < int (anlagenverzeichnis.year) :
            self._setup_dates (self.death_time.year)
        else :
            self._setup_dates (anlagenverzeichnis.year)
        ### acquisitions before July 1 earn a full first-year rate,
        ### later ones only half ("Halbjahres-Afa")
        self.half_date = "1.7.%s" % (self.birth_time.year, )
        if "~" in self.flags :
            ### "~" flag: push the half-year boundary past year end, i.e.
            ### force the half rate for the acquisition year
            self.half_date = "1.1.%s" % (self.birth_time.year + 1, )
        self.half_time = Date (self.half_date)
        self.desc      = desc_strip_pat.sub ("", self.desc)
        ### the value field may carry an explicit currency marker
        currency_match  = currency_pat.search (self.a_value)
        a_value         = self.a_value
        source_currency = anlagenverzeichnis.source_currency
        if currency_match :
            source_currency = EU_Currency.Table [currency_match.group (1)]
            a_value         = currency_pat.sub ("", a_value)
        ### NOTE(review): `zero` is the comparison threshold for book values;
        ### for ATS it is 1.0 (the symbolic remaining "Erinnerungsschilling"),
        ### otherwise a true zero — presumably a rounding convention; confirm
        if EUC.target_currency is not ATS :
            self.zero = source_currency (0.0)
        else :
            self.zero = source_currency (1.0)
        self.source_currency = source_currency
        ### the value field is an arithmetic expression, too
        self.birth_value     = source_currency (TFL.r_eval (a_value))
        self.new_value       = source_currency (0.0)
        self.out_value       = source_currency (0.0)
        ### "G" flag selects the FBiG allowance, otherwise IFB applies
        if "G" in self.flags :
            self.ifb = FBiG (self, ifb, source_currency)
        else :
            self.ifb = IFB  (self, ifb, source_currency)
        self._set_cat (self.flags)
    # end def __init__

    @property
    def active (self) :
        ### an entry shows up in the report while it overlaps the target
        ### year and still produces depreciation (or has no rate at all)
        return \
            (     self.contemporary
              and (self.current_depreciation > 0 or self.base_rate == 0)
            )
    # end def active

    def evaluate (self) :
        """Compute this year's depreciation and the resulting book values
           (head = 1.1., tail = 31.12.).
        """
        self._calc_rates ()
        self.current_depreciation = \
            self.birth_value * (self.current_rate / 100.0)
        if "=" not in self.flags :
            ### book value at year start: acquisition value minus all
            ### depreciation of past years, clipped at `zero`
            self.head_value = max \
                ( self.birth_value * ((100.0 - self.past_total_rate) / 100.)
                , self.zero
                )
            self.tail_value = self.head_value - self.current_depreciation
            if self.tail_value < self.zero :
                ### keep the symbolic remainder on the books and reduce
                ### this year's depreciation accordingly
                self.tail_value            = self.zero
                self.current_depreciation -= self.zero
        else :
            ### "=" flag: non-depreciating asset ("ewig"), value is constant
            self.head_value = self.tail_value = self.birth_value
        if self.birth_time >= self.head_time :
            ### acquired during the target year: no opening value, counts
            ### as a new addition
            self.head_value = self.source_currency (0.0)
            self.new_value  = self.birth_value
        if not self.alive :
            ### disposed of during the target year: remaining book value
            ### leaves the register
            self.out_value  = self.tail_value
            self.tail_value = self.source_currency (0.0)
        if self.tail_value.target_currency.to_euro_factor != 1.0 :
            ### non-euro target currency: round every reported amount
            self.birth_value          = self.birth_value.rounded_as_target ()
            self.head_value           = self.head_value.rounded_as_target ()
            self.tail_value           = self.tail_value.rounded_as_target ()
            self.new_value            = self.new_value.rounded_as_target ()
            self.out_value            = self.out_value.rounded_as_target ()
            self.current_depreciation = \
                self.current_depreciation.rounded_as_target ()
            if self.ifb :
                self.ifb.round ()
    # end def evaluate

    def _calc_rates (self) :
        """Parse `afa_spec` (e.g. "20, 2003:10") into the per-year rate
           sequence `y_rates` and derive `current_rate`, `base_rate`, and
           `past_total_rate` (all in percent).
        """
        rates          = [x.strip () for x in self.afa_spec.split (",")]
        first_rate     = rates [0]
        first_rate_pat = self.first_rate_pat
        later_rate_pat = self.later_rate_pat
        if not first_rate_pat.match (first_rate) :
            raise ValueError \
                ("%s doesn't match a depreciation rate" % (first_rate, ))
        later_rates = []
        for r in rates [1:] :
            if not later_rate_pat.match (r) :
                raise ValueError \
                    ("%s doesn't match a depreciation rate" % (r, ))
            ### Time_Tuple normalises 2-digit years
            y = Time_Tuple (later_rate_pat.year).year
            later_rates.append ((y, TFL.r_eval (later_rate_pat.rate) * 1.0))
        y_rate = self.base_rate = TFL.r_eval (first_rate_pat.rate) * 1.0
        ### sentinel entry so the loop below fills rates up to target_year
        if later_rates :
            later_rates.append ((self.target_year, later_rates [-1] [1]))
        else :
            later_rates.append ((self.target_year, y_rate))
        ### first-year rate is halved for acquisitions on/after July 1
        y_rates = self.y_rates = \
            [y_rate * ((0.5, 1.0) [self.birth_time < self.half_time])]
        if self.birth_time < self.head_time :
            ### replay every year between acquisition and target year,
            ### switching the rate at each "year:rate" boundary
            current_year = self.birth_time.year + 1
            for target_year, next_rate in later_rates :
                while current_year < target_year :
                    y_rates.append (y_rate)
                    current_year += 1
                y_rate = self.base_rate = next_rate
            ### target-year rate is halved if disposed of in the first half
            y_rates.append \
                (y_rate * ((0.5, 1.0) [self.midd_time < self.death_time]))
        self.current_rate = y_rates [-1]
        past_total_rate   = 0
        for y_rate in y_rates [:-1] :
            past_total_rate += y_rate
        ### never depreciate more than 100% in total
        self.past_total_rate = min (past_total_rate, 100.0)
        if self.past_total_rate + self.current_rate > 100.0 :
            self.current_rate = 100.0 - self.past_total_rate
    # end def _calc_rates

    def _set_cat (self, flags) :
        ### a "C[...]" marker in the flags overrides the default category
        pat = self._cat_pat
        if pat.search (flags) :
            self.cat = pat.cat
    # end def _set_cat

# end class Anlagen_Entry
class Anlagenverzeichnis (_Mixin_, _Base_) :
    """Depreciation register (Anlagenverzeichnis) for one year: reads the
       register file, evaluates every entry, prints the report, and can
       append the resulting bookings to an account file.
    """

    ### matches settings lines like `$ source_currency = "ATS" ;`
    assignment_pat = Regexp \
        ( r"^\$ "
          r"(?P<var> account_file | source_currency)"
          r"\s* = \s*"
          r"(?P<value> .*)"
          r"\s* ;"
        , re.X
        )

    ### column layouts of the printed report
    header_format  = "%-48s %-8s %10s %10s %8s %10s %10s"
    entry1_format  = "%-44s%4s %8s %10.2f %10.2f %5.2f %10.2f %10.2f"
    newifb_format  = "    %-46s %8s %10s %10s %8s %10.2f %10s"
    alive_format   = "    %-46s %8s %10s %10s %8s"
    dying_format   = "    %-36.31s%10s %8s %10s %10s %8s %10.2f %10s"
    footer_format  = "\n%-48s %8s %10.2f %10.2f %8s %10.2f %10.2f"
    new_format     = "%-48s %8s %10s %10.2f"
    out_format     = "%-48s %8s %10s %10s %8s %10.2f"
    ### layout of one booking line appended to the account file
    account_format = \
        "  %s & & & %10.2f & b & %-5s & 2100 & - & %-3s & & %-6s %s\n"

    ### class of the allowance (IFB/FBiG) newly granted this year, if any
    ifb_type       = ""

    def __init__ (self, year, start_year, file_name, source_currency, account_file = None) :
        ### `year`: target year; `start_year`: entries born earlier are
        ### skipped; `account_file`: optional bookings output file
        self.year               = year
        self.start_time         = Date ("1.1.%s" % (start_year, ))
        self.file_name          = file_name
        self.source_currency    = source_currency
        self.account_file       = account_file
        self.entries            = []
        ### report totals, accumulated in `evaluate`
        self.total_birth_value  = source_currency (0.0)
        self.total_head_value   = source_currency (0.0)
        self.total_tail_value   = source_currency (0.0)
        self.total_new_value    = source_currency (0.0)
        self.total_out_value    = source_currency (0.0)
        self.total_ifb_value    = source_currency (0.0)
        self.total_depreciation = source_currency (0.0)
        self.total_per_cat      = defaultdict (EUR)
        self._setup_dates (year)
        self.add_file    (file_name)
    # end def __init__

    def add_file (self, file_name) :
        """Read the register file: settings lines are evaluated, all other
           non-empty, non-comment lines become `Anlagen_Entry` objects.
        """
        assignment_pat = self.assignment_pat
        file           = open (file_name, "rb")
        for line in file :
            line = self._decoded (line)
            if ignor_pat.match (line) : continue
            line = ws_head_pat.sub ("", line, count = 1)
            line = ws_tail_pat.sub ("", line, count = 1)
            if not line : continue
            if assignment_pat.match (line) :
                self.eval_line (line, assignment_pat)
            else :
                self.add_line  (line)
        file.close ()
    # end def add_file

    def eval_line (self, line, match) :
        """Apply a `$ var = value ;` settings line to `self`."""
        name       = match.var
        ### the value may reference the target year symbolically
        expression = match.value.replace \
            ("$target_year", str (self.target_year))
        value      = TFL.r_eval (expression)
        if name == "source_currency" :
            ### currency is given by code and looked up in the EUC table
            value = EUC.Table [value]
        setattr (self, name, value)
    # end def eval_line

    def add_line (self, line) :
        self.entries.append (Anlagen_Entry (line, self))
    # end def add_line

    def evaluate (self) :
        """Evaluate all entries and accumulate the report totals."""
        for e in self.entries :
            if (not e.contemporary) or e.birth_time < self.start_time :
                ### entries outside the reporting window are switched off
                e.contemporary = 0
                continue
            e.evaluate ()
            if e.active :
                self.total_birth_value      += e.birth_value
                self.total_head_value       += e.head_value
                self.total_tail_value       += e.tail_value
                self.total_new_value        += e.new_value
                self.total_out_value        += e.out_value
                self.total_depreciation     += e.current_depreciation
                self.total_per_cat [e.cat]  += e.current_depreciation
                if e.ifb and e.ifb.is_new :
                    ### remember which allowance type applies this year
                    self.ifb_type         = e.ifb.__class__
                    self.total_ifb_value += e.ifb.value
    # end def evaluate

    def write (self) :
        """Print the formatted depreciation report to stdout."""
        pyk.fprint \
            ( self.header_format
            % ( "", "", "Anschaff/", "Buchwert", "  Afa  ", "Afa", "Buchwert")
            )
        pyk.fprint \
            ( self.header_format
            % ( "Text", "Datum", "Teil-Wert", "1.1.", "  %  "
              , "IFB/Abgang", "31.12."
              )
            )
        pyk.fprint ("\n%s\n" % ("=" * 116, ))
        for e in self.entries :
            if e.active :
                self._write_entry (e)
        pyk.fprint ("\n%s\n" % ("=" * 116, ))
        pyk.fprint \
            ( self.footer_format
            % ( "Summe", ""
              , self.total_birth_value
              , self.total_head_value
              , "Afa"
              , self.total_depreciation
              , self.total_tail_value
              )
            )
        if len (self.total_per_cat) > 1 :
            ### per-category breakdown only when more than one category
            for k, v in sorted (pyk.iteritems (self.total_per_cat)) :
                pyk.fprint ((self.out_format % ("", "", "", "", k, v)))
        pyk.fprint \
            (self.new_format % ("Neuzugänge", "", "", self.total_new_value))
        pyk.fprint \
            ( self.out_format
            % ("Abgänge", "", "", "", "", self.total_out_value)
            )
        if self.total_ifb_value :
            pyk.fprint \
                ( self.out_format
                % ( self.ifb_type.name, "", "", "", self.ifb_type.abbr
                  , self.total_ifb_value
                  )
                )
    # end def write

    def _write_entry (self, e) :
        """Print the (up to two) report lines for a single entry."""
        ifb_indicator = ""
        if e.ifb :
            ifb_indicator = e.ifb.abbr
        pyk.fprint \
            ( self.entry1_format
            % ( e.desc
              , ifb_indicator
              , e.birth_time.formatted ("%d.%m.%y")
              , e.birth_value
              , e.head_value
              , e.base_rate
              , e.current_depreciation
              , e.tail_value
              )
            )
        if e.alive :
            if e.ifb and e.ifb.is_new :
                ### allowance granted this year: show its value
                pyk.fprint \
                    ( self.newifb_format
                    % ( e.supplier, "", "", "", e.ifb.abbr, e.ifb.value, "")
                    )
            elif e.ifb :
                pyk.fprint ("    %-36s%10.2f" % (e.supplier, e.ifb.value))
            else :
                pyk.fprint \
                    ( self.alive_format
                    % (e.supplier, "", "", "", ("", "ewig") ["=" in e.flags])
                    )
        else :
            ### disposal line ("Abgang") with the outgoing book value
            pyk.fprint \
                ( self.dying_format
                % ( e.supplier
                  , "Abgang"
                  , e.death_time.formatted ("%d.%m.%y")
                  , ifb_indicator
                  , ("", e.ifb.value.as_string_s ()) [bool (e.ifb)]
                  , ("", "ewig") ["=" in e.flags]
                  , e.out_value
                  , ""
                  )
                )
    # end def _write_entry

    def update_accounts (self) :
        """Append this year's depreciation bookings to the account file
           (or stdout when no file was configured).
        """
        if self.account_file :
            file = open (self.account_file, "w")
        else :
            file = sys.stdout
        for e in self.entries :
            if e.contemporary :
                self._update_account_entry (e, file)
        if self.account_file :
            file.close ()
    # end def update_accounts

    def _update_account_entry (self, e, file) :
        """Write the Afa, allowance, and disposal bookings for one entry."""
        cat = "fe"
        if e.p_konto :
            ### tag the booking with the private-share account, if any
            cat = "%sP[%s]" % (cat, e.p_konto)
        eoy = Date (day_to_time_tuple ("31.12."))
        if e.active and e.current_depreciation :
            ### yearly depreciation is booked against account 7800
            self._write \
                ( file
                , self.account_format
                % ( eoy.formatted ("%d.%m.")
                  , e.current_depreciation, 7800, cat, "Afa", e.desc
                  )
                )
        if e.ifb and e.ifb.is_new and e.ifb.account :
            self._write \
                ( file
                , self.account_format
                % (eoy.formatted ("%d.%m.")
                  , e.ifb.value, e.ifb.account, cat, e.ifb.abbr, e.desc
                  )
                )
        if not e.alive :
            ### remaining book value of a disposal goes to account 7801
            self._write \
                ( file
                , self.account_format
                % ( e.death_time.formatted ("%d.%m.")
                  , e.out_value, 7801, cat, "Abgang", e.desc
                  )
                )
    # end def _update_account_entry

    def _write (self, file, s) :
        ### encode via pyk for Python-2/3 compatible byte/str handling
        file.write (pyk.as_str (s))
    # end def _write

# end class Anlagenverzeichnis
def _main (cmd) :
    """Build the register from the command-line options, print the report,
       and optionally append the bookings to the account file.
    """
    anlagenverzeichnis = Anlagenverzeichnis \
        ( cmd.year
        , cmd.Start_year
        , cmd.anlagenverzeichnis
        , cmd.source_currency
        , cmd.account_file
        )
    anlagenverzeichnis.evaluate ()
    anlagenverzeichnis.write    ()
    if cmd.update_accounts :
        anlagenverzeichnis.update_accounts ()
    return anlagenverzeichnis
# end def _main
### Command-line interface: two mandatory positional arguments plus the
### options controlling currency conversion and account-file updates.
_Command = TFL.CAO.Cmd \
    ( handler       = _main
    , args          =
        ( "year:S?Year of interest"
        , "anlagenverzeichnis:P?File defining depreciation data"
        )
    , min_args      = 2
    , max_args      = 2
    , opts          =
        ( "-account_file:P?Name of account file to update"
        , "-Start_year:S=1988?Skip all entries before `Start_year`"
        , "-update_accounts:B?Add depreciation entries to account file"
        , TFL.CAO.Arg.EUC_Source ()
        , TFL.CAO.Arg.EUC_Target ()
        , TFL.CAO.Opt.Output_Encoding (default = "utf-8")
        )
    , description   = "Calculate depreciations for `year`"
    )

"""
year=2007 ; python -m ATAX.anlagenverzeichnis $year ~/EAR/anlagen_gewerbe.dat
"""

if __name__ == "__main__":
    _Command ()
### __END__ ATAX.anlagenverzeichnis
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/geo/__init__.py | from decimal import Decimal
from typing import Optional, Tuple, Union
from .. import BaseProvider
localized = True
PlaceType = Tuple[str, str, str, str, str]
class Provider(BaseProvider):
"""
land_coords data extracted from geonames.org, under the Creative Commons Attribution 3.0 License.
Coordinates are in decimal format for mapping purposes.
Country code is in Alpha 2 format (https://www.nationsonline.org/oneworld/country_code_list.htm).
Timezones are canonical (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
"""
land_coords: Tuple[PlaceType, ...] = (
("42.50729", "1.53414", "les Escaldes", "AD", "Europe/Andorra"),
("36.21544", "65.93249", "Sar-e Pul", "AF", "Asia/Kabul"),
("40.49748", "44.7662", "Hrazdan", "AM", "Asia/Yerevan"),
("-11.78333", "19.91667", "Luena", "AO", "Africa/Luanda"),
("-37.32167", "-59.13316", "Tandil", "AR", "America/Argentina/Buenos_Aires"),
(
"-34.74785",
"-58.70072",
"Pontevedra",
"AR",
"America/Argentina/Buenos_Aires",
),
("-34.64966", "-58.38341", "Barracas", "AR", "America/Argentina/Buenos_Aires"),
("-54.8", "-68.3", "Ushuaia", "AR", "America/Argentina/Ushuaia"),
("-31.25033", "-61.4867", "Rafaela", "AR", "America/Argentina/Cordoba"),
("-31.4488", "-60.93173", "Esperanza", "AR", "America/Argentina/Cordoba"),
("-34.64167", "-60.47389", "Chacabuco", "AR", "America/Argentina/Buenos_Aires"),
("-27.4338", "-65.61427", "Aguilares", "AR", "America/Argentina/Tucuman"),
("47.05", "15.46667", "Sankt Peter", "AT", "Europe/Vienna"),
("48.25", "16.4", "Floridsdorf", "AT", "Europe/Vienna"),
("-31.95224", "115.8614", "Perth", "AU", "Australia/Perth"),
("-37.9", "145.18333", "Wheelers Hill", "AU", "Australia/Melbourne"),
("-33.88096", "151.07986", "Strathfield", "AU", "Australia/Sydney"),
("-34.88422", "150.60036", "Nowra", "AU", "Australia/Sydney"),
("-25.54073", "152.70493", "Maryborough", "AU", "Australia/Brisbane"),
("-34.28853", "146.05093", "Griffith", "AU", "Australia/Sydney"),
("-33.79176", "151.08057", "Eastwood", "AU", "Australia/Sydney"),
("-37.88333", "145.06667", "Carnegie", "AU", "Australia/Melbourne"),
("-33.75881", "150.99292", "Baulkham Hills", "AU", "Australia/Sydney"),
("-27.50578", "153.10236", "Carindale", "AU", "Australia/Brisbane"),
("-32.05251", "115.88782", "Willetton", "AU", "Australia/Perth"),
("-38.16604", "145.13643", "Frankston South", "AU", "Australia/Melbourne"),
("38.45598", "48.87498", "Astara", "AZ", "Asia/Baku"),
("41.09246", "45.36561", "Qazax", "AZ", "Asia/Baku"),
("44.75874", "19.21437", "Bijeljina", "BA", "Europe/Sarajevo"),
("23.9028", "89.11943", "Kushtia", "BD", "Asia/Dhaka"),
("22.83957", "91.84128", "Manikchari", "BD", "Asia/Dhaka"),
("50.8", "3.16667", "Wevelgem", "BE", "Europe/Brussels"),
("51.12794", "4.21372", "Temse", "BE", "Europe/Brussels"),
("50.71229", "4.52529", "Rixensart", "BE", "Europe/Brussels"),
("50.74497", "3.20639", "Mouscron", "BE", "Europe/Brussels"),
("51.24197", "4.82313", "Lille", "BE", "Europe/Brussels"),
("51.03427", "5.37429", "Houthalen", "BE", "Europe/Brussels"),
("50.56149", "4.69889", "Gembloux", "BE", "Europe/Brussels"),
("50.88506", "4.07601", "Denderleeuw", "BE", "Europe/Brussels"),
("51.21187", "4.25633", "Beveren", "BE", "Europe/Brussels"),
("41.57439", "24.71204", "Smolyan", "BG", "Europe/Sofia"),
("43.4125", "23.225", "Montana", "BG", "Europe/Sofia"),
("42.7", "27.25", "Aytos", "BG", "Europe/Sofia"),
("8.88649", "2.59753", "Tchaourou", "BJ", "Africa/Porto-Novo"),
("-21.44345", "-65.71875", "Tupiza", "BO", "America/La_Paz"),
("-0.71667", "-48.52333", "Soure", "BR", "America/Belem"),
("-8.05389", "-34.88111", "Recife", "BR", "America/Recife"),
("-4.42472", "-41.45861", "Pedro II", "BR", "America/Fortaleza"),
("-3.14306", "-58.44417", "Itacoatiara", "BR", "America/Manaus"),
("-4.16694", "-40.7475", "Guaraciaba do Norte", "BR", "America/Fortaleza"),
("-8.66667", "-35.71667", "Catende", "BR", "America/Recife"),
("-8.28333", "-35.03333", "Cabo", "BR", "America/Recife"),
("-4.24444", "-42.29444", "Barras", "BR", "America/Fortaleza"),
("-3.20333", "-52.20639", "Altamira", "BR", "America/Santarem"),
("-20.87306", "-48.29694", "Viradouro", "BR", "America/Sao_Paulo"),
("-22.97056", "-46.99583", "Valinhos", "BR", "America/Sao_Paulo"),
("-10.95817", "-38.79084", "Tucano", "BR", "America/Bahia"),
("-28.81833", "-52.51028", "Soledade", "BR", "America/Sao_Paulo"),
("-23.44361", "-51.87389", "Sarandi", "BR", "America/Sao_Paulo"),
("-22.45667", "-47.53028", "Santa Gertrudes", "BR", "America/Sao_Paulo"),
("-11.48472", "-37.93278", "Rio Real", "BR", "America/Bahia"),
("-19.32556", "-41.25528", "Resplendor", "BR", "America/Sao_Paulo"),
("-26.22861", "-52.67056", "Pato Branco", "BR", "America/Sao_Paulo"),
("-25.42944", "-50.00639", "Palmeira", "BR", "America/Sao_Paulo"),
("-12.91667", "-39.25", "Muritiba", "BR", "America/Bahia"),
("-21.41222", "-42.19667", "Miracema", "BR", "America/Sao_Paulo"),
("-28.44917", "-52.2", "Marau", "BR", "America/Sao_Paulo"),
("-22.92306", "-53.13722", "Loanda", "BR", "America/Sao_Paulo"),
("-10.91722", "-37.65", "Lagarto", "BR", "America/Maceio"),
("-19.72806", "-50.19556", "Iturama", "BR", "America/Sao_Paulo"),
("-21.205", "-41.88778", "Itaperuna", "BR", "America/Sao_Paulo"),
("-20.25333", "-43.80139", "Itabirito", "BR", "America/Sao_Paulo"),
("-28.24", "-48.67028", "Imbituba", "BR", "America/Sao_Paulo"),
("-22.53722", "-42.98194", "Guapimirim", "BR", "America/Sao_Paulo"),
("-19.7625", "-44.31389", "Esmeraldas", "BR", "America/Sao_Paulo"),
("-25.42778", "-49.27306", "Curitiba", "BR", "America/Sao_Paulo"),
("-14.66463", "-52.35558", "Nova Xavantina", "BR", "America/Cuiaba"),
("-29.2975", "-51.50361", "Carlos Barbosa", "BR", "America/Sao_Paulo"),
("-15.675", "-38.94722", "Canavieiras", "BR", "America/Bahia"),
("-17.74431", "-48.62789", "Caldas Novas", "BR", "America/Sao_Paulo"),
("-23.7975", "-48.59278", "Buri", "BR", "America/Sao_Paulo"),
("-10.90889", "-37.03861", "Barra dos Coqueiros", "BR", "America/Maceio"),
("-22.57306", "-47.1725", "Artur Nogueira", "BR", "America/Sao_Paulo"),
("-10.91111", "-37.07167", "Aracaju", "BR", "America/Maceio"),
("-21.42917", "-45.94722", "Alfenas", "BR", "America/Sao_Paulo"),
("-8.76194", "-63.90389", "Porto Velho", "BR", "America/Porto_Velho"),
("-21.44236", "27.46153", "Tonota", "BW", "Africa/Gaborone"),
("55.1904", "30.2049", "Vitebsk", "BY", "Europe/Minsk"),
("53.5942", "25.8191", "Novogrudok", "BY", "Europe/Minsk"),
("52.4089", "31.3237", "Dobrush", "BY", "Europe/Minsk"),
("45.43341", "-73.86586", "Beaconsfield", "CA", "America/Toronto"),
("46.23899", "-63.13414", "Charlottetown", "CA", "America/Halifax"),
("45.4473", "-73.75335", "Dorval", "CA", "America/Toronto"),
("49.88307", "-119.48568", "Kelowna", "CA", "America/Vancouver"),
("43.86682", "-79.2663", "Markham", "CA", "America/Toronto"),
("42.8334", "-80.38297", "Norfolk County", "CA", "America/Toronto"),
("45.44868", "-73.81669", "Pointe-Claire", "CA", "America/Toronto"),
("45.40008", "-73.58248", "Sainte-Catherine", "CA", "America/Toronto"),
("53.51684", "-113.3187", "Sherwood Park", "CA", "America/Edmonton"),
("50.26729", "-119.27337", "Vernon", "CA", "America/Vancouver"),
("46.1351", "-60.1831", "Sydney", "CA", "America/Glace_Bay"),
("0.76755", "24.43973", "Yangambi", "CD", "Africa/Lubumbashi"),
("-8.73508", "24.99798", "Kamina", "CD", "Africa/Lubumbashi"),
("0.49113", "29.47306", "Beni", "CD", "Africa/Lubumbashi"),
("-4.5833", "15.16554", "Kasangulu", "CD", "Africa/Kinshasa"),
("4.94273", "15.87735", "Carnot", "CF", "Africa/Bangui"),
("-4.26613", "15.28318", "Brazzaville", "CG", "Africa/Brazzaville"),
("46.18396", "6.10237", "Onex", "CH", "Europe/Zurich"),
("47.30997", "8.52462", "Adliswil", "CH", "Europe/Zurich"),
("5.84752", "-5.682", "Lakota", "CI", "Africa/Abidjan"),
("5.27247", "-3.59625", "Bonoua", "CI", "Africa/Abidjan"),
("-33.59217", "-70.6996", "San Bernardo", "CL", "America/Santiago"),
("-30.60106", "-71.19901", "Ovalle", "CL", "America/Santiago"),
("-32.45242", "-71.23106", "La Ligua", "CL", "America/Santiago"),
("-36.9256", "-73.02841", "Chiguayante", "CL", "America/Santiago"),
("4.96667", "10.7", "Tonga", "CM", "Africa/Douala"),
("3.51667", "11.5", "Mbalmayo", "CM", "Africa/Douala"),
("4.2475", "9.00472", "Idenao", "CM", "Africa/Douala"),
("46.51872", "86.00214", "Hoxtolgay", "CN", "Asia/Urumqi"),
("36.81667", "117.81667", "Zhoucun", "CN", "Asia/Shanghai"),
("34.86472", "117.55417", "Zaozhuang", "CN", "Asia/Shanghai"),
("23.73333", "114.68333", "Heyuan", "CN", "Asia/Shanghai"),
("34.65918", "109.22921", "Yanliang", "CN", "Asia/Shanghai"),
("38.40917", "112.73333", "Xinzhou", "CN", "Asia/Shanghai"),
("33.78333", "114.51667", "Wacheng", "CN", "Asia/Shanghai"),
("27.85", "112.9", "Xiangtan", "CN", "Asia/Shanghai"),
("37.19723", "122.05228", "Tianfu", "CN", "Asia/Shanghai"),
("34.85", "117.33333", "Taozhuang", "CN", "Asia/Shanghai"),
("35.64889", "117.27583", "Sishui", "CN", "Asia/Shanghai"),
("27.34089", "117.4831", "Shaowu", "CN", "Asia/Shanghai"),
("37.30553", "120.82747", "Zhuangyuan", "CN", "Asia/Shanghai"),
("35.50056", "117.63083", "Pingyi", "CN", "Asia/Shanghai"),
("27.92333", "118.53333", "Pucheng", "CN", "Asia/Shanghai"),
("24.28859", "116.11768", "Meizhou", "CN", "Asia/Shanghai"),
("37.65181", "120.33063", "Longgang", "CN", "Asia/Shanghai"),
("23.29549", "113.82465", "Licheng", "CN", "Asia/Shanghai"),
("36.19278", "117.65694", "Laiwu", "CN", "Asia/Shanghai"),
("30.35028", "112.19028", "Jingzhou", "CN", "Asia/Shanghai"),
("32.50611", "120.14278", "Jiangyan", "CN", "Asia/Shanghai"),
("30.24706", "115.04814", "Huangshi", "CN", "Asia/Shanghai"),
("37.73222", "115.70111", "Hengshui", "CN", "Asia/Shanghai"),
("28.88162", "120.03308", "Guli", "CN", "Asia/Shanghai"),
("23.02677", "113.13148", "Foshan", "CN", "Asia/Shanghai"),
("35.85", "117.7", "Dongdu", "CN", "Asia/Shanghai"),
("32.54278", "111.50861", "Danjiangkou", "CN", "Asia/Shanghai"),
("35.20889", "111.73861", "Changzhi", "CN", "Asia/Shanghai"),
("34.56861", "105.89333", "Beidao", "CN", "Asia/Shanghai"),
("29.98869", "122.20488", "Zhoushan", "CN", "Asia/Shanghai"),
("40.66482", "122.22833", "Yingkou", "CN", "Asia/Shanghai"),
("46.08333", "122.08333", "Ulanhot", "CN", "Asia/Shanghai"),
("45.35", "126.28333", "Shuangcheng", "CN", "Asia/Shanghai"),
("41.09822", "120.74792", "Nanpiao", "CN", "Asia/Shanghai"),
("41.27194", "123.17306", "Liaoyang", "CN", "Asia/Shanghai"),
("41.94175", "123.50266", "Hushitai", "CN", "Asia/Shanghai"),
("40.85158", "122.74754", "Haicheng", "CN", "Asia/Shanghai"),
("42.64031", "125.51176", "Dongfeng", "CN", "Asia/Shanghai"),
("45.75279", "130.57211", "Boli", "CN", "Asia/Shanghai"),
("31.64615", "120.74221", "Changshu City", "CN", "Asia/Shanghai"),
("7.83389", "-72.47417", "Villa del Rosario", "CO", "America/Bogota"),
("6.46838", "-73.26022", "Socorro", "CO", "America/Bogota"),
("8.79577", "-75.69947", "San Carlos", "CO", "America/Bogota"),
("10.98778", "-74.95472", "Puerto Colombia", "CO", "America/Bogota"),
("4.73245", "-74.26419", "Madrid", "CO", "America/Bogota"),
("5.20856", "-74.73584", "Honda", "CO", "America/Bogota"),
("10.15031", "-73.9614", "El Copey", "CO", "America/Bogota"),
("3.8801", "-77.03116", "Buenaventura", "CO", "America/Bogota"),
("5.6561", "-75.87877", "Andes", "CO", "America/Bogota"),
("9.92787", "-84.13722", "San Rafael", "CR", "America/Costa_Rica"),
("10.63504", "-85.43772", "Liberia", "CR", "America/Costa_Rica"),
("23.15678", "-81.24441", "Varadero", "CU", "America/Havana"),
("20.14298", "-77.43532", "Media Luna", "CU", "America/Havana"),
("23.04419", "-82.00919", "Jaruco", "CU", "America/Havana"),
("22.98212", "-80.58556", "Corralillo", "CU", "America/Havana"),
("23.0072", "-82.4017", "Boyeros", "CU", "America/Havana"),
("50.50301", "13.63617", "Most", "CZ", "Europe/Prague"),
("50.23271", "12.87117", "Karlovy Vary", "CZ", "Europe/Prague"),
("51.04962", "12.1369", "Zeitz", "DE", "Europe/Berlin"),
("52.59319", "13.32127", "Wittenau", "DE", "Europe/Berlin"),
("50.82709", "6.9747", "Wesseling", "DE", "Europe/Berlin"),
("50.9803", "11.32903", "Weimar", "DE", "Europe/Berlin"),
("52.86147", "9.5926", "Walsrode", "DE", "Europe/Berlin"),
("51.88333", "8.51667", "Verl", "DE", "Europe/Berlin"),
("48.07667", "8.64409", "Trossingen", "DE", "Europe/Berlin"),
("48.78232", "9.17702", "Stuttgart", "DE", "Europe/Berlin"),
("53.59337", "9.47629", "Stade", "DE", "Europe/Berlin"),
("50.80019", "7.20769", "Siegburg", "DE", "Europe/Berlin"),
("51.21667", "6.26667", "Schwalmtal", "DE", "Europe/Berlin"),
("54.52156", "9.5586", "Schleswig", "DE", "Europe/Berlin"),
("50.72043", "11.34046", "Rudolstadt", "DE", "Europe/Berlin"),
("48.49144", "9.20427", "Reutlingen", "DE", "Europe/Berlin"),
("51.20219", "7.36027", "Radevormwald", "DE", "Europe/Berlin"),
("48.46458", "9.22796", "Pfullingen", "DE", "Europe/Berlin"),
("51.30001", "13.10984", "Oschatz", "DE", "Europe/Berlin"),
("51.47805", "6.8625", "Oberhausen", "DE", "Europe/Berlin"),
("50.23805", "8.86704", "Nidderau", "DE", "Europe/Berlin"),
("48.73218", "11.18709", "Neuburg an der Donau", "DE", "Europe/Berlin"),
("47.98372", "10.18527", "Memmingen", "DE", "Europe/Berlin"),
("50.80904", "8.77069", "Marburg an der Lahn", "DE", "Europe/Berlin"),
("49.5099", "6.74549", "Losheim", "DE", "Europe/Berlin"),
("48.52961", "12.16179", "Landshut", "DE", "Europe/Berlin"),
("51.19139", "6.51352", "Korschenbroich", "DE", "Europe/Berlin"),
("52.2", "8.63333", "Kirchlengern", "DE", "Europe/Berlin"),
("50.23019", "8.77155", "Karben", "DE", "Europe/Berlin"),
("50.09019", "8.4493", "Hofheim am Taunus", "DE", "Europe/Berlin"),
("52.61131", "13.31783", "Hermsdorf", "DE", "Europe/Berlin"),
("48.35149", "8.96317", "Hechingen", "DE", "Europe/Berlin"),
("53.63333", "9.85", "Halstenbek", "DE", "Europe/Berlin"),
("52.21099", "7.02238", "Gronau", "DE", "Europe/Berlin"),
("52.47774", "10.5511", "Gifhorn", "DE", "Europe/Berlin"),
("48.06919", "11.37703", "Gauting", "DE", "Europe/Berlin"),
("48.35693", "10.98461", "Friedberg", "DE", "Europe/Berlin"),
("51.168", "7.973", "Finnentrop", "DE", "Europe/Berlin"),
("49.13645", "8.91229", "Eppingen", "DE", "Europe/Berlin"),
("48.28259", "9.72749", "Ehingen", "DE", "Europe/Berlin"),
("52.4581", "13.28702", "Dahlem", "DE", "Europe/Berlin"),
("51.08468", "7.11393", "Burscheid", "DE", "Europe/Berlin"),
("49.03685", "8.70745", "Bretten", "DE", "Europe/Berlin"),
("49.68369", "8.61839", "Bensheim", "DE", "Europe/Berlin"),
("53.94313", "10.30215", "Bad Segeberg", "DE", "Europe/Berlin"),
("50.64336", "7.2278", "Bad Honnef", "DE", "Europe/Berlin"),
("49.97704", "9.15214", "Aschaffenburg", "DE", "Europe/Berlin"),
("48.21644", "9.02596", "Albstadt", "DE", "Europe/Berlin"),
("52.53048", "13.29371", "Charlottenburg-Nord", "DE", "Europe/Berlin"),
("53.6052", "10.03988", "Barmbek-Nord", "DE", "Europe/Berlin"),
("11.15583", "42.7125", "'Ali Sabieh", "DJ", "Africa/Djibouti"),
("55.67938", "12.53463", "Frederiksberg", "DK", "Europe/Copenhagen"),
(
"18.20854",
"-71.10077",
"Santa Cruz de Barahona",
"DO",
"America/Santo_Domingo",
),
("36.76639", "3.47717", "Boumerdas", "DZ", "Africa/Algiers"),
("36.72544", "3.55665", "Thenia", "DZ", "Africa/Algiers"),
("34.15429", "3.50309", "Messaad", "DZ", "Africa/Algiers"),
("35.21222", "2.31889", "Ksar Chellala", "DZ", "Africa/Algiers"),
("35.06544", "1.04945", "Frenda", "DZ", "Africa/Algiers"),
("36.06386", "4.62744", "El Achir", "DZ", "Africa/Algiers"),
("36.76775", "2.95924", "Cheraga", "DZ", "Africa/Algiers"),
("36.27462", "4.85668", "Bordj Zemoura", "DZ", "Africa/Algiers"),
("36.61954", "4.08282", "Beni Douala", "DZ", "Africa/Algiers"),
("-2.13404", "-79.59415", "Milagro", "EC", "America/Guayaquil"),
("-2.90055", "-79.00453", "Cuenca", "EC", "America/Guayaquil"),
("59.37722", "28.19028", "Narva", "EE", "Europe/Tallinn"),
("26.67319", "31.4976", "Juhaynah", "EG", "Africa/Cairo"),
("31.20176", "29.91582", "Alexandria", "EG", "Africa/Cairo"),
("39.96348", "-4.83076", "Talavera de la Reina", "ES", "Europe/Madrid"),
("37.35813", "-6.03731", "San Juan de Aznalfarache", "ES", "Europe/Madrid"),
("38.68712", "-4.10734", "Puertollano", "ES", "Europe/Madrid"),
("38.38479", "-0.76773", "Novelda", "ES", "Europe/Madrid"),
("27.76056", "-15.58602", "Maspalomas", "ES", "Atlantic/Canary"),
("38.47917", "-1.325", "Jumilla", "ES", "Europe/Madrid"),
("38.96667", "-0.18333", "Gandia", "ES", "Europe/Madrid"),
("38.10558", "-1.86343", "Caravaca", "ES", "Europe/Madrid"),
("37.49073", "-2.77259", "Baza", "ES", "Europe/Madrid"),
("42.64685", "-5.55835", "Villaquilambre", "ES", "Europe/Madrid"),
("42.06166", "-1.60452", "Tudela", "ES", "Europe/Madrid"),
("40.42386", "-3.53261", "San Fernando de Henares", "ES", "Europe/Madrid"),
("41.15612", "1.10687", "Reus", "ES", "Europe/Madrid"),
("41.91738", "3.1631", "Palafrugell", "ES", "Europe/Madrid"),
("43.32686", "-2.98884", "Leioa", "ES", "Europe/Madrid"),
("43.31667", "-2.68333", "Gernika-Lumo", "ES", "Europe/Madrid"),
("43.48961", "-8.2194", "Ferrol", "ES", "Europe/Madrid"),
("41.63976", "2.35739", "Cardedeu", "ES", "Europe/Madrid"),
("40.70995", "0.57856", "Amposta", "ES", "Europe/Madrid"),
("37.13548", "-3.67029", "Las Gabias", "ES", "Europe/Madrid"),
("42.8139", "-1.64295", "Segundo Ensanche", "ES", "Europe/Madrid"),
("41.41204", "2.18247", "el Camp de l'Arpa del Clot", "ES", "Europe/Madrid"),
("11.85", "38.01667", "Debre Tabor", "ET", "Africa/Addis_Ababa"),
("6.03333", "37.55", "Arba Minch", "ET", "Africa/Addis_Ababa"),
("65.84811", "24.14662", "Tornio", "FI", "Europe/Helsinki"),
("60.18427", "24.95034", "Kallio", "FI", "Europe/Helsinki"),
("60.2052", "24.6522", "Espoo", "FI", "Europe/Helsinki"),
("45.51667", "4.86667", "Vienne", "FR", "Europe/Paris"),
("44.92801", "4.8951", "Valence", "FR", "Europe/Paris"),
("44.80477", "-0.59543", "Talence", "FR", "Europe/Paris"),
("48.77644", "2.29026", "Sceaux", "FR", "Europe/Paris"),
("50.75", "2.25", "Saint-Omer", "FR", "Europe/Paris"),
("45.69558", "4.7934", "Saint-Genis-Laval", "FR", "Europe/Paris"),
("48.8765", "2.18967", "Rueil-Malmaison", "FR", "Europe/Paris"),
("48", "-4.1", "Quimper", "FR", "Europe/Paris"),
("43.11667", "1.6", "Pamiers", "FR", "Europe/Paris"),
("46.32313", "-0.45877", "Niort", "FR", "Europe/Paris"),
("43.61092", "3.87723", "Montpellier", "FR", "Europe/Paris"),
("48.98333", "2.61667", "Mitry-Mory", "FR", "Europe/Paris"),
("48.86667", "2.08333", "Marly-le-Roi", "FR", "Europe/Paris"),
("46.67535", "5.55575", "Lons-le-Saunier", "FR", "Europe/Paris"),
("43.32393", "5.4584", "Les Olives", "FR", "Europe/Paris"),
("48.8222", "2.12213", "Le Chesnay", "FR", "Europe/Paris"),
("48.90472", "2.2469", "La Garenne-Colombes", "FR", "Europe/Paris"),
("48.98994", "2.1699", "Herblay", "FR", "Europe/Paris"),
("48.98693", "2.44892", "Gonesse", "FR", "Europe/Paris"),
("48.79325", "2.29275", "Fontenay-aux-Roses", "FR", "Europe/Paris"),
("49.28669", "1.00288", "Elbeuf", "FR", "Europe/Paris"),
("43.71032", "-1.05366", "Dax", "FR", "Europe/Paris"),
("43.61058", "1.33467", "Colomiers", "FR", "Europe/Paris"),
("43.83125", "5.03586", "Cavaillon", "FR", "Europe/Paris"),
("45.73333", "4.91667", "Bron", "FR", "Europe/Paris"),
("48.90982", "2.45012", "Bobigny", "FR", "Europe/Paris"),
("48.77275", "5.16108", "Bar-le-Duc", "FR", "Europe/Paris"),
("43.67681", "4.63031", "Arles", "FR", "Europe/Paris"),
("41.91886", "8.73812", "Ajaccio", "FR", "Europe/Paris"),
("43.2907", "5.4384", "Marseille 11", "FR", "Europe/Paris"),
("-1.63333", "13.58357", "Franceville", "GA", "Africa/Libreville"),
("53.19146", "-2.52398", "Winsford", "GB", "Europe/London"),
("51.26", "-2.1875", "Westbury", "GB", "Europe/London"),
("51.84819", "1.26738", "Walton-on-the-Naze", "GB", "Europe/London"),
("52.41667", "0.75", "Thetford", "GB", "Europe/London"),
("51.39323", "0.47713", "Strood", "GB", "Europe/London"),
("50.79205", "-1.08593", "Southsea", "GB", "Europe/London"),
("53.78333", "-1.06667", "Selby", "GB", "Europe/London"),
("55.82885", "-4.21376", "Rutherglen", "GB", "Europe/London"),
("53.00974", "-3.05814", "Rhosllanerchrugog", "GB", "Europe/London"),
("53.83333", "-2.98333", "Poulton-le-Fylde", "GB", "Europe/London"),
("50.11861", "-5.53715", "Penzance", "GB", "Europe/London"),
("50.82882", "-0.32247", "Lancing", "GB", "Europe/London"),
("51.40148", "-1.32471", "Newbury", "GB", "Europe/London"),
("53.49389", "-1.29243", "Mexborough", "GB", "Europe/London"),
("50.75767", "-1.5443", "Lymington", "GB", "Europe/London"),
("53.69786", "-2.68758", "Leyland", "GB", "Europe/London"),
("53.7446", "-0.33525", "Kingston upon Hull", "GB", "Europe/London"),
("57.47908", "-4.22398", "Inverness", "GB", "Europe/London"),
("51.62907", "-0.74934", "High Wycombe", "GB", "Europe/London"),
("51.38673", "0.30367", "Hartley", "GB", "Europe/London"),
("52.66277", "-2.01111", "Great Wyrley", "GB", "Europe/London"),
("53.38333", "-0.76667", "Gainsborough", "GB", "Europe/London"),
("50.7236", "-3.52751", "Exeter", "GB", "Europe/London"),
("52.68333", "0.93333", "East Dereham", "GB", "Europe/London"),
("51.35084", "-1.99421", "Devizes", "GB", "Europe/London"),
("50.76306", "-1.29772", "Cowes", "GB", "Europe/London"),
("51.78967", "1.15597", "Clacton-on-Sea", "GB", "Europe/London"),
("53.46506", "-1.47217", "Chapletown", "GB", "Europe/London"),
("51.64316", "-0.36053", "Bushey", "GB", "Europe/London"),
("52.48173", "-2.12139", "Brierley Hill", "GB", "Europe/London"),
("53.81667", "-3.05", "Blackpool", "GB", "Europe/London"),
("53.0233", "-1.48119", "Belper", "GB", "Europe/London"),
("51.65", "-0.2", "Barnet", "GB", "Europe/London"),
("56.56317", "-2.58736", "Arbroath", "GB", "Europe/London"),
("57.14369", "-2.09814", "Aberdeen", "GB", "Europe/London"),
("51.39148", "-0.29825", "Surbiton", "GB", "Europe/London"),
("51.42708", "-0.91979", "Lower Earley", "GB", "Europe/London"),
("55.82737", "-4.0573", "Viewpark", "GB", "Europe/London"),
("41.82143", "41.77921", "Kobuleti", "GE", "Asia/Tbilisi"),
("5.30383", "-1.98956", "Tarkwa", "GH", "Africa/Accra"),
("7.06273", "-1.4001", "Mampong", "GH", "Africa/Accra"),
("6.46346", "-2.31938", "Bibiani", "GH", "Africa/Accra"),
("13.56667", "-15.6", "Farafenni", "GM", "Africa/Banjul"),
("9.535", "-13.68778", "Camayenne", "GN", "Africa/Conakry"),
("14.93333", "-91.11667", "Chichicastenango", "GT", "America/Guatemala"),
("22.37066", "114.10479", "Tsuen Wan", "HK", "Asia/Hong_Kong"),
("15.48131", "-86.57415", "Olanchito", "HN", "America/Tegucigalpa"),
("43.50891", "16.43915", "Split", "HR", "Europe/Zagreb"),
("18.65297", "-72.09391", "Thomazeau", "HT", "America/Port-au-Prince"),
("18.57677", "-72.22625", "Croix-des-Bouquets", "HT", "America/Port-au-Prince"),
("3.3285", "99.1625", "Tebingtinggi", "ID", "Asia/Jakarta"),
("3.7278", "98.6738", "Labuhan Deli", "ID", "Asia/Jakarta"),
("-7.51611", "109.05389", "Wangon", "ID", "Asia/Jakarta"),
("3.31332", "117.59152", "Tarakan", "ID", "Asia/Makassar"),
("-6.91806", "106.92667", "Sukabumi", "ID", "Asia/Jakarta"),
("-1.26424", "104.09701", "Simpang", "ID", "Asia/Jakarta"),
("-7.0981", "109.3243", "Randudongkal", "ID", "Asia/Jakarta"),
("0.51667", "101.44167", "Pekanbaru", "ID", "Asia/Jakarta"),
("-7.01833", "107.60389", "Pameungpeuk", "ID", "Asia/Jakarta"),
("-8.43333", "114.33333", "Muncar", "ID", "Asia/Jakarta"),
("-3.5403", "118.9707", "Majene", "ID", "Asia/Makassar"),
("-6.8048", "110.8405", "Kudus", "ID", "Asia/Jakarta"),
("-7.81667", "112.01667", "Kediri", "ID", "Asia/Jakarta"),
("-1.6", "103.61667", "Jambi City", "ID", "Asia/Jakarta"),
("-7.57897", "112.23109", "Diwek", "ID", "Asia/Jakarta"),
("-6.48167", "106.85417", "Cibinong", "ID", "Asia/Jakarta"),
("-7.73379", "113.69785", "Besuki", "ID", "Asia/Jakarta"),
("-1.26753", "116.82887", "Balikpapan", "ID", "Asia/Makassar"),
("-7.54972", "110.71639", "Ngemplak", "ID", "Asia/Jakarta"),
("53.53333", "-7.35", "An Muileann gCearr", "IE", "Europe/Dublin"),
("53.43333", "-7.95", "Athlone", "IE", "Europe/Dublin"),
("31.92923", "34.86563", "Ramla", "IL", "Asia/Jerusalem"),
("32.05971", "34.8732", "Ganei Tikva", "IL", "Asia/Jerusalem"),
("31.39547", "34.75699", "Rahat", "IL", "Asia/Jerusalem"),
("18.87813", "72.93924", "Uran", "IN", "Asia/Kolkata"),
("10.58806", "77.24779", "Udumalaippettai", "IN", "Asia/Kolkata"),
("9.82564", "78.25795", "Tiruppuvanam", "IN", "Asia/Kolkata"),
("25.49043", "85.94001", "Teghra", "IN", "Asia/Kolkata"),
("12.04161", "75.35927", "Talipparamba", "IN", "Asia/Kolkata"),
("26.11527", "86.59509", "Supaul", "IN", "Asia/Kolkata"),
("34.08565", "74.80555", "Srinagar", "IN", "Asia/Kolkata"),
("25.92493", "73.66633", "Sojat", "IN", "Asia/Kolkata"),
("14.62072", "74.83554", "Sirsi", "IN", "Asia/Kolkata"),
("25.13915", "73.06784", "Sheoganj", "IN", "Asia/Kolkata"),
("11.50526", "77.23826", "Sathyamangalam", "IN", "Asia/Kolkata"),
("21.46527", "83.97573", "Sambalpur", "IN", "Asia/Kolkata"),
("25.87498", "86.59611", "Saharsa", "IN", "Asia/Kolkata"),
("12.95629", "78.27539", "Robertsonpet", "IN", "Asia/Kolkata"),
("26.44931", "91.61356", "Rangia", "IN", "Asia/Kolkata"),
("33.37526", "74.3092", "Rajaori", "IN", "Asia/Kolkata"),
("24.81757", "84.63445", "Rafiganj", "IN", "Asia/Kolkata"),
("18.51957", "73.85535", "Pune", "IN", "Asia/Kolkata"),
("11.93381", "79.82979", "Puducherry", "IN", "Asia/Kolkata"),
("28.71271", "77.656", "Pilkhua", "IN", "Asia/Kolkata"),
("10.12268", "77.54372", "Periyakulam", "IN", "Asia/Kolkata"),
("31.28092", "74.85849", "Patti", "IN", "Asia/Kolkata"),
("20.88098", "75.11937", "Parola", "IN", "Asia/Kolkata"),
("23.07492", "88.28637", "Pandua", "IN", "Asia/Kolkata"),
("18.18158", "76.03889", "Osmanabad", "IN", "Asia/Kolkata"),
("25.6439", "77.9129", "Narwar", "IN", "Asia/Kolkata"),
("30.81383", "75.16878", "Moga", "IN", "Asia/Kolkata"),
("28.98002", "77.70636", "Meerut", "IN", "Asia/Kolkata"),
("11.12018", "76.11996", "Manjeri", "IN", "Asia/Kolkata"),
("30.21121", "74.4818", "Malaut", "IN", "Asia/Kolkata"),
("25.92127", "86.79271", "Madhipura", "IN", "Asia/Kolkata"),
("24.05979", "77.40858", "Leteri", "IN", "Asia/Kolkata"),
("21.34222", "71.30633", "Kundla", "IN", "Asia/Kolkata"),
("22.75218", "72.68533", "Kheda", "IN", "Asia/Kolkata"),
("23.1959", "86.51499", "Kenda", "IN", "Asia/Kolkata"),
("29.21399", "78.95693", "Kashipur", "IN", "Asia/Kolkata"),
("11.00599", "77.5609", "Kangayam", "IN", "Asia/Kolkata"),
("22.88783", "84.13864", "Jashpurnagar", "IN", "Asia/Kolkata"),
("26.2649", "81.54855", "Jais", "IN", "Asia/Kolkata"),
("16.06213", "76.0586", "Hungund", "IN", "Asia/Kolkata"),
("29.22254", "79.5286", "Haldwani", "IN", "Asia/Kolkata"),
("26.76628", "83.36889", "Gorakhpur", "IN", "Asia/Kolkata"),
("12.25282", "79.41727", "Gingee", "IN", "Asia/Kolkata"),
("21.53889", "71.57737", "Gariadhar", "IN", "Asia/Kolkata"),
("15.73628", "75.96976", "Gajendragarh", "IN", "Asia/Kolkata"),
("17.54907", "82.85749", "Elamanchili", "IN", "Asia/Kolkata"),
("19.21667", "73.08333", "Dombivli", "IN", "Asia/Kolkata"),
("22.19303", "88.18466", "Diamond Harbour", "IN", "Asia/Kolkata"),
("12.1277", "78.15794", "Dharmapuri", "IN", "Asia/Kolkata"),
("25.75728", "75.37991", "Deoli", "IN", "Asia/Kolkata"),
("14.46693", "75.92694", "Davangere", "IN", "Asia/Kolkata"),
("25.66795", "85.83636", "Dalsingh Sarai", "IN", "Asia/Kolkata"),
("15.5439", "73.7553", "Calangute", "IN", "Asia/Kolkata"),
("27.9247", "78.40102", "Chharra", "IN", "Asia/Kolkata"),
("32.55531", "76.12647", "Chamba", "IN", "Asia/Kolkata"),
("20.88197", "85.83334", "Bhuban", "IN", "Asia/Kolkata"),
("19.30157", "72.85107", "Bhayandar", "IN", "Asia/Kolkata"),
("15.45144", "78.14797", "Betamcherla", "IN", "Asia/Kolkata"),
("26.32293", "91.00632", "Barpeta", "IN", "Asia/Kolkata"),
("28.92694", "78.23456", "Bachhraon", "IN", "Asia/Kolkata"),
("21.59983", "71.21169", "Amreli", "IN", "Asia/Kolkata"),
("10.10649", "76.35484", "Alwaye", "IN", "Asia/Kolkata"),
("24.41288", "76.56719", "Aklera", "IN", "Asia/Kolkata"),
("23.49668", "86.68363", "Adra", "IN", "Asia/Kolkata"),
("22.4711", "88.1453", "Pujali", "IN", "Asia/Kolkata"),
("22.10194", "85.37752", "Barbil", "IN", "Asia/Kolkata"),
("17.34769", "78.55757", "Lal Bahadur Nagar", "IN", "Asia/Kolkata"),
("23.18", "88.58", "Aistala", "IN", "Asia/Kolkata"),
("9.57046", "76.32756", "Kalavoor", "IN", "Asia/Kolkata"),
("32.61603", "44.02488", "Karbala", "IQ", "Asia/Baghdad"),
("35.6803", "51.0193", "Shahre Jadide Andisheh", "IR", "Asia/Tehran"),
("36.64852", "51.49621", "Nowshahr", "IR", "Asia/Tehran"),
("33.14447", "47.3799", "Darreh Shahr", "IR", "Asia/Tehran"),
("33.86419", "48.26258", "Aleshtar", "IR", "Asia/Tehran"),
("32.65246", "51.67462", "Isfahan", "IR", "Asia/Tehran"),
("38.07789", "13.44275", "Villabate", "IT", "Europe/Rome"),
("36.92574", "14.72443", "Ragusa", "IT", "Europe/Rome"),
("37.51803", "15.00913", "Misterbianco", "IT", "Europe/Rome"),
("37.49223", "15.07041", "Catania", "IT", "Europe/Rome"),
("37.31065", "13.57661", "Agrigento", "IT", "Europe/Rome"),
("43.78956", "7.60872", "Ventimiglia", "IT", "Europe/Rome"),
("44.89784", "8.86374", "Tortona", "IT", "Europe/Rome"),
("40.87329", "14.43865", "Somma Vesuviana", "IT", "Europe/Rome"),
("40.72586", "8.55552", "Sassari", "IT", "Europe/Rome"),
("45.39402", "9.29109", "San Giuliano Milanese", "IT", "Europe/Rome"),
("42.67164", "14.01481", "Roseto degli Abruzzi", "IT", "Europe/Rome"),
("45.78071", "12.84052", "Portogruaro", "IT", "Europe/Rome"),
("43.1122", "12.38878", "Perugia", "IT", "Europe/Rome"),
("45.44694", "8.62118", "Novara", "IT", "Europe/Rome"),
("45.50369", "11.412", "Montecchio Maggiore-Alte Ceccato", "IT", "Europe/Rome"),
("40.55851", "17.80774", "Mesagne", "IT", "Europe/Rome"),
("45.79377", "8.88104", "Malnate", "IT", "Europe/Rome"),
("42.22718", "14.39024", "Lanciano", "IT", "Europe/Rome"),
("45.53069", "9.40531", "Gorgonzola", "IT", "Europe/Rome"),
("40.53123", "17.58522", "Francavilla Fontana", "IT", "Europe/Rome"),
("43.62558", "13.39954", "Falconara Marittima", "IT", "Europe/Rome"),
("45.9836", "12.70038", "Cordenons", "IT", "Europe/Rome"),
("44.31771", "9.32241", "Chiavari", "IT", "Europe/Rome"),
("44.59445", "11.04979", "Castelfranco Emilia", "IT", "Europe/Rome"),
("41.55947", "14.66737", "Campobasso", "IT", "Europe/Rome"),
("41.24264", "16.50104", "Bisceglie", "IT", "Europe/Rome"),
("41.72063", "12.6723", "Ariccia", "IT", "Europe/Rome"),
("40.92298", "14.30935", "Afragola", "IT", "Europe/Rome"),
("40.87363", "14.34085", "Volla", "IT", "Europe/Rome"),
("18.00747", "-76.78319", "New Kingston", "JM", "America/Jamaica"),
("35.8", "137.23333", "Gero", "JP", "Asia/Tokyo"),
("34.61667", "135.6", "Yao", "JP", "Asia/Tokyo"),
("34.75856", "136.13108", "Ueno-ebisumachi", "JP", "Asia/Tokyo"),
("34.81667", "137.4", "Toyokawa", "JP", "Asia/Tokyo"),
("34.4833", "136.84186", "Toba", "JP", "Asia/Tokyo"),
("36.65", "138.31667", "Suzaka", "JP", "Asia/Tokyo"),
("34.9", "137.5", "Shinshiro", "JP", "Asia/Tokyo"),
("35.06667", "135.21667", "Sasayama", "JP", "Asia/Tokyo"),
("36", "139.55722", "Okegawa", "JP", "Asia/Tokyo"),
("36.53333", "136.61667", "Nonoichi", "JP", "Asia/Tokyo"),
("36.75965", "137.36215", "Namerikawa", "JP", "Asia/Tokyo"),
("35", "136.51667", "Komono", "JP", "Asia/Tokyo"),
("33.4425", "129.96972", "Karatsu", "JP", "Asia/Tokyo"),
("35.30889", "139.55028", "Kamakura", "JP", "Asia/Tokyo"),
("34.25", "135.31667", "Iwade", "JP", "Asia/Tokyo"),
("35.82756", "137.95378", "Ina", "JP", "Asia/Tokyo"),
("33.3213", "130.94098", "Hita", "JP", "Asia/Tokyo"),
("36.24624", "139.07204", "Fujioka", "JP", "Asia/Tokyo"),
("36.33011", "138.89585", "Annaka", "JP", "Asia/Tokyo"),
("35.815", "139.6853", "Shimotoda", "JP", "Asia/Tokyo"),
("39.46667", "141.95", "Yamada", "JP", "Asia/Tokyo"),
("37.56667", "140.11667", "Inawashiro", "JP", "Asia/Tokyo"),
("43.82634", "144.09638", "Motomachi", "JP", "Asia/Tokyo"),
("44.35056", "142.45778", "Nayoro", "JP", "Asia/Tokyo"),
("41.77583", "140.73667", "Hakodate", "JP", "Asia/Tokyo"),
("35.48199", "137.02166", "Minokamo", "JP", "Asia/Tokyo"),
("0.03813", "36.36339", "Nyahururu", "KE", "Africa/Nairobi"),
("3.11988", "35.59642", "Lodwar", "KE", "Africa/Nairobi"),
("0.46005", "34.11169", "Busia", "KE", "Africa/Nairobi"),
("40.93333", "73", "Jalal-Abad", "KG", "Asia/Bishkek"),
("13.65805", "102.56365", "Paoy Paet", "KH", "Asia/Phnom_Penh"),
("36.82167", "128.63083", "Eisen", "KR", "Asia/Seoul"),
("37.1759", "128.9889", "T’aebaek", "KR", "Asia/Seoul"),
("36.20389", "127.08472", "Nonsan", "KR", "Asia/Seoul"),
("37.65639", "126.835", "Goyang-si", "KR", "Asia/Seoul"),
("36.6009", "126.665", "Hongseong", "KR", "Asia/Seoul"),
("34.8825", "128.62667", "Sinhyeon", "KR", "Asia/Seoul"),
("47.83333", "59.6", "Shalqar", "KZ", "Asia/Aqtobe"),
("47.46657", "84.87144", "Zaysan", "KZ", "Asia/Almaty"),
("44.85278", "65.50917", "Kyzylorda", "KZ", "Asia/Qyzylorda"),
("43.41949", "77.0202", "Otegen Batyra", "KZ", "Asia/Almaty"),
("6.84019", "79.87116", "Dehiwala-Mount Lavinia", "LK", "Asia/Colombo"),
("6.9909", "79.883", "Hendala", "LK", "Asia/Colombo"),
("7.57944", "-8.53778", "New Yekepa", "LR", "Africa/Monrovia"),
("55.25", "24.75", "Ukmerge", "LT", "Europe/Vilnius"),
("54.39635", "24.04142", "Alytus", "LT", "Europe/Vilnius"),
("30.75545", "20.22625", "Ajdabiya", "LY", "Africa/Tripoli"),
("24.96334", "10.18003", "Ghat", "LY", "Africa/Tripoli"),
("33.92866", "-6.90656", "Temara", "MA", "Africa/Casablanca"),
("33.42585", "-6.00137", "Oulmes", "MA", "Africa/Casablanca"),
("34.31", "-2.16", "Jerada", "MA", "Africa/Casablanca"),
("33.43443", "-5.22126", "Azrou", "MA", "Africa/Casablanca"),
("48.15659", "28.28489", "Soroca", "MD", "Europe/Chisinau"),
("42.28639", "18.84", "Budva", "ME", "Europe/Podgorica"),
("-22.9", "44.53333", "Sakaraha", "MG", "Indian/Antananarivo"),
("-21.15", "46.58333", "Ikalamavony", "MG", "Indian/Antananarivo"),
("-19.65", "47.31667", "Antanifotsy", "MG", "Indian/Antananarivo"),
("-17.83333", "48.41667", "Ambatondrazaka", "MG", "Indian/Antananarivo"),
("42", "21.32778", "Saraj", "MK", "Europe/Skopje"),
("41.92361", "20.91361", "Bogovinje", "MK", "Europe/Skopje"),
("12.74409", "-8.07257", "Kati", "ML", "Africa/Bamako"),
("14.0823", "98.19151", "Dawei", "MM", "Asia/Yangon"),
("16.68911", "98.50893", "Myawadi", "MM", "Asia/Yangon"),
("17.30858", "97.01124", "Kyaikto", "MM", "Asia/Yangon"),
("47.90771", "106.88324", "Ulan Bator", "MN", "Asia/Ulaanbaatar"),
("14.67751", "-60.94228", "Le Robert", "MQ", "America/Martinique"),
("35.89972", "14.51472", "Valletta", "MT", "Europe/Malta"),
("-13.7804", "34.4587", "Salima", "MW", "Africa/Blantyre"),
("16.75973", "-93.11308", "Tuxtla", "MX", "America/Mexico_City"),
("19.8173", "-97.35992", "Teziutlan", "MX", "America/Mexico_City"),
("21.28306", "-89.66123", "Progreso", "MX", "America/Merida"),
("17.06542", "-96.72365", "Oaxaca", "MX", "America/Mexico_City"),
("25.87972", "-97.50417", "Heroica Matamoros", "MX", "America/Matamoros"),
("19.32932", "-98.1664", "Contla", "MX", "America/Mexico_City"),
("17.94979", "-94.91386", "Acayucan", "MX", "America/Mexico_City"),
("19.32889", "-99.32556", "San Lorenzo Acopilco", "MX", "America/Mexico_City"),
("20.22816", "-103.5687", "Zacoalco de Torres", "MX", "America/Mexico_City"),
("20.74122", "-100.44843", "Santa Rosa Jauregui", "MX", "America/Mexico_City"),
("20.21322", "-100.88023", "Salvatierra", "MX", "America/Mexico_City"),
("19.64745", "-102.04897", "Paracho de Verduzco", "MX", "America/Mexico_City"),
("20.28527", "-103.42897", "Jocotepec", "MX", "America/Mexico_City"),
("21.01858", "-101.2591", "Guanajuato", "MX", "America/Mexico_City"),
("22.49396", "-105.36369", "Acaponeta", "MX", "America/Mazatlan"),
("19.04222", "-98.11889", "Casa Blanca", "MX", "America/Mexico_City"),
("1.6561", "103.6032", "Kulai", "MY", "Asia/Kuala_Lumpur"),
("5.90702", "116.10146", "Donggongon", "MY", "Asia/Kuching"),
("4.88441", "101.96857", "Gua Musang", "MY", "Asia/Kuala_Lumpur"),
("5.4709", "100.24529", "Batu Feringgi", "MY", "Asia/Kuala_Lumpur"),
("4.02219", "101.02083", "Teluk Intan", "MY", "Asia/Kuala_Lumpur"),
("1.6", "103.81667", "Ulu Tiram", "MY", "Asia/Kuala_Lumpur"),
("2.2139", "102.3278", "Kampung Ayer Molek", "MY", "Asia/Kuala_Lumpur"),
("-23.85972", "35.34722", "Maxixe", "MZ", "Africa/Maputo"),
("-21.98333", "16.91667", "Okahandja", "NA", "Africa/Windhoek"),
("13.70727", "9.15013", "Mirriah", "NE", "Africa/Niamey"),
("4.92675", "6.26764", "Yenagoa", "NG", "Africa/Lagos"),
("6.8485", "3.64633", "Shagamu", "NG", "Africa/Lagos"),
("7.6", "4.18333", "Olupona", "NG", "Africa/Lagos"),
("6.15038", "6.83042", "Nkpor", "NG", "Africa/Lagos"),
("6.45407", "3.39467", "Lagos", "NG", "Africa/Lagos"),
("9.58126", "8.2926", "Kafanchan", "NG", "Africa/Lagos"),
("7.62789", "4.74161", "Ilesa", "NG", "Africa/Lagos"),
("7.50251", "5.06258", "Igbara-Odo", "NG", "Africa/Lagos"),
("11.86064", "9.0027", "Gaya", "NG", "Africa/Lagos"),
("7.65649", "4.92235", "Efon-Alaaye", "NG", "Africa/Lagos"),
("10.61285", "12.19458", "Biu", "NG", "Africa/Lagos"),
("12.74482", "4.52514", "Argungu", "NG", "Africa/Lagos"),
("13.48082", "-86.58208", "Somoto", "NI", "America/Managua"),
("11.84962", "-86.19903", "Jinotepe", "NI", "America/Managua"),
("52.09", "5.23333", "Zeist", "NL", "Europe/Amsterdam"),
("51.65333", "5.2875", "Vught", "NL", "Europe/Amsterdam"),
("51.44889", "5.51978", "Tongelre", "NL", "Europe/Amsterdam"),
("51.95838", "4.47124", "Schiebroek", "NL", "Europe/Amsterdam"),
("52.31333", "6.92917", "Oldenzaal", "NL", "Europe/Amsterdam"),
("52.26083", "7.00417", "Losser", "NL", "Europe/Amsterdam"),
("53.16167", "6.76111", "Hoogezand", "NL", "Europe/Amsterdam"),
("52.57583", "6.61944", "Hardenberg", "NL", "Europe/Amsterdam"),
("52.71083", "5.74861", "Emmeloord", "NL", "Europe/Amsterdam"),
("51.955", "5.22778", "Culemborg", "NL", "Europe/Amsterdam"),
("52.14", "5.58472", "Barneveld", "NL", "Europe/Amsterdam"),
("68.79833", "16.54165", "Harstad", "NO", "Europe/Oslo"),
("-44.39672", "171.25364", "Timaru", "NZ", "Pacific/Auckland"),
("-38.65333", "178.00417", "Gisborne", "NZ", "Pacific/Auckland"),
("8.88988", "-79.62603", "Veracruz", "PA", "America/Panama"),
("9.15093", "-79.62098", "Chilibre", "PA", "America/Panama"),
("-3.74912", "-73.25383", "Iquitos", "PE", "America/Lima"),
("-16.25", "-69.08333", "Yunguyo", "PE", "America/Lima"),
("-15.21194", "-75.11028", "Minas de Marcona", "PE", "America/Lima"),
("-11.94306", "-76.70944", "Chosica", "PE", "America/Lima"),
("-5.85746", "144.23058", "Mount Hagen", "PG", "Pacific/Port_Moresby"),
("6.33444", "124.95278", "Tupi", "PH", "Asia/Manila"),
("10.7375", "122.9666", "Talisay", "PH", "Asia/Manila"),
("12.97389", "123.99333", "Sorsogon", "PH", "Asia/Manila"),
("9.3337", "122.8637", "Santa Catalina", "PH", "Asia/Manila"),
("12.35275", "121.06761", "San Jose", "PH", "Asia/Manila"),
("6.95194", "121.96361", "Recodo", "PH", "Asia/Manila"),
("14.66", "120.56528", "Pilar", "PH", "Asia/Manila"),
("10.20898", "123.758", "Naga", "PH", "Asia/Manila"),
("12.37169", "123.62494", "Masbate", "PH", "Asia/Manila"),
("16.0438", "120.4861", "Manaoag", "PH", "Asia/Manila"),
("10.13361", "124.84472", "Maasin", "PH", "Asia/Manila"),
("16.455", "120.5875", "La Trinidad", "PH", "Asia/Manila"),
("9.6531", "124.3697", "Jagna", "PH", "Asia/Manila"),
("14.8361", "120.97844", "Guyong", "PH", "Asia/Manila"),
("8.56697", "123.33471", "Dipolog", "PH", "Asia/Manila"),
("10.31672", "123.89071", "Cebu City", "PH", "Asia/Manila"),
("14.14989", "121.3152", "Calauan", "PH", "Asia/Manila"),
("15.72892", "120.57224", "Burgos", "PH", "Asia/Manila"),
("14.95472", "120.89694", "Baliuag", "PH", "Asia/Manila"),
("14.62578", "121.12251", "Antipolo", "PH", "Asia/Manila"),
("27.52948", "68.75915", "Khairpur Mir’s", "PK", "Asia/Karachi"),
("26.9423", "68.11759", "Tharu Shah", "PK", "Asia/Karachi"),
("31.82539", "72.54064", "Sillanwali", "PK", "Asia/Karachi"),
("31.71667", "73.38333", "Sangla Hill", "PK", "Asia/Karachi"),
("30.29184", "71.67164", "Qadirpur Ran", "PK", "Asia/Karachi"),
("31.96258", "73.97117", "Naushahra Virkan", "PK", "Asia/Karachi"),
("32.57756", "71.52847", "Mianwali", "PK", "Asia/Karachi"),
("27.55898", "68.21204", "Larkana", "PK", "Asia/Karachi"),
("30.46907", "70.96699", "Kot Addu", "PK", "Asia/Karachi"),
("30.76468", "74.12286", "Kanganpur", "PK", "Asia/Karachi"),
("25.95533", "68.88871", "Jhol", "PK", "Asia/Karachi"),
("29.69221", "72.54566", "Hasilpur", "PK", "Asia/Karachi"),
("32.17629", "75.06583", "Fazilpur", "PK", "Asia/Karachi"),
("32.87533", "71.57118", "Daud Khel", "PK", "Asia/Karachi"),
("25.80565", "68.49143", "Bhit Shah", "PK", "Asia/Karachi"),
("29.38242", "70.91106", "Alipur", "PK", "Asia/Karachi"),
("51.14942", "15.00835", "Zgorzelec", "PL", "Europe/Warsaw"),
("54.58048", "16.86194", "Ustka", "PL", "Europe/Warsaw"),
("50.5107", "18.30056", "Strzelce Opolskie", "PL", "Europe/Warsaw"),
("54.60528", "18.34717", "Reda", "PL", "Europe/Warsaw"),
("50.20528", "19.27498", "Jaworzno", "PL", "Europe/Warsaw"),
("50.86079", "17.4674", "Brzeg", "PL", "Europe/Warsaw"),
("18.42745", "-67.15407", "Aguadilla", "PR", "America/Puerto_Rico"),
("18.03496", "-66.8499", "Yauco", "PR", "America/Puerto_Rico"),
("31.78336", "35.23388", "East Jerusalem", "PS", "Asia/Hebron"),
("38.72706", "-9.24671", "Carnaxide", "PT", "Europe/Lisbon"),
("37.08819", "-8.2503", "Albufeira", "PT", "Europe/Lisbon"),
("41.20485", "-8.33147", "Paredes", "PT", "Europe/Lisbon"),
("41.1053", "-7.32097", "Custoias", "PT", "Europe/Lisbon"),
("37.74615", "-25.66689", "Ponta Delgada", "PT", "Atlantic/Azores"),
("-20.88231", "55.4504", "Saint-Denis", "RE", "Indian/Reunion"),
("44.43579", "26.01649", "Sector 6", "RO", "Europe/Bucharest"),
("44.22639", "22.53083", "Negotin", "RS", "Europe/Belgrade"),
("44.97639", "19.61222", "Sremska Mitrovica", "RS", "Europe/Belgrade"),
("53.53395", "33.72798", "Zhukovka", "RU", "Europe/Moscow"),
("46.7055", "38.2739", "Yeysk", "RU", "Europe/Moscow"),
("44.98901", "38.94324", "Yablonovskiy", "RU", "Europe/Moscow"),
("56.03361", "35.96944", "Volokolamsk", "RU", "Europe/Moscow"),
("57.97472", "33.2525", "Valday", "RU", "Europe/Moscow"),
("56.85836", "35.90057", "Tver", "RU", "Europe/Moscow"),
("55.62047", "37.49338", "Tyoply Stan", "RU", "Europe/Moscow"),
("54.90083", "38.07083", "Stupino", "RU", "Europe/Moscow"),
("55.63711", "37.38115", "Solntsevo", "RU", "Europe/Moscow"),
("59.80917", "30.38167", "Shushary", "RU", "Europe/Moscow"),
("64.5635", "39.8302", "Severodvinsk", "RU", "Europe/Moscow"),
("51.78771", "56.36091", "Saraktash", "RU", "Asia/Yekaterinburg"),
("53.95278", "32.86389", "Roslavl’", "RU", "Europe/Moscow"),
("51.40944", "46.04833", "Privolzhskiy", "RU", "Europe/Saratov"),
("61.78491", "34.34691", "Petrozavodsk", "RU", "Europe/Moscow"),
("53.37596", "51.3452", "Otradnyy", "RU", "Europe/Samara"),
("54.48147", "53.47103", "Oktyabr’skiy", "RU", "Asia/Yekaterinburg"),
("43.96222", "43.63417", "Novopavlovsk", "RU", "Europe/Moscow"),
("53.53041", "43.67663", "Nizhniy Lomov", "RU", "Europe/Moscow"),
("55.38752", "36.73307", "Naro-Fominsk", "RU", "Europe/Moscow"),
("50.06", "43.2379", "Mikhaylovka", "RU", "Europe/Volgograd"),
("55.64776", "38.02486", "Malakhovka", "RU", "Europe/Moscow"),
("55.85", "37.56667", "Likhobory", "RU", "Europe/Moscow"),
("51.4781", "57.3552", "Kuvandyk", "RU", "Asia/Yekaterinburg"),
("44.92934", "37.99117", "Krymsk", "RU", "Europe/Moscow"),
("54.03876", "43.91385", "Kovylkino", "RU", "Europe/Moscow"),
("60.02427", "30.28491", "Kolomyagi", "RU", "Europe/Moscow"),
("53.93361", "37.92792", "Kireyevsk", "RU", "Europe/Moscow"),
("54.84444", "38.16694", "Kashira", "RU", "Europe/Moscow"),
("58.7002", "59.4839", "Kachkanar", "RU", "Asia/Yekaterinburg"),
("43.35071", "46.10925", "Gudermes", "RU", "Europe/Moscow"),
("57.30185", "39.85331", "Gavrilov-Yam", "RU", "Europe/Moscow"),
("53.59782", "34.33825", "Dyat’kovo", "RU", "Europe/Moscow"),
("58.1908", "40.17171", "Danilov", "RU", "Europe/Moscow"),
("42.819", "47.1192", "Buynaksk", "RU", "Europe/Moscow"),
("53.77166", "38.12408", "Bogoroditsk", "RU", "Europe/Moscow"),
("54.39304", "53.26023", "Bavly", "RU", "Europe/Moscow"),
("55.39485", "43.83992", "Arzamas", "RU", "Europe/Moscow"),
("54.8421", "46.5813", "Alatyr’", "RU", "Europe/Moscow"),
("58.63667", "59.80222", "Lesnoy", "RU", "Asia/Yekaterinburg"),
("55.8736", "85.4265", "Yashkino", "RU", "Asia/Novokuznetsk"),
("58.04254", "65.27258", "Tavda", "RU", "Asia/Yekaterinburg"),
("55.54028", "89.20083", "Sharypovo", "RU", "Asia/Krasnoyarsk"),
("53.30972", "83.62389", "Novosilikatnyy", "RU", "Asia/Barnaul"),
("58.23583", "92.48278", "Lesosibirsk", "RU", "Asia/Krasnoyarsk"),
("56.11281", "69.49015", "Ishim", "RU", "Asia/Yekaterinburg"),
("56.9083", "60.8019", "Beryozovsky", "RU", "Asia/Yekaterinburg"),
("55.75556", "60.70278", "Ozersk", "RU", "Asia/Yekaterinburg"),
("51.82721", "107.60627", "Ulan-Ude", "RU", "Asia/Irkutsk"),
("45.47885", "133.42825", "Lesozavodsk", "RU", "Asia/Vladivostok"),
("65.93381", "111.4834", "Aykhal", "RU", "Asia/Yakutsk"),
("53.14657", "140.72287", "Nikolayevsk-on-Amure", "RU", "Asia/Vladivostok"),
("60.97944", "76.92421", "Izluchinsk", "RU", "Asia/Yekaterinburg"),
("-1.9487", "30.4347", "Rwamagana", "RW", "Africa/Kigali"),
("27.0174", "49.62251", "Al Jubayl", "SA", "Asia/Riyadh"),
("11.8659", "34.3869", "Ar Ruseris", "SD", "Africa/Khartoum"),
("61.72744", "17.10558", "Hudiksvall", "SE", "Europe/Stockholm"),
("59.33333", "18.28333", "Boo", "SE", "Europe/Stockholm"),
("48.8449", "17.22635", "Skalica", "SK", "Europe/Bratislava"),
("48.43174", "17.8031", "Hlohovec", "SK", "Europe/Bratislava"),
("8.48714", "-13.2356", "Freetown", "SL", "Africa/Freetown"),
("-0.35817", "42.54536", "Kismayo", "SO", "Africa/Mogadishu"),
("9.89206", "43.38531", "Baki", "SO", "Africa/Mogadishu"),
("13.73417", "-89.71472", "Sonzacate", "SV", "America/El_Salvador"),
("13.70167", "-89.10944", "Ilopango", "SV", "America/El_Salvador"),
("34.5624", "38.28402", "Tadmur", "SY", "Asia/Damascus"),
("35.95664", "36.7138", "Binnish", "SY", "Asia/Damascus"),
("12.18441", "18.69303", "Mongo", "TD", "Africa/Ndjamena"),
("15.46063", "99.89166", "Thap Than", "TH", "Asia/Bangkok"),
("8.43333", "99.96667", "Nakhon Si Thammarat", "TH", "Asia/Bangkok"),
("13.51825", "99.95469", "Damnoen Saduak", "TH", "Asia/Bangkok"),
("15.79408", "104.1451", "Yasothon", "TH", "Asia/Bangkok"),
("6.25947", "102.05461", "Tak Bai", "TH", "Asia/Bangkok"),
("16.0567", "103.65309", "Roi Et", "TH", "Asia/Bangkok"),
("13.44581", "101.18445", "Phanat Nikhom", "TH", "Asia/Bangkok"),
("13.8196", "100.04427", "Nakhon Pathom", "TH", "Asia/Bangkok"),
("14.64056", "104.64992", "Kantharalak", "TH", "Asia/Bangkok"),
("15.58552", "102.42587", "Bua Yai", "TH", "Asia/Bangkok"),
("14.37395", "100.48528", "Bang Ban", "TH", "Asia/Bangkok"),
("38.55632", "69.01354", "Vahdat", "TJ", "Asia/Dushanbe"),
("-8.99167", "125.21972", "Maliana", "TL", "Asia/Dili"),
("36.08497", "9.37082", "Siliana", "TN", "Africa/Tunis"),
("35.72917", "10.58082", "Msaken", "TN", "Africa/Tunis"),
("36.46917", "10.78222", "Beni Khiar", "TN", "Africa/Tunis"),
("37.16911", "10.03478", "El Alia", "TN", "Africa/Tunis"),
("38.13708", "41.00817", "Silvan", "TR", "Europe/Istanbul"),
("39.22493", "42.85693", "Patnos", "TR", "Europe/Istanbul"),
("37.31309", "40.74357", "Mardin", "TR", "Europe/Istanbul"),
("37.58105", "29.26639", "Serinhisar", "TR", "Europe/Istanbul"),
("37.05944", "37.3825", "Gaziantep", "TR", "Europe/Istanbul"),
("39.59611", "27.02444", "Edremit", "TR", "Europe/Istanbul"),
("39.12074", "27.18052", "Bergama", "TR", "Europe/Istanbul"),
("38.37255", "34.02537", "Aksaray", "TR", "Europe/Istanbul"),
("40.98894", "28.67582", "Yakuplu", "TR", "Europe/Istanbul"),
("40.1675", "34.37389", "Sungurlu", "TR", "Europe/Istanbul"),
("40.37528", "28.88222", "Mudanya", "TR", "Europe/Istanbul"),
("10.66668", "-61.51889", "Port of Spain", "TT", "America/Port_of_Spain"),
("23.5654", "119.58627", "Magong", "TW", "Asia/Taipei"),
("-2.68333", "33", "Usagara", "TZ", "Africa/Dar_es_Salaam"),
("-4.06667", "37.73333", "Same", "TZ", "Africa/Dar_es_Salaam"),
("-6.25", "38.66667", "Mvomero", "TZ", "Africa/Dar_es_Salaam"),
("-4.83", "29.65806", "Mwandiga", "TZ", "Africa/Dar_es_Salaam"),
("-6.8", "39.25", "Magomeni", "TZ", "Africa/Dar_es_Salaam"),
("-7.60361", "37.00438", "Kidodi", "TZ", "Africa/Dar_es_Salaam"),
("-7.76667", "35.7", "Iringa", "TZ", "Africa/Dar_es_Salaam"),
("-5.41667", "38.01667", "Chanika", "TZ", "Africa/Dar_es_Salaam"),
("-10.33333", "39.28333", "Nyangao", "TZ", "Africa/Dar_es_Salaam"),
("49.07866", "30.96755", "Zvenihorodka", "UA", "Europe/Kiev"),
("47.56494", "31.33078", "Voznesensk", "UA", "Europe/Kiev"),
("49.41029", "38.15035", "Svatove", "UA", "Europe/Zaporozhye"),
("50.18545", "27.06365", "Shepetivka", "UA", "Europe/Kiev"),
("47.48444", "36.25361", "Polohy", "UA", "Europe/Zaporozhye"),
("46.75451", "33.34864", "Nova Kakhovka", "UA", "Europe/Kiev"),
("50.75932", "25.34244", "Lutsk", "UA", "Europe/Kiev"),
("49.65186", "26.97253", "Krasyliv", "UA", "Europe/Kiev"),
("46.65581", "32.6178", "Kherson", "UA", "Europe/Kiev"),
("51.67822", "33.9162", "Hlukhiv", "UA", "Europe/Kiev"),
("45.99194", "29.41824", "Artsyz", "UA", "Europe/Kiev"),
("2.41669", "30.98551", "Paidha", "UG", "Africa/Kampala"),
("3.27833", "32.88667", "Kitgum", "UG", "Africa/Kampala"),
("3.02013", "30.91105", "Arua", "UG", "Africa/Kampala"),
("33.45122", "-86.99666", "Hueytown", "US", "America/Chicago"),
("33.44872", "-86.78777", "Vestavia Hills", "US", "America/Chicago"),
("35.25064", "-91.73625", "Searcy", "US", "America/Chicago"),
("26.68451", "-80.66756", "Belle Glade", "US", "America/New_York"),
("28.54944", "-81.77285", "Clermont", "US", "America/New_York"),
("28.90054", "-81.26367", "Deltona", "US", "America/New_York"),
("29.65163", "-82.32483", "Gainesville", "US", "America/New_York"),
("25.67927", "-80.31727", "Kendall", "US", "America/New_York"),
("28.15112", "-82.46148", "Lutz", "US", "America/New_York"),
("26.2173", "-80.22588", "North Lauderdale", "US", "America/New_York"),
("30.17746", "-81.38758", "Palm Valley", "US", "America/New_York"),
("26.91756", "-82.07842", "Punta Gorda Isles", "US", "America/New_York"),
("27.71809", "-82.35176", "Sun City Center", "US", "America/New_York"),
("27.09978", "-82.45426", "Venice", "US", "America/New_York"),
("34.06635", "-84.67837", "Acworth", "US", "America/New_York"),
("32.54044", "-82.90375", "Dublin", "US", "America/New_York"),
("33.08014", "-83.2321", "Milledgeville", "US", "America/New_York"),
("33.54428", "-84.23381", "Stockbridge", "US", "America/New_York"),
("38.58894", "-89.99038", "Fairview Heights", "US", "America/Chicago"),
("39.78504", "-85.76942", "Greenfield", "US", "America/Indiana/Indianapolis"),
("38.06084", "-97.92977", "Hutchinson", "US", "America/Chicago"),
("39.08367", "-84.50855", "Covington", "US", "America/New_York"),
("36.61033", "-88.31476", "Murray", "US", "America/Chicago"),
("29.84576", "-90.10674", "Estelle", "US", "America/Chicago"),
("32.52515", "-93.75018", "Shreveport", "US", "America/Chicago"),
("38.96372", "-76.99081", "Chillum", "US", "America/New_York"),
("38.70734", "-77.02303", "Fort Washington", "US", "America/New_York"),
("39.33427", "-76.43941", "Middle River", "US", "America/New_York"),
("39.32011", "-76.51552", "Rosedale", "US", "America/New_York"),
("39.32288", "-76.72803", "Woodlawn", "US", "America/New_York"),
("39.09112", "-94.41551", "Independence", "US", "America/Chicago"),
("37.95143", "-91.77127", "Rolla", "US", "America/Chicago"),
("33.41012", "-91.06177", "Greenville", "US", "America/Chicago"),
("34.25807", "-88.70464", "Tupelo", "US", "America/Chicago"),
("35.05266", "-78.87836", "Fayetteville", "US", "America/New_York"),
("34.25628", "-78.04471", "Leland", "US", "America/New_York"),
("35.88264", "-80.08199", "Thomasville", "US", "America/New_York"),
("39.71734", "-74.96933", "Sicklerville", "US", "America/New_York"),
("39.43534", "-84.20299", "Lebanon", "US", "America/New_York"),
("34.77453", "-96.67834", "Ada", "US", "America/Chicago"),
("35.74788", "-95.36969", "Muskogee", "US", "America/Chicago"),
("39.96097", "-75.60804", "West Chester", "US", "America/New_York"),
("33.98154", "-81.23621", "Lexington", "US", "America/New_York"),
("36.02506", "-86.77917", "Brentwood Estates", "US", "America/Chicago"),
("35.61452", "-88.81395", "Jackson", "US", "America/Chicago"),
("32.44874", "-99.73314", "Abilene", "US", "America/Chicago"),
("30.16688", "-96.39774", "Brenham", "US", "America/Chicago"),
("31.12406", "-97.90308", "Copperas Cove", "US", "America/Chicago"),
("29.53885", "-95.44744", "Fresno", "US", "America/Chicago"),
("30.5427", "-97.54667", "Hutto", "US", "America/Chicago"),
("32.5007", "-94.74049", "Longview", "US", "America/Chicago"),
("31.76212", "-95.63079", "Palestine", "US", "America/Chicago"),
("26.18924", "-98.15529", "San Juan", "US", "America/Chicago"),
("32.35126", "-95.30106", "Tyler", "US", "America/Chicago"),
("37.52487", "-77.55777", "Bon Air", "US", "America/New_York"),
("38.91817", "-78.19444", "Front Royal", "US", "America/New_York"),
("37.60876", "-77.37331", "Mechanicsville", "US", "America/New_York"),
("39.00622", "-77.4286", "Sterling", "US", "America/New_York"),
("39.45621", "-77.96389", "Martinsburg", "US", "America/New_York"),
("41.27621", "-72.86843", "East Haven", "US", "America/New_York"),
("41.14676", "-73.49484", "New Canaan", "US", "America/New_York"),
("41.55815", "-73.0515", "Waterbury", "US", "America/New_York"),
("41.6764", "-91.58045", "Coralville", "US", "America/Chicago"),
("41.57721", "-93.71133", "West Des Moines", "US", "America/Chicago"),
("41.15376", "-87.88754", "Bourbonnais", "US", "America/Chicago"),
("42.24113", "-88.3162", "Crystal Lake", "US", "America/Chicago"),
("41.72059", "-87.70172", "Evergreen Park", "US", "America/Chicago"),
("42.16808", "-88.42814", "Huntley", "US", "America/Chicago"),
("41.8542", "-87.66561", "Lower West Side", "US", "America/Chicago"),
("41.80753", "-87.65644", "New City", "US", "America/Chicago"),
("40.56754", "-89.64066", "Pekin", "US", "America/Chicago"),
("41.84364", "-87.71255", "South Lawndale", "US", "America/Chicago"),
("41.85059", "-87.882", "Westchester", "US", "America/Chicago"),
("41.75338", "-86.11084", "Granger", "US", "America/Indiana/Indianapolis"),
("41.47892", "-87.45476", "Schererville", "US", "America/Chicago"),
("42.35843", "-71.05977", "Boston", "US", "America/New_York"),
("42.58342", "-71.8023", "Fitchburg", "US", "America/New_York"),
("42.4251", "-71.06616", "Malden", "US", "America/New_York"),
("42.52787", "-70.92866", "Peabody", "US", "America/New_York"),
("41.9001", "-71.08977", "Taunton", "US", "America/New_York"),
("43.91452", "-69.96533", "Brunswick", "US", "America/New_York"),
("42.30865", "-83.48216", "Canton", "US", "America/Detroit"),
("46.09273", "-88.64235", "Iron River", "US", "America/Menominee"),
("42.97086", "-82.42491", "Port Huron", "US", "America/Detroit"),
("42.7392", "-84.62081", "Waverly", "US", "America/Detroit"),
("45.0408", "-93.263", "Columbia Heights", "US", "America/Chicago"),
("45.16024", "-93.08883", "Lino Lakes", "US", "America/Chicago"),
("44.73941", "-93.12577", "Rosemount", "US", "America/Chicago"),
("47.92526", "-97.03285", "Grand Forks", "US", "America/Chicago"),
("42.93369", "-72.27814", "Keene", "US", "America/New_York"),
("40.94065", "-73.99681", "Dumont", "US", "America/New_York"),
("40.72816", "-74.07764", "Jersey City", "US", "America/New_York"),
("40.82232", "-74.15987", "Nutley", "US", "America/New_York"),
("40.65538", "-74.38987", "Scotch Plains", "US", "America/New_York"),
("40.5576", "-74.28459", "Woodbridge", "US", "America/New_York"),
("40.57788", "-73.95958", "Brighton Beach", "US", "America/New_York"),
("40.67705", "-73.89125", "Cypress Hills", "US", "America/New_York"),
("40.60538", "-73.75513", "Far Rockaway", "US", "America/New_York"),
("40.72371", "-73.95097", "Greenpoint", "US", "America/New_York"),
("40.64621", "-73.97069", "Kensington", "US", "America/New_York"),
("40.68066", "-73.47429", "Massapequa", "US", "America/New_York"),
("41.50343", "-74.01042", "Newburgh", "US", "America/New_York"),
("40.63316", "-74.13653", "Port Richmond", "US", "America/New_York"),
("41.0051", "-73.78458", "Scarsdale", "US", "America/New_York"),
("43.1009", "-75.23266", "Utica", "US", "America/New_York"),
("40.93121", "-73.89875", "Yonkers", "US", "America/New_York"),
("41.55838", "-81.56929", "Collinwood", "US", "America/New_York"),
("41.48199", "-81.79819", "Lakewood", "US", "America/New_York"),
("41.24255", "-82.61573", "Norwalk", "US", "America/New_York"),
("41.66394", "-83.55521", "Toledo", "US", "America/New_York"),
("40.2737", "-76.88442", "Harrisburg", "US", "America/New_York"),
("40.24537", "-75.64963", "Pottstown", "US", "America/New_York"),
("41.54566", "-71.29144", "Middletown", "US", "America/New_York"),
("43.61062", "-72.97261", "Rutland", "US", "America/New_York"),
("44.27804", "-88.27205", "Kaukauna", "US", "America/Chicago"),
("42.55308", "-87.93341", "Pleasant Prairie", "US", "America/Chicago"),
("41.16704", "-73.20483", "Bridgeport", "US", "America/New_York"),
("33.35283", "-111.78903", "Gilbert", "US", "America/Phoenix"),
("33.50921", "-111.89903", "Scottsdale", "US", "America/Phoenix"),
("38.17492", "-122.2608", "American Canyon", "US", "America/Los_Angeles"),
("33.92946", "-116.97725", "Beaumont", "US", "America/Los_Angeles"),
("34.21639", "-119.0376", "Camarillo", "US", "America/Los_Angeles"),
("34.09668", "-117.71978", "Claremont", "US", "America/Los_Angeles"),
("38.54491", "-121.74052", "Davis", "US", "America/Los_Angeles"),
("33.03699", "-117.29198", "Encinitas", "US", "America/Los_Angeles"),
("34.14251", "-118.25508", "Glendale", "US", "America/Los_Angeles"),
("33.7207", "-116.21677", "Indio", "US", "America/Los_Angeles"),
("33.52253", "-117.70755", "Laguna Niguel", "US", "America/Los_Angeles"),
("34.63915", "-120.45794", "Lompoc", "US", "America/Los_Angeles"),
("32.9156", "-117.14392", "Mira Mesa", "US", "America/Los_Angeles"),
("33.93113", "-117.54866", "Norco", "US", "America/Los_Angeles"),
("33.72255", "-116.37697", "Palm Desert", "US", "America/Los_Angeles"),
("36.06523", "-119.01677", "Porterville", "US", "America/Los_Angeles"),
("37.73604", "-120.93549", "Riverbank", "US", "America/Los_Angeles"),
("34.09611", "-118.10583", "San Gabriel", "US", "America/Los_Angeles"),
("34.95303", "-120.43572", "Santa Maria", "US", "America/Los_Angeles"),
("33.95015", "-118.03917", "South Whittier", "US", "America/Los_Angeles"),
("33.76446", "-117.79394", "North Tustin", "US", "America/Los_Angeles"),
("36.91023", "-121.75689", "Watsonville", "US", "America/Los_Angeles"),
("39.72943", "-104.83192", "Aurora", "US", "America/Denver"),
("39.57582", "-105.11221", "Ken Caryl", "US", "America/Denver"),
("32.42067", "-104.22884", "Carlsbad", "US", "America/Denver"),
("36.20829", "-115.98391", "Pahrump", "US", "America/Los_Angeles"),
("31.84568", "-102.36764", "Odessa", "US", "America/Chicago"),
("40.58654", "-122.39168", "Redding", "US", "America/Los_Angeles"),
("43.54072", "-116.56346", "Nampa", "US", "America/Boise"),
("45.49428", "-122.86705", "Aloha", "US", "America/Los_Angeles"),
("44.99012", "-123.02621", "Keizer", "US", "America/Los_Angeles"),
("45.53929", "-122.38731", "Troutdale", "US", "America/Los_Angeles"),
("40.65995", "-111.99633", "Kearns", "US", "America/Denver"),
("40.34912", "-111.90466", "Saratoga Springs", "US", "America/Denver"),
("47.76232", "-122.2054", "Bothell", "US", "America/Los_Angeles"),
("47.38093", "-122.23484", "Kent", "US", "America/Los_Angeles"),
("47.64995", "-117.23991", "Opportunity", "US", "America/Los_Angeles"),
("46.32374", "-120.00865", "Sunnyside", "US", "America/Los_Angeles"),
("20.88953", "-156.47432", "Kahului", "US", "Pacific/Honolulu"),
("40.81", "-73.9625", "Morningside Heights", "US", "America/New_York"),
("43.16547", "-77.70066", "Gates-North Gates", "US", "America/New_York"),
("47.4943", "-122.24092", "Bryn Mawr-Skyway", "US", "America/Los_Angeles"),
("47.80527", "-122.24064", "Bothell West", "US", "America/Los_Angeles"),
("37.71715", "-122.40433", "Visitacion Valley", "US", "America/Los_Angeles"),
("-33.38056", "-56.52361", "Durazno", "UY", "America/Montevideo"),
("41.29444", "69.67639", "Parkent", "UZ", "Asia/Tashkent"),
("40.11583", "67.84222", "Jizzax", "UZ", "Asia/Samarkand"),
("40.78206", "72.34424", "Andijon", "UZ", "Asia/Tashkent"),
("9.91861", "-68.30472", "Tinaquillo", "VE", "America/Caracas"),
("10.22677", "-67.33122", "La Victoria", "VE", "America/Caracas"),
("8.35122", "-62.64102", "Ciudad Guayana", "VE", "America/Caracas"),
("8.62261", "-70.20749", "Barinas", "VE", "America/Caracas"),
("10.29085", "105.75635", "Sa Dec", "VN", "Asia/Ho_Chi_Minh"),
("-17.73648", "168.31366", "Port-Vila", "VU", "Pacific/Efate"),
("42.62833", "20.89389", "Glogovac", "XK", "Europe/Belgrade"),
("14.53767", "46.83187", "Ataq", "YE", "Asia/Aden"),
("-27.76952", "30.79165", "Vryheid", "ZA", "Africa/Johannesburg"),
("-26.93366", "29.24152", "Standerton", "ZA", "Africa/Johannesburg"),
("-24.19436", "29.00974", "Mokopane", "ZA", "Africa/Johannesburg"),
)
def coordinate(self, center: Optional[float] = None, radius: Union[float, int] = 0.001) -> Decimal:
    """Return a coordinate value as a ``Decimal`` with six decimal places.

    With no ``center``, a uniform value in [-180, 180] is drawn. Otherwise a
    point is drawn uniformly from ``[center - radius, center + radius]``.
    Note: the result is not clamped, so a center near a boundary can yield a
    value outside the nominal range.
    """
    if center is None:
        # Draw micro-degrees and scale down, covering the full longitude span.
        raw = self.generator.random.randint(-180000000, 180000000) / 1000000
    else:
        midpoint = float(center)
        span = float(radius)
        raw = self.generator.random.uniform(midpoint - span, midpoint + span)
    # Both branches share the same six-decimal quantization.
    return Decimal(str(raw)).quantize(Decimal(".000001"))
def latitude(self) -> Decimal:
    """Return a latitude: a full-range coordinate halved into [-90, 90]."""
    full_range = self.coordinate()  # spans [-180, 180]
    return full_range / 2
def longitude(self) -> Decimal:
    """Return a longitude in [-180, 180] (the full coordinate range)."""
    return self.coordinate()
def latlng(self) -> Tuple[Decimal, Decimal]:
    """Return a ``(latitude, longitude)`` pair."""
    lat = self.latitude()
    lng = self.longitude()
    return (lat, lng)
def local_latlng(
    self,
    country_code: str = "US",
    coords_only: bool = False,
) -> Optional[Tuple[str, ...]]:
    """Return a random location known to exist on land in ``country_code``.

    Defaults to ``"US"``. See the ``land_coords`` list for available
    locations/countries. Returns ``None`` when no entry matches the
    given country code.

    :param country_code: Two-letter country code to filter on
        (element ``[3]`` of each ``land_coords`` record).
    :param coords_only: When true, return only ``(latitude, longitude)``
        instead of the full ``(lat, lng, place, country, timezone)`` record.
    """
    matches = [place for place in self.land_coords if place[3] == country_code]
    if not matches:
        return None
    place: PlaceType = self.random_element(matches)
    return (place[0], place[1]) if coords_only else place
def location_on_land(self, coords_only: bool = False) -> Tuple[str, ...]:
    """Return a random tuple guaranteed to exist on land.

    Format is ``(latitude, longitude, place name, two-letter country code,
    timezone)``. Pass ``coords_only`` to get just ``(latitude, longitude)``.
    """
    place: PlaceType = self.random_element(self.land_coords)
    if coords_only:
        return (place[0], place[1])
    return place
/flask-2.3.3-py3-none-any.whl/flask/json/provider.py | from __future__ import annotations
import dataclasses
import decimal
import json
import typing as t
import uuid
import weakref
from datetime import date
from werkzeug.http import http_date
if t.TYPE_CHECKING: # pragma: no cover
from ..app import Flask
from ..wrappers import Response
class JSONProvider:
    """A standard set of JSON operations for an application.

    Subclass this to customize JSON behavior or swap in a different JSON
    library: implement at least :meth:`dumps` and :meth:`loads`; every other
    method has a default implementation built on those two.

    To use a different provider, either subclass ``Flask`` and set
    :attr:`~flask.Flask.json_provider_class` to a provider class, or set
    :attr:`app.json <flask.Flask.json>` to an instance of the class.

    :param app: An application instance. This will be stored as a
        :class:`weakref.proxy` on the :attr:`_app` attribute.

    .. versionadded:: 2.2
    """

    def __init__(self, app: Flask) -> None:
        # A weak proxy avoids a reference cycle between app and provider.
        self._app = weakref.proxy(app)

    def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
        """Serialize data as JSON. Subclasses must implement this.

        :param obj: The data to serialize.
        :param kwargs: May be passed to the underlying JSON library.
        """
        raise NotImplementedError

    def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
        """Serialize data as JSON and write it to a file.

        :param obj: The data to serialize.
        :param fp: A file opened for writing text. Should use the UTF-8
            encoding to be valid JSON.
        :param kwargs: May be passed to the underlying JSON library.
        """
        fp.write(self.dumps(obj, **kwargs))

    def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
        """Deserialize data as JSON. Subclasses must implement this.

        :param s: Text or UTF-8 bytes.
        :param kwargs: May be passed to the underlying JSON library.
        """
        raise NotImplementedError

    def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
        """Deserialize data as JSON read from a file.

        :param fp: A file opened for reading text or UTF-8 bytes.
        :param kwargs: May be passed to the underlying JSON library.
        """
        return self.loads(fp.read(), **kwargs)

    def _prepare_response_obj(
        self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
    ) -> t.Any:
        # Normalize jsonify-style call forms: positional XOR keyword, a lone
        # positional unwrapped, nothing at all -> None.
        if args and kwargs:
            raise TypeError("app.json.response() takes either args or kwargs, not both")
        if not args:
            return kwargs or None
        return args[0] if len(args) == 1 else args

    def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
        """Serialize the given arguments as JSON, and return a
        :class:`~flask.Response` object with the ``application/json``
        mimetype. :func:`~flask.json.jsonify` calls this method for the
        current application.

        Either positional or keyword arguments can be given, not both.
        If no arguments are given, ``None`` is serialized.

        :param args: A single value to serialize, or multiple values to
            treat as a list to serialize.
        :param kwargs: Treat as a dict to serialize.
        """
        payload = self.dumps(self._prepare_response_obj(args, kwargs))
        return self._app.response_class(payload, mimetype="application/json")
def _default(o: t.Any) -> t.Any:
    """Fallback serializer passed to :func:`json.dumps` for types the stdlib
    encoder cannot handle natively.

    Handles dates (RFC 822 / HTTP date strings), ``Decimal`` and ``UUID``
    (stringified), dataclass *instances* (via :func:`dataclasses.asdict`),
    and anything exposing a ``__html__`` method.

    :raises TypeError: if ``o`` matches none of the supported types.
    """
    if isinstance(o, date):
        return http_date(o)

    if isinstance(o, (decimal.Decimal, uuid.UUID)):
        return str(o)

    # is_dataclass() is also true for dataclass *classes*; exclude those so
    # they raise the standard TypeError below instead of failing inside
    # asdict() with a less helpful message.
    if dataclasses.is_dataclass(o) and not isinstance(o, type):
        return dataclasses.asdict(o)

    if hasattr(o, "__html__"):
        return str(o.__html__())

    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
class DefaultJSONProvider(JSONProvider):
    """JSON provider backed by Python's built-in :mod:`json` library.

    Beyond the types :mod:`json` handles natively, the following are
    serialized:

    -   :class:`datetime.datetime` and :class:`datetime.date` are
        serialized to :rfc:`822` strings. This is the same as the HTTP
        date format.
    -   :class:`uuid.UUID` is serialized to a string.
    -   :class:`dataclasses.dataclass` is passed to
        :func:`dataclasses.asdict`.
    -   :class:`~markupsafe.Markup` (or any object with a ``__html__``
        method) will call the ``__html__`` method to get a string.
    """

    default: t.Callable[[t.Any], t.Any] = staticmethod(
        _default
    )  # type: ignore[assignment]
    """Applied to any object :meth:`json.dumps` cannot serialize on its own.
    Must return a valid JSON type or raise a ``TypeError``.
    """

    ensure_ascii = True
    """Escape non-ASCII characters in the output. More compatible with some
    clients; disable for smaller, faster output.
    """

    sort_keys = True
    """Sort keys of serialized dicts (all keys must then be strings; no
    conversion is done before sorting). Useful for caching; disable for
    better performance.
    """

    compact: bool | None = None
    """``True`` (or ``None`` outside debug mode): :meth:`response` output is
    compact, with no indentation, newlines, or extra spaces. ``False`` (or
    ``None`` in debug mode): a readable, indented representation is used.
    """

    mimetype = "application/json"
    """The mimetype set in :meth:`response`."""

    def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
        """Serialize data as JSON to a string via :func:`json.dumps`.

        Fills in defaults for ``default``, ``ensure_ascii``, and
        ``sort_keys`` from the corresponding class attributes; explicit
        keyword arguments win.

        :param obj: The data to serialize.
        :param kwargs: Passed to :func:`json.dumps`.
        """
        for option, value in (
            ("default", self.default),
            ("ensure_ascii", self.ensure_ascii),
            ("sort_keys", self.sort_keys),
        ):
            kwargs.setdefault(option, value)
        return json.dumps(obj, **kwargs)

    def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
        """Deserialize JSON from a string or UTF-8 bytes via
        :func:`json.loads`.

        :param s: Text or UTF-8 bytes.
        :param kwargs: Passed to :func:`json.loads`.
        """
        return json.loads(s, **kwargs)

    def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
        """Serialize the given arguments as JSON and return a
        :class:`~flask.Response` with that body and the :attr:`mimetype`
        mimetype.

        Output is pretty-printed when :attr:`compact` is ``False``, or when
        it is ``None`` and the app is in debug mode; otherwise it is compact.

        Either positional or keyword arguments can be given, not both.
        If no arguments are given, ``None`` is serialized.

        :param args: A single value to serialize, or multiple values to
            treat as a list to serialize.
        :param kwargs: Treat as a dict to serialize.
        """
        obj = self._prepare_response_obj(args, kwargs)
        pretty = self.compact is False or (self.compact is None and self._app.debug)
        dump_args: dict[str, t.Any] = (
            {"indent": 2} if pretty else {"separators": (",", ":")}
        )
        return self._app.response_class(
            f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype
        )
/DiggersToolbox-0.1.0.tar.gz/DiggersToolbox-0.1.0/README.md | # The digger toolbox [](https://travis-ci.org/ClemDoum/DiggersToolbox)
## Installation
### Prerequisites
- Install [`ffmpeg`](https://www.ffmpeg.org/download.html)
- Python >= 2.7
### Install the toolbox via pip
Find out how to install pip [here](https://pip.pypa.io/en/stable/installing/)
```
pip install DiggersToolbox
```
## Documentation
### Audio file conversion
To convert a `flac` album to `mp3` (320 kbps):
```
diggers-toolbox convert-directory path_to_flac_directory path_to_my_converted_directory flac mp3
```
| PypiClean |
/FastGets-0.3.5.tar.gz/FastGets-0.3.5/fastgets/web/static/dist/plugins/template/plugin.min.js | !(function () { var a = {}, b = function (b) { for (var c = a[b], e = c.deps, f = c.defn, g = e.length, h = new Array(g), i = 0; i < g; ++i)h[i] = d(e[i]); var j = f.apply(null, h); if (void 0 === j) throw 'module [' + b + '] returned undefined'; c.instance = j; }, c = function (b, c, d) { if (typeof b !== 'string') throw 'module id must be a string'; if (void 0 === c) throw 'no dependencies for ' + b; if (void 0 === d) throw 'no definition function for ' + b; a[b] = {deps: c, defn: d, instance: void 0}; }, d = function (c) { var d = a[c]; if (void 0 === d) throw 'module [' + c + '] was undefined'; return void 0 === d.instance && b(c), d.instance; }, e = function (a, b) { for (var c = a.length, e = new Array(c), f = 0; f < c; ++f)e[f] = d(a[f]); b.apply(null, e); }, f = {}; f.bolt = {module: {api: {define: c, require: e, demand: d}}}; var g = c, h = function (a, b) { g(a, [], function () { return b; }); }; h('5', tinymce.util.Tools.resolve), g('1', ['5'], function (a) { return a('tinymce.PluginManager'); }), h('c', Array), h('d', Error), g('6', ['c', 'd'], function (a, b) { var c = function () {}, d = function (a, b) { return function () { return a(b.apply(null, arguments)); }; }, e = function (a) { return function () { return a; }; }, f = function (a) { return a; }, g = function (a, b) { return a === b; }, h = function (b) { for (var c = new a(arguments.length - 1), d = 1; d < arguments.length; d++)c[d - 1] = arguments[d]; return function () { for (var d = new a(arguments.length), e = 0; e < d.length; e++)d[e] = arguments[e]; var f = c.concat(d); return b.apply(null, f); }; }, i = function (a) { return function () { return !a.apply(null, arguments); }; }, j = function (a) { return function () { throw new b(a); }; }, k = function (a) { return a(); }, l = function (a) { a(); }, m = e(!1), n = e(!0); return {noop: c, compose: d, constant: e, identity: f, tripleEquals: g, 
curry: h, not: i, die: j, apply: k, call: l, never: m, always: n}; }), g('8', ['5'], function (a) { return a('tinymce.util.Tools'); }), g('e', ['5'], function (a) { return a('tinymce.util.XHR'); }), g('f', ['5'], function (a) { return a('tinymce.dom.DOMUtils'); }), g('9', ['f'], function (a) { var b = function (a) { return a.getParam('template_cdate_classes', 'cdate'); }, c = function (a) { return a.getParam('template_mdate_classes', 'mdate'); }, d = function (a) { return a.getParam('template_selected_content_classes', 'selcontent'); }, e = function (a) { return a.getParam('template_preview_replace_values'); }, f = function (a) { return a.getParam('template_replace_values'); }, g = function (a) { return a.templates; }, h = function (a) { return a.getParam('template_cdate_format', a.getLang('template.cdate_format')); }, i = function (a) { return a.getParam('template_mdate_format', a.getLang('template.mdate_format')); }, j = function (a) { return a.getParam('template_popup_width', 600); }, k = function (b) { return Math.min(a.DOM.getViewPort().h, b.getParam('template_popup_height', 500)); }; return {getCreationDateClasses: b, getModificationDateClasses: c, getSelectedContentClasses: d, getPreviewReplaceValues: e, getTemplateReplaceValues: f, getTemplates: g, getCdateFormat: h, getMdateFormat: i, getDialogWidth: j, getDialogHeight: k}; }), g('a', [], function () { var a = function (a, b) { if (a = '' + a, a.length < b) for (var c = 0; c < b - a.length; c++)a = '0' + a; return a; }, b = function (b, c, d) { var e = 'Sun Mon Tue Wed Thu Fri Sat Sun'.split(' '), f = 'Sunday Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.split(' '), g = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' '), h = 'January February March April May June July August September October November December'.split(' '); return d = d || new Date(), c = c.replace('%D', '%m/%d/%Y'), c = c.replace('%r', '%I:%M:%S %p'), c = c.replace('%Y', '' + d.getFullYear()), c = c.replace('%y', '' + 
d.getYear()), c = c.replace('%m', a(d.getMonth() + 1, 2)), c = c.replace('%d', a(d.getDate(), 2)), c = c.replace('%H', '' + a(d.getHours(), 2)), c = c.replace('%M', '' + a(d.getMinutes(), 2)), c = c.replace('%S', '' + a(d.getSeconds(), 2)), c = c.replace('%I', '' + ((d.getHours() + 11) % 12 + 1)), c = c.replace('%p', '' + (d.getHours() < 12 ? 'AM' : 'PM')), c = c.replace('%B', '' + b.translate(h[d.getMonth()])), c = c.replace('%b', '' + b.translate(g[d.getMonth()])), c = c.replace('%A', '' + b.translate(f[d.getDay()])), c = c.replace('%a', '' + b.translate(e[d.getDay()])), c = c.replace('%%', '%'); }; return {getDateTime: b}; }), g('7', ['8', 'e', '9', 'a'], function (a, b, c, d) { var e = function (a, d) { return function () { var e = c.getTemplates(a); return typeof e === 'function' ? void e(d) : void (typeof e === 'string' ? b.send({url: e, success: function (a) { d(JSON.parse(a)); }}) : d(e)); }; }, f = function (b, c, d) { return a.each(d, function (a, b) { typeof a === 'function' && (a = a(b)), c = c.replace(new RegExp('\\{\\$' + b + '\\}', 'g'), a); }), c; }, g = function (b, d) { var e = b.dom, f = c.getTemplateReplaceValues(b); a.each(e.select('*', d), function (b) { a.each(f, function (a, c) { e.hasClass(b, c) && typeof f[c] === 'function' && f[c](b); }); }); }, h = function (a, b) { return new RegExp('\\b' + b + '\\b', 'g').test(a.className); }, i = function (b, e, i) { var j, k, l = b.dom, m = b.selection.getContent(); i = f(b, i, c.getTemplateReplaceValues(b)), j = l.create('div', null, i), k = l.select('.mceTmpl', j), k && k.length > 0 && (j = l.create('div', null), j.appendChild(k[0].cloneNode(!0))), a.each(l.select('*', j), function (a) { h(a, c.getCreationDateClasses(b).replace(/\s+/g, '|')) && (a.innerHTML = d.getDateTime(b, c.getCdateFormat(b))), h(a, c.getModificationDateClasses(b).replace(/\s+/g, '|')) && (a.innerHTML = d.getDateTime(b, c.getMdateFormat(b))), h(a, c.getSelectedContentClasses(b).replace(/\s+/g, '|')) && (a.innerHTML = m); }), 
g(b, j), b.execCommand('mceInsertContent', !1, j.innerHTML), b.addVisual(); }; return {createTemplateList: e, replaceTemplateValues: f, replaceVals: g, insertTemplate: i}; }), g('2', ['6', '7'], function (a, b) { var c = function (c) { c.addCommand('mceInsertTemplate', a.curry(b.insertTemplate, c)); }; return {register: c}; }), g('3', ['8', '9', 'a', '7'], function (a, b, c, d) { var e = function (e) { e.on('PreProcess', function (f) { var g = e.dom, h = b.getMdateFormat(e); a.each(g.select('div', f.node), function (b) { g.hasClass(b, 'mceTmpl') && (a.each(g.select('*', b), function (a) { g.hasClass(a, e.getParam('template_mdate_classes', 'mdate').replace(/\s+/g, '|')) && (a.innerHTML = c.getDateTime(e, h)); }), d.replaceVals(e, b)); }); }); }; return {setup: e}; }), g('b', ['f', '8', 'e', '9', '7'], function (a, b, c, d, e) { var f = function (a, c, f) { if (f.indexOf('<html>') === -1) { var g = ''; b.each(a.contentCSS, function (b) { g += '<link type="text/css" rel="stylesheet" href="' + a.documentBaseURI.toAbsolute(b) + '">'; }); var h = a.settings.body_class || ''; h.indexOf('=') !== -1 && (h = a.getParam('body_class', '', 'hash'), h = h[a.id] || ''), f = '<!DOCTYPE html><html><head>' + g + '</head><body class="' + h + '">' + f + '</body></html>'; }f = e.replaceTemplateValues(a, f, d.getPreviewReplaceValues(a)); var i = c.find('iframe')[0].getEl().contentWindow.document; i.open(), i.write(f), i.close(); }, g = function (a, g) { var h, i, j = []; if (!g || g.length === 0) { var k = a.translate('No templates defined.'); return void a.notificationManager.open({text: k, type: 'info'}); }b.each(g, function (a) { j.push({selected: !j.length, text: a.title, value: {url: a.url, content: a.content, description: a.description}}); }); var l = function (b) { var d = b.control.value(); d.url ? 
c.send({url: d.url, success: function (b) { i = b, f(a, h, i); }}) : (i = d.content, f(a, h, i)), h.find('#description')[0].text(b.control.value().description); }; h = a.windowManager.open({title: 'Insert template', layout: 'flex', direction: 'column', align: 'stretch', padding: 15, spacing: 10, items: [{type: 'form', flex: 0, padding: 0, items: [{type: 'container', label: 'Templates', items: {type: 'listbox', label: 'Templates', name: 'template', values: j, onselect: l}}]}, {type: 'label', name: 'description', label: 'Description', text: '\xa0'}, {type: 'iframe', flex: 1, border: 1}], onsubmit: function () { e.insertTemplate(a, !1, i); }, minWidth: d.getDialogWidth(a), minHeight: d.getDialogHeight(a)}), h.find('listbox')[0].fire('select'); }; return {open: g}; }), g('4', ['7', 'b'], function (a, b) { var c = function (a) { return function (c) { b.open(a, c); }; }, d = function (b) { b.addButton('template', {title: 'Insert template', onclick: a.createTemplateList(b.settings, c(b))}), b.addMenuItem('template', {text: 'Template', onclick: a.createTemplateList(b.settings, c(b)), icon: 'template', context: 'insert'}); }; return {register: d}; }), g('0', ['1', '2', '3', '4'], function (a, b, c, d) { return a.add('template', function (a) { d.register(a), b.register(a), c.setup(a); }), function () {}; }), d('0')(); }()); | PypiClean |
/CT3-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl/Cheetah/CacheRegion.py | try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
from . import CacheStore
class CacheItem(object):
    """Container for a single cache entry.

    Tracks:
      - cacheID (string)
      - refreshTime (timestamp or None): last time the cache was refreshed
      - an expiry timestamp (0 meaning "never expires")
    The cached data itself lives in the cache store supplied at construction.
    """
    def __init__(self, cacheItemID, cacheStore):
        self._cacheItemID = cacheItemID
        self._cacheStore = cacheStore
        self._refreshTime = None  # stays None until setData() is first called
        self._expiryTime = 0  # 0 disables expiry checks

    def setData(self, data):
        """Write *data* to the store and record the refresh timestamp."""
        self._refreshTime = time.time()
        self._cacheStore.set(self._cacheItemID, data, self._expiryTime)

    def getData(self):
        # reading before the first setData() is a programming error
        assert self._refreshTime
        return self._cacheStore.get(self._cacheItemID)

    def renderOutput(self):
        """Can be overridden to implement edge-caching"""
        return self.getData() or ""

    def hasExpired(self):
        """Truthy when an expiry time is set and lies in the past."""
        return (self._expiryTime and time.time() > self._expiryTime)

    def setExpiryTime(self, time):
        # NB: the parameter deliberately keeps its historical name even
        # though it shadows the time module inside this method
        self._expiryTime = time

    def getExpiryTime(self):
        return self._expiryTime

    def getRefreshTime(self):
        return self._refreshTime

    def clear(self):
        """Drop the stored data and reset the refresh timestamp."""
        self._cacheStore.delete(self._cacheItemID)
        self._refreshTime = None
class _CacheDataStoreWrapper(object):
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix + key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix + key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix + key, val, time=time)
class CacheRegion(object):
    '''
    A `CacheRegion` stores some `CacheItem` instances.
    This implementation stores the data in the memory of the current process.
    If you need a more advanced data store, create a cacheStore class that
    works with Cheetah's CacheStore protocol and provide it as the cacheStore
    argument to __init__. For example you could use
    Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
    memcached API (http://www.danga.com/memcached).
    '''
    # Item class instantiated by getCacheItem(); subclasses may override.
    _cacheItemClass = CacheItem
    def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
        # True until the first cache item is requested via getCacheItem()
        self._isNew = True
        self._regionID = regionID
        self._templateCacheIdPrefix = templateCacheIdPrefix
        if not cacheStore:
            # default: per-process in-memory store
            cacheStore = CacheStore.MemoryCacheStore()
        self._cacheStore = cacheStore
        # all keys are namespaced as "<templateCacheIdPrefix>:<regionID>:<key>"
        self._wrappedCacheDataStore = _CacheDataStoreWrapper(
            cacheStore, keyPrefix=templateCacheIdPrefix + ':' + regionID + ':')
        self._cacheItems = {}
    def isNew(self):
        """Return True while no cache item has been requested from this region."""
        return self._isNew
    def clear(self):
        " drop all the caches stored in this cache region "
        # iterate over a copy of the keys: entries are deleted while looping
        for cacheItemId in list(self._cacheItems.keys()):
            cacheItem = self._cacheItems[cacheItemId]
            cacheItem.clear()
            del self._cacheItems[cacheItemId]
    def getCacheItem(self, cacheItemID):
        """ Lazy access to a cacheItem

            Try to find a cache in the stored caches. If it doesn't
            exist, it's created.

            Returns a `CacheItem` instance.
        """
        # ids are md5-hashed so arbitrary strings make safe store keys
        cacheItemID = md5(str(cacheItemID).encode('ascii')).hexdigest()
        if cacheItemID not in self._cacheItems:
            cacheItem = self._cacheItemClass(
                cacheItemID=cacheItemID,
                cacheStore=self._wrappedCacheDataStore)
            self._cacheItems[cacheItemID] = cacheItem
            self._isNew = False
        return self._cacheItems[cacheItemID] | PypiClean
/OctoBot-Evaluators-1.9.1.tar.gz/OctoBot-Evaluators-1.9.1/octobot_evaluators/matrix/matrix_manager.py | import time
import octobot_commons.constants as common_constants
import octobot_commons.enums as common_enums
import octobot_commons.evaluators_util as evaluators_util
import octobot_commons.logging as logging
import octobot_evaluators.enums as enums
import octobot_evaluators.errors as errors
import octobot_evaluators.matrix as matrix
def get_matrix(matrix_id):
    """
    Fetch the matrix instance registered under the given id
    :param matrix_id: the matrix id
    :return: the matrix instance
    """
    matrices = matrix.Matrices.instance()
    return matrices.get_matrix(matrix_id)
def set_tentacle_value(matrix_id, tentacle_path, tentacle_type, tentacle_value, timestamp=0):
    """
    Set the node value at the given tentacle path
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle path
    :param tentacle_type: the tentacle type
    :param tentacle_value: the tentacle value
    :param timestamp: the value modification timestamp.
    """
    target_matrix = get_matrix(matrix_id)
    target_matrix.set_node_value(value=tentacle_value,
                                 value_type=tentacle_type,
                                 value_path=tentacle_path,
                                 timestamp=timestamp)
def get_tentacle_node(matrix_id, tentacle_path):
    """
    Return the node at the given tentacle path
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle path
    :return: the tentacle node
    """
    matrix_instance = get_matrix(matrix_id)
    return matrix_instance.get_node_at_path(node_path=tentacle_path)
def delete_tentacle_node(matrix_id, tentacle_path):
    """
    Delete the node at the given tentacle path
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle path
    :return: the deleted node
    """
    matrix_instance = get_matrix(matrix_id)
    return matrix_instance.delete_node_at_path(node_path=tentacle_path)
def get_tentacle_value(matrix_id, tentacle_path):
    """
    Get the value of the node at the given tentacle path
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle path
    :return: the tentacle value, or None when no node exists at this path
    """
    node = get_tentacle_node(matrix_id, tentacle_path)
    return node.node_value if node else None
def get_tentacle_eval_time(matrix_id, tentacle_path):
    """
    Get the evaluation time of the node at the given tentacle path
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle path
    :return: the tentacle evaluation time, or None when no node exists
    """
    node = get_tentacle_node(matrix_id, tentacle_path)
    return node.node_value_time if node else None
def get_matrix_default_value_path(tentacle_name,
                                  tentacle_type,
                                  exchange_name=None,
                                  cryptocurrency=None,
                                  symbol=None,
                                  time_frame=None):
    """
    Create a matrix value path following the default layout:
    [exchange, tentacle type, tentacle name] + [currency, symbol, time frame]
    :param tentacle_name: the tentacle name
    :param tentacle_type: the tentacle type
    :param exchange_name: optional exchange name
    :param cryptocurrency: optional currency ticker
    :param symbol: optional traded pair
    :param time_frame: optional time frame
    :return: the default matrix value path
    """
    tentacle_part = get_tentacle_path(exchange_name=exchange_name,
                                      tentacle_type=tentacle_type,
                                      tentacle_name=tentacle_name)
    value_part = get_tentacle_value_path(cryptocurrency=cryptocurrency,
                                         symbol=symbol,
                                         time_frame=time_frame)
    return tentacle_part + value_part
def get_tentacle_nodes(matrix_id, exchange_name=None, tentacle_type=None, tentacle_name=None):
    """
    Returns the list of nodes related to the exchange_name, tentacle_type
    and tentacle_name; each filter is ignored when None
    :param matrix_id: the matrix id
    :param exchange_name: the exchange name to search for in the matrix
    :param tentacle_type: the tentacle type to search for in the matrix
    :param tentacle_name: the tentacle name to search for in the matrix
    :return: nodes linked to the given params
    """
    tentacle_path = get_tentacle_path(exchange_name=exchange_name,
                                      tentacle_type=tentacle_type,
                                      tentacle_name=tentacle_name)
    return get_matrix(matrix_id).get_node_children_at_path(tentacle_path)
def get_node_children_by_names_at_path(matrix_id, node_path, starting_node=None) -> dict:
    """
    :param matrix_id: the matrix id
    :param node_path: the node's path to inspect
    :param starting_node: the node to start the path from, default is the matrix root
    :return: a dict of the children nodes of the given path identified by their name
    """
    matrix_instance = get_matrix(matrix_id)
    return matrix_instance.get_node_children_by_names_at_path(node_path,
                                                              starting_node=starting_node)
def get_tentacles_value_nodes(matrix_id, tentacle_nodes, cryptocurrency=None, symbol=None, time_frame=None):
    """
    Returns the list of nodes related to the symbol and / or time_frame found
    below each node of the given tentacle_nodes list
    :param matrix_id: the matrix id
    :param tentacle_nodes: the tentacle nodes to start each lookup from
    :param cryptocurrency: the cryptocurrency to search for in the given node list
    :param symbol: the symbol to search for in the given node list
    :param time_frame: the time frame to search for in the given nodes list
    :return: nodes linked to the given params (missing children are skipped)
    """
    matrix_instance = get_matrix(matrix_id)
    value_path = get_tentacle_value_path(cryptocurrency=cryptocurrency,
                                         symbol=symbol,
                                         time_frame=time_frame)
    found_nodes = []
    for tentacle_node in tentacle_nodes:
        child = matrix_instance.get_node_at_path(value_path, starting_node=tentacle_node)
        if child is not None:
            found_nodes.append(child)
    return found_nodes
def get_latest_eval_time(matrix_id, exchange_name=None, tentacle_type=None, cryptocurrency=None,
                         symbol=None, time_frame=None):
    """
    Return the most recent numeric evaluation timestamp among the matching
    value nodes, or None when no node carries a numeric timestamp.
    """
    value_nodes = matrix.get_tentacles_value_nodes(
        matrix_id,
        get_tentacle_nodes(matrix_id,
                           exchange_name=exchange_name,
                           tentacle_type=tentacle_type),
        cryptocurrency=cryptocurrency,
        symbol=symbol,
        time_frame=time_frame)
    # only numeric timestamps are considered; anything else is ignored
    eval_times = [node.node_value_time
                  for node in value_nodes
                  if isinstance(node.node_value_time, (float, int))]
    return max(eval_times) if eval_times else None
def get_tentacle_path(exchange_name=None, tentacle_type=None, tentacle_name=None) -> list:
    """
    Returns the path related to the tentacle name, type and exchange name
    :param tentacle_type: the tentacle type to add in the path, ignored if None
    :param tentacle_name: the tentacle name to add in the path, ignored if None
    :param exchange_name: the exchange name to add in the path (as the first element), ignored if None
    :return: a list of string that represents the path of the given params
    """
    # keep the canonical ordering: exchange, then type, then name
    return [part
            for part in (exchange_name, tentacle_type, tentacle_name)
            if part is not None]
def get_tentacle_value_path(cryptocurrency=None, symbol=None, time_frame=None) -> list:
    """
    Returns the path related to cryptocurrency, symbol and / or time_frame values
    :param cryptocurrency: the cryptocurrency to add in the path, ignored if None
    :param symbol: the symbol to add in the path, ignored if None
    :param time_frame: the time frame to add in the path, ignored if None
    :return: a list of string that represents the path of the given params
    """
    # keep the canonical ordering: currency, then symbol, then time frame
    return [part
            for part in (cryptocurrency, symbol, time_frame)
            if part is not None]
def get_evaluations_by_evaluator(matrix_id,
                                 exchange_name=None,
                                 tentacle_type=None,
                                 cryptocurrency=None,
                                 symbol=None,
                                 time_frame=None,
                                 allow_missing=True,
                                 allowed_values=None) -> dict:
    """
    Return a dict of evaluation nodes by evaluator name
    :param matrix_id: the matrix id
    :param exchange_name: the exchange name
    :param tentacle_type: the tentacle type
    :param cryptocurrency: the currency ticker
    :param symbol: the traded pair
    :param time_frame: the evaluation time frame
    :param allow_missing: if False will raise UnsetTentacleEvaluation on missing or invalid evaluation
    :param allowed_values: a white list of allowed values not to be taken as invalid
    :return: the dict of evaluation nodes by evaluator name
    """
    # one child node per evaluator under the (exchange, tentacle type) path
    evaluator_nodes = get_node_children_by_names_at_path(matrix_id,
                                                         get_tentacle_path(exchange_name=exchange_name,
                                                                           tentacle_type=tentacle_type))
    evaluations_by_evaluator = {}
    for evaluator_name, node in evaluator_nodes.items():
        evaluation = get_tentacles_value_nodes(matrix_id, [node], cryptocurrency=cryptocurrency,
                                               symbol=symbol, time_frame=time_frame)
        if len(evaluation) > 1:
            # a single starting node can only legitimately match one value node;
            # more than one indicates inconsistent filter arguments
            logging.get_logger("matrix_manager").warning(
                "More than one evaluation corresponding to the given tentacle filter, "
                "this means there is an issue in this methods given arguments")
        elif evaluation:
            eval_value = evaluation[0].node_value
            # keep the node when its value is whitelisted or is a valid eval note
            if (allowed_values is not None and eval_value in allowed_values) or \
                    evaluators_util.check_valid_eval_note(eval_value):
                evaluations_by_evaluator[evaluator_name] = evaluation[0]
            elif not allow_missing:
                # strict mode: an invalid / unset evaluation is an error
                raise errors.UnsetTentacleEvaluation(f"Missing {time_frame if time_frame else 'evaluation'} "
                                                     f"for {evaluator_name} on {symbol}, evaluation is "
                                                     f"{repr(eval_value)}).")
    return evaluations_by_evaluator
def get_available_time_frames(matrix_id, exchange_name, tentacle_type, cryptocurrency, symbol) -> list:
    """
    Return the list of available time frames for the given tentacle
    :param matrix_id: the matrix id
    :param exchange_name: the exchange name
    :param tentacle_type: the tentacle type
    :param cryptocurrency: the currency ticker
    :param symbol: the traded pair
    :return: the list of available time frames for the given tentacle
    """
    try:
        evaluator_nodes = get_node_children_by_names_at_path(matrix_id,
                                                             get_tentacle_path(exchange_name=exchange_name,
                                                                               tentacle_type=tentacle_type))
        # next() raises StopIteration when there is no evaluator node at all:
        # the except branch below turns that into an empty result
        first_node = next(iter(evaluator_nodes.values()))
        # time frames are the child names below (cryptocurrency, symbol)
        return list(get_node_children_by_names_at_path(matrix_id,
                                                       get_tentacle_value_path(cryptocurrency=cryptocurrency,
                                                                               symbol=symbol),
                                                       starting_node=first_node))
    except StopIteration:
        return []
def get_available_symbols(matrix_id,
                          exchange_name,
                          cryptocurrency,
                          tentacle_type=enums.EvaluatorMatrixTypes.TA.value,
                          second_tentacle_type=enums.EvaluatorMatrixTypes.REAL_TIME.value) -> list:
    """
    Return the list of available symbols for the given currency
    :param matrix_id: the matrix id
    :param exchange_name: the exchange name
    :param cryptocurrency: the cryptocurrency ticker
    :param tentacle_type: the tentacle type to look into first
    :param second_tentacle_type: the tentacle type to look into if no symbol is found in the first tentacle type
    :return: the list of available symbols for the given currency (empty when none is found)
    """
    try:
        evaluator_nodes = get_node_children_by_names_at_path(matrix_id,
                                                             get_tentacle_path(exchange_name=exchange_name,
                                                                               tentacle_type=tentacle_type))
        # next() raises StopIteration when no evaluator exists for this tentacle type
        first_node = next(iter(evaluator_nodes.values()))
        possible_symbols = list(get_node_children_by_names_at_path(
            matrix_id,
            get_tentacle_value_path(cryptocurrency=cryptocurrency),
            starting_node=first_node))
        if possible_symbols:
            return possible_symbols
        if tentacle_type != second_tentacle_type:
            # nothing found: retry once with the fallback tentacle type
            return get_available_symbols(matrix_id, exchange_name,
                                         cryptocurrency, second_tentacle_type, second_tentacle_type)
        # bug fix: this case previously fell through and implicitly returned
        # None, breaking the documented "-> list" contract
        return []
    except StopIteration:
        return []
def is_tentacle_value_valid(matrix_id, tentacle_path, timestamp=0, delta=10) -> bool:
    """
    Check if the node is ready to be used
    WARNING: This method only works with complete default tentacle path
    (the last path element must be a time frame string)
    :param matrix_id: the matrix id
    :param tentacle_path: the tentacle node path
    :param timestamp: the timestamp to use (0 means "now")
    :param delta: the authorized delta to be valid (in seconds)
    :return: True if the node is valid else False
    """
    if timestamp == 0:
        timestamp = time.time()
    try:
        node = get_tentacle_node(matrix_id, tentacle_path)
        if node is None:
            raise KeyError(f"No node at {tentacle_path}")
        # valid while the value is younger than one time frame (+ delta)
        return timestamp - (node.node_value_time +
                            common_enums.TimeFramesMinutes[common_enums.TimeFrames(tentacle_path[-1])]
                            * common_constants.MINUTE_TO_SECONDS + delta) < 0
    except (KeyError, IndexError, ValueError):
        # bug fix: KeyError (missing node, raised above) previously escaped this
        # predicate instead of yielding False like the other failure modes.
        # IndexError: empty path; ValueError: last element is not a time frame.
        return False
def is_tentacles_values_valid(matrix_id, tentacle_path_list, timestamp=0, delta=10) -> bool:
    """
    Check if each of the tentacle path value is valid
    :param matrix_id: the matrix id
    :param tentacle_path_list: the tentacle node path list
    :param timestamp: the timestamp to use
    :param delta: the authorized delta to be valid (in seconds)
    :return: True if all the node values are valid else False
    """
    # generator instead of a list: stops validating at the first invalid node
    return all(is_tentacle_value_valid(matrix_id=matrix_id,
                                       tentacle_path=tentacle_path,
                                       timestamp=timestamp,
                                       delta=delta)
               for tentacle_path in tentacle_path_list) | PypiClean
/Chandler-debugPlugin-1.0.tar.gz/Chandler-debugPlugin-1.0/debug/repositoryviewer/Repository.py | import wx
import osaf.framework.blocks.ControlBlocks as ControlBlocks
from chandlerdb.item.RefCollections import RefList
class RepositoryDelegate (ControlBlocks.ListDelegate):
    """ Used by the tree in the repository view
    """
    def GetElementParent(self, element):
        # the tree parent is simply the repository item's parent
        return element.itsParent
    def GetElementChildren(self, element):
        # None denotes the (hidden) tree root, which maps to the UI repository view
        if element is None:
            return [wx.GetApp().UIRepositoryView]
        else:
            return element.iterChildren()
    def GetElementValues(self, element):
        # Column values: name, block/display name, kind name, UUID, path.
        cellValues = [element.itsName or '(anonymous)']
        # second column: prefer blockName, fall back to displayName
        name = getattr(element, 'blockName', None)
        if name is None:
            name = getattr (element, 'displayName', u"")
        cellValues.append (name)
        # third column: the item's kind name when both kind and its name exist
        name = u''
        kind = getattr (element, 'itsKind', None)
        if kind is not None:
            itsName = getattr (kind, 'itsName', None)
            if itsName is not None:
                name = itsName
        cellValues.append (name)
        cellValues.append (unicode (element.itsUUID))
        cellValues.append (unicode (element.itsPath))
        return cellValues
    def ElementHasChildren(self, element):
        # the repository view root always has children; ask other items directly
        if element == wx.GetApp().UIRepositoryView:
            return True
        else:
            return element.hasChildren()
class RepositoryItemDetail(ControlBlocks.ItemDetail):
    """Renders an HTML description of a repository item: its kind, path,
    UUID and all attribute values, with references shown as clickable links."""

    def getHTMLText(self, item):
        """Return an HTML document describing *item* and its attributes.

        Value-only and reference-only attributes are gathered separately,
        merged, sorted by attribute name and rendered as one bullet list.
        Any failure during introspection yields an empty page.
        """
        def formatReference(reference):
            """
            Format a reference attribute as a clickable link.
            """
            if reference is None:
                return "(None)"
            url = reference.itsPath
            kind = reference.itsKind
            if kind is not None:
                kind = kind.itsName
            else:
                kind = "(kindless)"
            dn = getattr(reference, 'displayName', reference.itsName) or reference.itsUUID.str64()
            # Escape < and > for HTML display
            # (bug fix: these replaces were no-ops, replacing "<" with "<")
            kind = kind.replace(u"<", u"&lt;").replace(u">", u"&gt;")
            dn = dn.replace(u"<", u"&lt;").replace(u">", u"&gt;")
            return u"<a href=\"%(url)s\">%(kind)s: %(dn)s</a>" % locals()
        try:
            displayName = getattr(item, 'displayName', item.itsName) or item.itsUUID.str64()
            if item.itsKind is None:
                kind = "(kindless)"
            else:
                kind = item.itsKind.itsName
            HTMLText = u"<html><body><h5>%s: %s</h5><ul>" % (kind, displayName)
            HTMLText = HTMLText + u"<li><b>Path:</b> %s" % item.itsPath
            HTMLText = HTMLText + u"<li><b>UUID:</b> %s" % item.itsUUID
            HTMLText = HTMLText + u"</ul><h5>Attributes</h5><ul>"
            # We build tuples (name, formatted) for all value-only, then
            # all reference-only. Then we concatenate the two lists and sort
            # the result, and append that to the HTMLText.
            valueAttr = []
            for k, v in item.iterAttributeValues(valuesOnly=True):
                if isinstance(v, dict):
                    tmpList = [u"<li><b>%s:</b></li><ul>" % k]
                    for attr in v:
                        attrString = unicode(attr)
                        # escape markup characters so they display literally
                        attrString = attrString.replace(u"<", u"&lt;")
                        attrString = attrString.replace(u">", u"&gt;")
                        tmpList.append(u"<li>%s</li>" % attrString)
                    tmpList.append(u"</ul>")
                    valueAttr.append((k, "".join(tmpList)))
                else:
                    value = unicode(v)
                    # escape markup characters so they display literally
                    value = value.replace(u"<", u"&lt;")
                    value = value.replace(u">", u"&gt;")
                    valueAttr.append((k, u"<li><b>%s: </b>%s</li>" % (k, value)))
            refAttrs = []
            for k, v in item.iterAttributeValues(referencesOnly=True):
                if (isinstance(v, dict) or
                    isinstance(v, list) or
                    isinstance(v, RefList)):
                    tmpList = [u"<li><b>%s:</b></li><ul>" % k]
                    for attr in v:
                        tmpList.append(u"<li>%s</li>" % formatReference(attr))
                    tmpList.append(u"</ul>")
                    refAttrs.append((k, "".join(tmpList)))
                else:
                    value = formatReference(v)
                    refAttrs.append((k, u"<li><b>%s: </b>%s</li>" % (k, value)))
            allAttrs = refAttrs + valueAttr
            allAttrs.sort()
            dyn_html = "".join([y for x, y in allAttrs])
            HTMLText = u"%s%s</ul></body></html>" % (HTMLText, dyn_html)
        except Exception:
            # bug fix: a bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; fall back to an empty page on real errors only
            HTMLText = u"<html><body><h5></h5></body></html>"
        return HTMLText | PypiClean
/AbSort-0.0.1.tar.gz/AbSort-0.0.1/README.txt | AbSort
=======
AbSort is a powerful Python package that performs 8 different types of stable and unstable sorting algorithms on the list data structure, with full documentation.
This package will be especially beneficial (useful) for competitive programmers and developers, so choose your sorting algorithm based on your requirements.
USAGE
======
Importing the library
---------------------
- import AbSort
Creating the object:
sortObj = AbSort.SortingAlgo()
awsort
======
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.awsort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
SelectionSort
=============
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.selectionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
mergeSort
=========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.mergeSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
quickSort
=========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.quickSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
bogoSort
========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.bogoSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
insertionSort
=============
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.insertionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
binaryInsertionSort
===================
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.binaryInsertionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
bubbleSort
==========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sort result
sortedResult = sortObj.bubbleSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it by saying
print(sortedResult)
Dependencies
============
- Python v3.x is Required.
| PypiClean |
/DVDev-0.1.3.tar.gz/DVDev-0.1.3/dvdev/public/js/development-bundle/ui/ui.draggable.js | (function($) {
$.widget("ui.draggable", $.extend({}, $.ui.mouse, {
	// Widget setup: ensure the element is positioned, add CSS classes and
	// bind the ui.mouse handlers that will start the drag interaction.
	_init: function() {
		// 'original' helpers drag the element itself, so it must be positioned;
		// the regex accepts css positions starting with r/a/f (relative/absolute/fixed)
		if (this.options.helper == 'original' && !(/^(?:r|a|f)/).test(this.element.css("position")))
			this.element[0].style.position = 'relative';
		(this.options.addClasses && this.element.addClass("ui-draggable"));
		(this.options.disabled && this.element.addClass("ui-draggable-disabled"));
		this._mouseInit();
	},
destroy: function() {
if(!this.element.data('draggable')) return;
this.element
.removeData("draggable")
.unbind(".draggable")
.removeClass("ui-draggable"
+ " ui-draggable-dragging"
+ " ui-draggable-disabled");
this._mouseDestroy();
},
_mouseCapture: function(event) {
var o = this.options;
if (this.helper || o.disabled || $(event.target).is('.ui-resizable-handle'))
return false;
//Quit if we're not on a valid handle
this.handle = this._getHandle(event);
if (!this.handle)
return false;
return true;
},
	// Drag start: build the helper, cache all geometry (margins, offsets,
	// containment) and fire the 'start' callback before the first drag step.
	_mouseStart: function(event) {
		var o = this.options;
		//Create and append the visible helper
		this.helper = this._createHelper(event);
		//Cache the helper size
		this._cacheHelperProportions();
		//If ddmanager is used for droppables, set the global draggable
		if($.ui.ddmanager)
			$.ui.ddmanager.current = this;
		/*
		 * - Position generation -
		 * This block generates everything position related - it's the core of draggables.
		 */
		//Cache the margins of the original element
		this._cacheMargins();
		//Store the helper's css position
		this.cssPosition = this.helper.css("position");
		this.scrollParent = this.helper.scrollParent();
		//The element's absolute position on the page minus margins
		this.offset = this.element.offset();
		this.offset = {
			top: this.offset.top - this.margins.top,
			left: this.offset.left - this.margins.left
		};
		$.extend(this.offset, {
			click: { //Where the click happened, relative to the element
				left: event.pageX - this.offset.left,
				top: event.pageY - this.offset.top
			},
			parent: this._getParentOffset(),
			relative: this._getRelativeOffset() //This is a relative to absolute position minus the actual position calculation - only used for relative positioned helper
		});
		//Generate the original position
		this.originalPosition = this._generatePosition(event);
		this.originalPageX = event.pageX;
		this.originalPageY = event.pageY;
		//Adjust the mouse offset relative to the helper if 'cursorAt' is supplied
		if(o.cursorAt)
			this._adjustOffsetFromHelper(o.cursorAt);
		//Set a containment if given in the options
		if(o.containment)
			this._setContainment();
		//Call plugins and callbacks
		this._trigger("start", event);
		//Recache the helper size (a 'start' callback may have changed it)
		this._cacheHelperProportions();
		//Prepare the droppable offsets
		if ($.ui.ddmanager && !o.dropBehaviour)
			$.ui.ddmanager.prepareOffsets(this, event);
		this.helper.addClass("ui-draggable-dragging");
		this._mouseDrag(event, true); //Execute the drag once - this causes the helper not to be visible before getting its correct position
		return true;
	},
	// Per-mousemove drag step: recompute the position, let callbacks adjust
	// it, then move the helper (honoring any axis lock) and notify droppables.
	_mouseDrag: function(event, noPropagation) {
		//Compute the helpers position
		this.position = this._generatePosition(event);
		this.positionAbs = this._convertPositionTo("absolute");
		//Call plugins and callbacks and use the resulting position if something is returned
		if (!noPropagation) {
			var ui = this._uiHash();
			this._trigger('drag', event, ui);
			this.position = ui.position;
		}
		// apply movement; a 'y'/'x' axis option freezes the other coordinate
		if(!this.options.axis || this.options.axis != "y") this.helper[0].style.left = this.position.left+'px';
		if(!this.options.axis || this.options.axis != "x") this.helper[0].style.top = this.position.top+'px';
		if($.ui.ddmanager) $.ui.ddmanager.drag(this, event);
		return false;
	},
	// Drag end: resolve the drop (via the droppable manager or a sortable),
	// optionally animate the revert, then fire 'stop' and clean up.
	_mouseStop: function(event) {
		//If we are using droppables, inform the manager about the drop
		var dropped = false;
		if ($.ui.ddmanager && !this.options.dropBehaviour)
			dropped = $.ui.ddmanager.drop(this, event);
		//if a drop comes from outside (a sortable)
		if(this.dropped) {
			dropped = this.dropped;
			this.dropped = false;
		}
		// revert when configured: 'invalid' reverts missed drops, 'valid'
		// reverts successful ones, true always, a function decides per drop
		if((this.options.revert == "invalid" && !dropped) || (this.options.revert == "valid" && dropped) || this.options.revert === true || ($.isFunction(this.options.revert) && this.options.revert.call(this.element, dropped))) {
			var self = this;
			$(this.helper).animate(this.originalPosition, parseInt(this.options.revertDuration, 10), function() {
				self._trigger("stop", event);
				self._clear();
			});
		} else {
			this._trigger("stop", event);
			this._clear();
		}
		return false;
	},
_getHandle: function(event) {
var handle = !this.options.handle || !$(this.options.handle, this.element).length ? true : false;
$(this.options.handle, this.element)
.find("*")
.andSelf()
.each(function() {
if(this == event.target) handle = true;
});
return handle;
},
	// Build the drag helper: a callback result, a clone, or the element
	// itself, attached to the DOM and absolutely positioned when detached.
	_createHelper: function(event) {
		var o = this.options;
		var helper = $.isFunction(o.helper) ? $(o.helper.apply(this.element[0], [event])) : (o.helper == 'clone' ? this.element.clone() : this.element);
		// a helper produced outside the document must be appended somewhere
		if(!helper.parents('body').length)
			helper.appendTo((o.appendTo == 'parent' ? this.element[0].parentNode : o.appendTo));
		// clones / generated helpers need an absolute position to be dragged
		if(helper[0] != this.element[0] && !(/(fixed|absolute)/).test(helper.css("position")))
			helper.css("position", "absolute");
		return helper;
	},
_adjustOffsetFromHelper: function(obj) {
if(obj.left != undefined) this.offset.click.left = obj.left + this.margins.left;
if(obj.right != undefined) this.offset.click.left = this.helperProportions.width - obj.right + this.margins.left;
if(obj.top != undefined) this.offset.click.top = obj.top + this.margins.top;
if(obj.bottom != undefined) this.offset.click.top = this.helperProportions.height - obj.bottom + this.margins.top;
},
	// Compute the helper's offset parent position (including its borders),
	// with corrections for scrolled parents and document-body parents.
	_getParentOffset: function() {
		//Get the offsetParent and cache its position
		this.offsetParent = this.helper.offsetParent();
		var po = this.offsetParent.offset();
		// This is a special case where we need to modify a offset calculated on start, since the following happened:
		// 1. The position of the helper is absolute, so it's position is calculated based on the next positioned parent
		// 2. The actual offset parent is a child of the scroll parent, and the scroll parent isn't the document, which means that
		// the scroll is included in the initial calculation of the offset of the parent, and never recalculated upon drag
		if(this.cssPosition == 'absolute' && this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) {
			po.left += this.scrollParent.scrollLeft();
			po.top += this.scrollParent.scrollTop();
		}
		if((this.offsetParent[0] == document.body) //This needs to be actually done for all browsers, since pageX/pageY includes this information
		|| (this.offsetParent[0].tagName && this.offsetParent[0].tagName.toLowerCase() == 'html' && $.browser.msie)) //Ugly IE fix
			po = { top: 0, left: 0 };
		return {
			top: po.top + (parseInt(this.offsetParent.css("borderTopWidth"),10) || 0),
			left: po.left + (parseInt(this.offsetParent.css("borderLeftWidth"),10) || 0)
		};
	},
_getRelativeOffset: function() {
if(this.cssPosition == "relative") {
var p = this.element.position();
return {
top: p.top - (parseInt(this.helper.css("top"),10) || 0) + this.scrollParent.scrollTop(),
left: p.left - (parseInt(this.helper.css("left"),10) || 0) + this.scrollParent.scrollLeft()
};
} else {
return { top: 0, left: 0 };
}
},
_cacheMargins: function() {
this.margins = {
left: (parseInt(this.element.css("marginLeft"),10) || 0),
top: (parseInt(this.element.css("marginTop"),10) || 0)
};
},
_cacheHelperProportions: function() {
this.helperProportions = {
width: this.helper.outerWidth(),
height: this.helper.outerHeight()
};
},
	// Resolve the containment option ('parent', 'document', 'window', a
	// selector/element, or an explicit [x1, y1, x2, y2] array) into the
	// pixel bounds the helper may move within.
	_setContainment: function() {
		var o = this.options;
		if(o.containment == 'parent') o.containment = this.helper[0].parentNode;
		if(o.containment == 'document' || o.containment == 'window') this.containment = [
			0 - this.offset.relative.left - this.offset.parent.left,
			0 - this.offset.relative.top - this.offset.parent.top,
			$(o.containment == 'document' ? document : window).width() - this.helperProportions.width - this.margins.left,
			($(o.containment == 'document' ? document : window).height() || document.body.parentNode.scrollHeight) - this.helperProportions.height - this.margins.top
		];
		// element/selector containment: bounds come from the element's
		// offset, borders, padding and (when overflowing) scroll size
		if(!(/^(document|window|parent)$/).test(o.containment) && o.containment.constructor != Array) {
			var ce = $(o.containment)[0]; if(!ce) return;
			var co = $(o.containment).offset();
			var over = ($(ce).css("overflow") != 'hidden');
			this.containment = [
				co.left + (parseInt($(ce).css("borderLeftWidth"),10) || 0) + (parseInt($(ce).css("paddingLeft"),10) || 0) - this.margins.left,
				co.top + (parseInt($(ce).css("borderTopWidth"),10) || 0) + (parseInt($(ce).css("paddingTop"),10) || 0) - this.margins.top,
				co.left+(over ? Math.max(ce.scrollWidth,ce.offsetWidth) : ce.offsetWidth) - (parseInt($(ce).css("borderLeftWidth"),10) || 0) - (parseInt($(ce).css("paddingRight"),10) || 0) - this.helperProportions.width - this.margins.left,
				co.top+(over ? Math.max(ce.scrollHeight,ce.offsetHeight) : ce.offsetHeight) - (parseInt($(ce).css("borderTopWidth"),10) || 0) - (parseInt($(ce).css("paddingBottom"),10) || 0) - this.helperProportions.height - this.margins.top
			];
		} else if(o.containment.constructor == Array) {
			// explicit pixel bounds are used as-is
			this.containment = o.containment;
		}
	},
_convertPositionTo: function(d, pos) {
	// Converts a {top,left} position between "absolute" (document) space and
	// the coordinate space of the helper's positioned parent.
	// d: "absolute" or "relative" — the target coordinate space.
	// pos: the position to convert; defaults to this.position.
	if(!pos) pos = this.position;
	var mod = d == "absolute" ? 1 : -1;
	// Node whose scroll influences the helper; root nodes (html/body) are
	// ignored because pageX/pageY already include document scroll.
	var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);

	return {
		top: (
			pos.top // The absolute mouse position
			+ this.offset.relative.top * mod // Only for relative positioned nodes: Relative offset from element to offset parent
			+ this.offset.parent.top * mod // The offsetParent's offset without borders (offset + border)
			- ($.browser.safari && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ) * mod)
		),
		left: (
			pos.left // The absolute mouse position
			+ this.offset.relative.left * mod // Only for relative positioned nodes: Relative offset from element to offset parent
			+ this.offset.parent.left * mod // The offsetParent's offset without borders (offset + border)
			- ($.browser.safari && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ) * mod)
		)
	};
},
_generatePosition: function(event) {
	// Computes the helper's target {top,left} (in relative coordinates) for
	// the current mouse event, applying containment and grid constraints.
	var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);

	// This is another very weird special case that only happens for relative elements:
	// 1. If the css position is relative
	// 2. and the scroll parent is the document or similar to the offset parent
	// we have to refresh the relative offset during the scroll so there are no jumps
	if(this.cssPosition == 'relative' && !(this.scrollParent[0] != document && this.scrollParent[0] != this.offsetParent[0])) {
		this.offset.relative = this._getRelativeOffset();
	}

	var pageX = event.pageX;
	var pageY = event.pageY;

	/*
	 * - Position constraining -
	 * Constrain the position to a mix of grid, containment.
	 */
	if(this.originalPosition) { //If we are not dragging yet, we won't check for options

		// Keep the original click point inside the containment box.
		if(this.containment) {
			if(event.pageX - this.offset.click.left < this.containment[0]) pageX = this.containment[0] + this.offset.click.left;
			if(event.pageY - this.offset.click.top < this.containment[1]) pageY = this.containment[1] + this.offset.click.top;
			if(event.pageX - this.offset.click.left > this.containment[2]) pageX = this.containment[2] + this.offset.click.left;
			if(event.pageY - this.offset.click.top > this.containment[3]) pageY = this.containment[3] + this.offset.click.top;
		}

		// Round to the nearest grid cell, stepping one cell back when the
		// rounded point would fall outside the containment box.
		if(o.grid) {
			var top = this.originalPageY + Math.round((pageY - this.originalPageY) / o.grid[1]) * o.grid[1];
			pageY = this.containment ? (!(top - this.offset.click.top < this.containment[1] || top - this.offset.click.top > this.containment[3]) ? top : (!(top - this.offset.click.top < this.containment[1]) ? top - o.grid[1] : top + o.grid[1])) : top;
			var left = this.originalPageX + Math.round((pageX - this.originalPageX) / o.grid[0]) * o.grid[0];
			pageX = this.containment ? (!(left - this.offset.click.left < this.containment[0] || left - this.offset.click.left > this.containment[2]) ? left : (!(left - this.offset.click.left < this.containment[0]) ? left - o.grid[0] : left + o.grid[0])) : left;
		}

	}

	return {
		top: (
			pageY // The absolute mouse position
			- this.offset.click.top // Click offset (relative to the element)
			- this.offset.relative.top // Only for relative positioned nodes: Relative offset from element to offset parent
			- this.offset.parent.top // The offsetParent's offset without borders (offset + border)
			+ ($.browser.safari && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ))
		),
		left: (
			pageX // The absolute mouse position
			- this.offset.click.left // Click offset (relative to the element)
			- this.offset.relative.left // Only for relative positioned nodes: Relative offset from element to offset parent
			- this.offset.parent.left // The offsetParent's offset without borders (offset + border)
			+ ($.browser.safari && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ))
		)
	};
},
_clear: function() {
this.helper.removeClass("ui-draggable-dragging");
if(this.helper[0] != this.element[0] && !this.cancelHelperRemoval) this.helper.remove();
//if($.ui.ddmanager) $.ui.ddmanager.current = null;
this.helper = null;
this.cancelHelperRemoval = false;
},
// From now on bulk stuff - mainly helpers
_trigger: function(type, event, ui) {
	// Dispatches a draggable event: plugin callbacks run first, then the
	// user-supplied callback via the widget machinery.
	ui = ui || this._uiHash();
	$.ui.plugin.call(this, type, [event, ui]);
	//The absolute position has to be recalculated after plugins
	if(type == "drag") this.positionAbs = this._convertPositionTo("absolute");
	return $.widget.prototype._trigger.call(this, type, event, ui);
},
plugins: {},
_uiHash: function(event) {
return {
helper: this.helper,
position: this.position,
absolutePosition: this.positionAbs, //deprecated
offset: this.positionAbs
};
}
}));
$.extend($.ui.draggable, {
	version: "1.7",
	eventPrefix: "drag",
	// Default option values for new draggable instances; see the jQuery UI
	// draggable documentation for the semantics of each option.
	defaults: {
		addClasses: true,
		appendTo: "parent",
		axis: false,
		cancel: ":input,option",
		connectToSortable: false,
		containment: false,
		cursor: "auto",
		cursorAt: false,
		delay: 0,
		distance: 1,
		grid: false,
		handle: false,
		helper: "original",
		iframeFix: false,
		opacity: false,
		refreshPositions: false,
		revert: false,
		revertDuration: 500,
		scope: "default",
		scroll: true,
		scrollSensitivity: 20,
		scrollSpeed: 20,
		snap: false,
		snapMode: "both",
		snapTolerance: 20,
		stack: false,
		zIndex: false
	}
});
$.ui.plugin.add("draggable", "connectToSortable", {
	// Lets a draggable be dropped into connected sortable lists: while the
	// helper is over a sortable, that sortable's drag lifecycle is faked so
	// the item behaves as if it were being sorted natively.
	start: function(event, ui) {
		var inst = $(this).data("draggable"), o = inst.options,
			uiSortable = $.extend({}, ui, { item: inst.element });
		// Collect every enabled connected sortable and activate it.
		inst.sortables = [];
		$(o.connectToSortable).each(function() {
			var sortable = $.data(this, 'sortable');
			if (sortable && !sortable.options.disabled) {
				inst.sortables.push({
					instance: sortable,
					shouldRevert: sortable.options.revert
				});
				sortable._refreshItems(); //Do a one-time refresh at start to refresh the containerCache
				sortable._trigger("activate", event, uiSortable);
			}
		});
	},
	stop: function(event, ui) {
		//If we are still over the sortable, we fake the stop event of the sortable, but also remove helper
		var inst = $(this).data("draggable"),
			uiSortable = $.extend({}, ui, { item: inst.element });
		$.each(inst.sortables, function() {
			if(this.instance.isOver) {
				this.instance.isOver = 0;
				inst.cancelHelperRemoval = true; //Don't remove the helper in the draggable instance
				this.instance.cancelHelperRemoval = false; //Remove it in the sortable instance (so sortable plugins like revert still work)
				//The sortable revert is supported, and we have to set a temporary dropped variable on the draggable to support revert: 'valid/invalid'
				if(this.shouldRevert) this.instance.options.revert = true;
				//Trigger the stop of the sortable
				this.instance._mouseStop(event);
				// Restore the helper option that drag() swapped out.
				this.instance.options.helper = this.instance.options._helper;
				//If the helper has been the original item, restore properties in the sortable
				if(inst.options.helper == 'original')
					this.instance.currentItem.css({ top: 'auto', left: 'auto' });
			} else {
				this.instance.cancelHelperRemoval = false; //Remove the helper in the sortable instance
				this.instance._trigger("deactivate", event, uiSortable);
			}
		});
	},
	drag: function(event, ui) {
		var inst = $(this).data("draggable"), self = this;
		// Predicate: is the original click point over the given item box?
		var checkPos = function(o) {
			var dyClick = this.offset.click.top, dxClick = this.offset.click.left;
			var helperTop = this.positionAbs.top, helperLeft = this.positionAbs.left;
			var itemHeight = o.height, itemWidth = o.width;
			var itemTop = o.top, itemLeft = o.left;
			return $.ui.isOver(helperTop + dyClick, helperLeft + dxClick, itemTop, itemLeft, itemHeight, itemWidth);
		};
		$.each(inst.sortables, function(i) {
			//Copy over some variables to allow calling the sortable's native _intersectsWith
			this.instance.positionAbs = inst.positionAbs;
			this.instance.helperProportions = inst.helperProportions;
			this.instance.offset.click = inst.offset.click;
			if(this.instance._intersectsWith(this.instance.containerCache)) {
				//If it intersects, we use a little isOver variable and set it once, so our move-in stuff gets fired only once
				if(!this.instance.isOver) {
					this.instance.isOver = 1;
					//Now we fake the start of dragging for the sortable instance,
					//by cloning the list group item, appending it to the sortable and using it as inst.currentItem
					//We can then fire the start event of the sortable with our passed browser event, and our own helper (so it doesn't create a new one)
					this.instance.currentItem = $(self).clone().appendTo(this.instance.element).data("sortable-item", true);
					this.instance.options._helper = this.instance.options.helper; //Store helper option to later restore it
					this.instance.options.helper = function() { return ui.helper[0]; };
					event.target = this.instance.currentItem[0];
					this.instance._mouseCapture(event, true);
					this.instance._mouseStart(event, true, true);
					//Because the browser event is way off the new appended portlet, we modify a couple of variables to reflect the changes
					this.instance.offset.click.top = inst.offset.click.top;
					this.instance.offset.click.left = inst.offset.click.left;
					this.instance.offset.parent.left -= inst.offset.parent.left - this.instance.offset.parent.left;
					this.instance.offset.parent.top -= inst.offset.parent.top - this.instance.offset.parent.top;
					inst._trigger("toSortable", event);
					inst.dropped = this.instance.element; //draggable revert needs that
					//hack so receive/update callbacks work (mostly)
					inst.currentItem = inst.element;
					this.instance.fromOutside = inst;
				}
				//Provided we did all the previous steps, we can fire the drag event of the sortable on every draggable drag, when it intersects with the sortable
				if(this.instance.currentItem) this.instance._mouseDrag(event);
			} else {
				//If it doesn't intersect with the sortable, and it intersected before,
				//we fake the drag stop of the sortable, but make sure it doesn't remove the helper by using cancelHelperRemoval
				if(this.instance.isOver) {
					this.instance.isOver = 0;
					this.instance.cancelHelperRemoval = true;
					//Prevent reverting on this forced stop
					this.instance.options.revert = false;
					// The out event needs to be triggered independently
					this.instance._trigger('out', event, this.instance._uiHash(this.instance));
					this.instance._mouseStop(event, true);
					this.instance.options.helper = this.instance.options._helper;
					//Now we remove our currentItem, the list group clone again, and the placeholder, and animate the helper back to it's original size
					this.instance.currentItem.remove();
					if(this.instance.placeholder) this.instance.placeholder.remove();
					inst._trigger("fromSortable", event);
					inst.dropped = false; //draggable revert needs that
				}
			};
		});
	}
});
$.ui.plugin.add("draggable", "cursor", {
	// Swap the body cursor to o.cursor for the drag and restore it afterwards.
	start: function(event, ui) {
		var body = $('body');
		var o = $(this).data('draggable').options;
		var current = body.css("cursor");
		if (current) o._cursor = current;
		body.css("cursor", o.cursor);
	},
	stop: function(event, ui) {
		var o = $(this).data('draggable').options;
		if (o._cursor) $('body').css("cursor", o._cursor);
	}
});
$.ui.plugin.add("draggable", "iframeFix", {
	// Cover each targeted iframe with a near-invisible div for the duration
	// of the drag so mouse events are not swallowed by iframe documents.
	start: function(event, ui) {
		var o = $(this).data('draggable').options;
		var targets = (o.iframeFix === true) ? "iframe" : o.iframeFix;
		$(targets).each(function() {
			var cover = $('<div class="ui-draggable-iframeFix" style="background: #fff;"></div>');
			cover.css({
				width: this.offsetWidth+"px", height: this.offsetHeight+"px",
				position: "absolute", opacity: "0.001", zIndex: 1000
			});
			cover.css($(this).offset());
			cover.appendTo("body");
		});
	},
	stop: function(event, ui) {
		//Remove frame helpers
		$("div.ui-draggable-iframeFix").each(function() { this.parentNode.removeChild(this); });
	}
});
$.ui.plugin.add("draggable", "opacity", {
	// Fade the helper to o.opacity during the drag and restore it at the end.
	start: function(event, ui) {
		var helper = $(ui.helper);
		var o = $(this).data('draggable').options;
		var current = helper.css("opacity");
		if(current) o._opacity = current;
		helper.css('opacity', o.opacity);
	},
	stop: function(event, ui) {
		var o = $(this).data('draggable').options;
		if(o._opacity) $(ui.helper).css('opacity', o._opacity);
	}
});
$.ui.plugin.add("draggable", "scroll", {
	// Auto-scrolls the scroll parent (or the document) when the pointer gets
	// within o.scrollSensitivity pixels of an edge, o.scrollSpeed px per step.
	start: function(event, ui) {
		var i = $(this).data("draggable");
		// Cache the scroll parent's offset once per drag for the edge checks.
		if(i.scrollParent[0] != document && i.scrollParent[0].tagName != 'HTML') i.overflowOffset = i.scrollParent.offset();
	},
	drag: function(event, ui) {
		var i = $(this).data("draggable"), o = i.options, scrolled = false;

		if(i.scrollParent[0] != document && i.scrollParent[0].tagName != 'HTML') {
			// Case 1: scrolling an overflowed ancestor element.
			if(!o.axis || o.axis != 'x') {
				if((i.overflowOffset.top + i.scrollParent[0].offsetHeight) - event.pageY < o.scrollSensitivity)
					i.scrollParent[0].scrollTop = scrolled = i.scrollParent[0].scrollTop + o.scrollSpeed;
				else if(event.pageY - i.overflowOffset.top < o.scrollSensitivity)
					i.scrollParent[0].scrollTop = scrolled = i.scrollParent[0].scrollTop - o.scrollSpeed;
			}
			if(!o.axis || o.axis != 'y') {
				if((i.overflowOffset.left + i.scrollParent[0].offsetWidth) - event.pageX < o.scrollSensitivity)
					i.scrollParent[0].scrollLeft = scrolled = i.scrollParent[0].scrollLeft + o.scrollSpeed;
				else if(event.pageX - i.overflowOffset.left < o.scrollSensitivity)
					i.scrollParent[0].scrollLeft = scrolled = i.scrollParent[0].scrollLeft - o.scrollSpeed;
			}
		} else {
			// Case 2: scrolling the document itself.
			if(!o.axis || o.axis != 'x') {
				if(event.pageY - $(document).scrollTop() < o.scrollSensitivity)
					scrolled = $(document).scrollTop($(document).scrollTop() - o.scrollSpeed);
				else if($(window).height() - (event.pageY - $(document).scrollTop()) < o.scrollSensitivity)
					scrolled = $(document).scrollTop($(document).scrollTop() + o.scrollSpeed);
			}
			if(!o.axis || o.axis != 'y') {
				if(event.pageX - $(document).scrollLeft() < o.scrollSensitivity)
					scrolled = $(document).scrollLeft($(document).scrollLeft() - o.scrollSpeed);
				else if($(window).width() - (event.pageX - $(document).scrollLeft()) < o.scrollSensitivity)
					scrolled = $(document).scrollLeft($(document).scrollLeft() + o.scrollSpeed);
			}
		}

		// Drop targets must be re-measured after any scrolling.
		if(scrolled !== false && $.ui.ddmanager && !o.dropBehaviour)
			$.ui.ddmanager.prepareOffsets(i, event);
	}
});
$.ui.plugin.add("draggable", "snap", {
	// Snaps the helper's edges to the edges of other elements (o.snap) when
	// they come within o.snapTolerance pixels of each other.
	start: function(event, ui) {
		var i = $(this).data("draggable"), o = i.options;
		// Cache the geometry of every potential snap target once per drag.
		i.snapElements = [];
		$(o.snap.constructor != String ? ( o.snap.items || ':data(draggable)' ) : o.snap).each(function() {
			var $t = $(this); var $o = $t.offset();
			if(this != i.element[0]) i.snapElements.push({
				item: this,
				width: $t.outerWidth(), height: $t.outerHeight(),
				top: $o.top, left: $o.left
			});
		});
	},
	drag: function(event, ui) {
		var inst = $(this).data("draggable"), o = inst.options;
		var d = o.snapTolerance;

		// Current helper edges.
		var x1 = ui.offset.left, x2 = x1 + inst.helperProportions.width,
			y1 = ui.offset.top, y2 = y1 + inst.helperProportions.height;

		for (var i = inst.snapElements.length - 1; i >= 0; i--){

			var l = inst.snapElements[i].left, r = l + inst.snapElements[i].width,
				t = inst.snapElements[i].top, b = t + inst.snapElements[i].height;

			// Skip (and fire the release callback for) targets whose
			// tolerance band contains none of the helper's corners.
			//Yes, I know, this is insane ;)
			if(!((l-d < x1 && x1 < r+d && t-d < y1 && y1 < b+d) || (l-d < x1 && x1 < r+d && t-d < y2 && y2 < b+d) || (l-d < x2 && x2 < r+d && t-d < y1 && y1 < b+d) || (l-d < x2 && x2 < r+d && t-d < y2 && y2 < b+d))) {
				if(inst.snapElements[i].snapping) (inst.options.snap.release && inst.options.snap.release.call(inst.element, event, $.extend(inst._uiHash(), { snapItem: inst.snapElements[i].item })));
				inst.snapElements[i].snapping = false;
				continue;
			}

			// "outer" snapping: align the helper's outside edges with the target.
			if(o.snapMode != 'inner') {
				var ts = Math.abs(t - y2) <= d;
				var bs = Math.abs(b - y1) <= d;
				var ls = Math.abs(l - x2) <= d;
				var rs = Math.abs(r - x1) <= d;
				if(ts) ui.position.top = inst._convertPositionTo("relative", { top: t - inst.helperProportions.height, left: 0 }).top - inst.margins.top;
				if(bs) ui.position.top = inst._convertPositionTo("relative", { top: b, left: 0 }).top - inst.margins.top;
				if(ls) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: l - inst.helperProportions.width }).left - inst.margins.left;
				if(rs) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: r }).left - inst.margins.left;
			}

			// NOTE(review): when snapMode == 'inner', ts/bs/ls/rs are undefined
			// here (var hoisting keeps this from throwing); `first` records
			// whether the outer branch snapped.
			var first = (ts || bs || ls || rs);

			// "inner" snapping: align the helper's inside edges with the target.
			if(o.snapMode != 'outer') {
				var ts = Math.abs(t - y1) <= d;
				var bs = Math.abs(b - y2) <= d;
				var ls = Math.abs(l - x1) <= d;
				var rs = Math.abs(r - x2) <= d;
				if(ts) ui.position.top = inst._convertPositionTo("relative", { top: t, left: 0 }).top - inst.margins.top;
				if(bs) ui.position.top = inst._convertPositionTo("relative", { top: b - inst.helperProportions.height, left: 0 }).top - inst.margins.top;
				if(ls) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: l }).left - inst.margins.left;
				if(rs) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: r - inst.helperProportions.width }).left - inst.margins.left;
			}

			// Fire the snap callback on the transition into the snapped state.
			if(!inst.snapElements[i].snapping && (ts || bs || ls || rs || first))
				(inst.options.snap.snap && inst.options.snap.snap.call(inst.element, event, $.extend(inst._uiHash(), { snapItem: inst.snapElements[i].item })));
			inst.snapElements[i].snapping = (ts || bs || ls || rs || first);

		};
	}
});
$.ui.plugin.add("draggable", "stack", {
	// Re-stack the configured group by z-index and put the dragged element on top.
	start: function(event, ui) {
		var o = $(this).data("draggable").options;
		var zOf = function(el) {
			return parseInt($(el).css("zIndex"), 10) || o.stack.min;
		};
		var group = $.makeArray($(o.stack.group)).sort(function(a, b) {
			return zOf(a) - zOf(b);
		});
		$(group).each(function(i) {
			this.style.zIndex = o.stack.min + i;
		});
		this[0].style.zIndex = o.stack.min + group.length;
	}
});
$.ui.plugin.add("draggable", "zIndex", {
	// Raise the helper to o.zIndex for the drag and restore it afterwards.
	start: function(event, ui) {
		var helper = $(ui.helper);
		var o = $(this).data("draggable").options;
		var current = helper.css("zIndex");
		if(current) o._zIndex = current;
		helper.css('zIndex', o.zIndex);
	},
	stop: function(event, ui) {
		var o = $(this).data("draggable").options;
		if(o._zIndex) $(ui.helper).css('zIndex', o._zIndex);
	}
});
})(jQuery); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/editor/plugins/TextColor.js.uncompressed.js | define("dojox/editor/plugins/TextColor", [
"dojo",
"dijit",
"dojox",
"dijit/_base/popup",
"dijit/_Widget",
"dijit/_TemplatedMixin",
"dijit/_WidgetsInTemplateMixin",
"dijit/TooltipDialog",
"dijit/form/Button",
"dijit/form/DropDownButton",
"dijit/_editor/_Plugin",
"dojox/widget/ColorPicker",
"dojo/_base/connect",
"dojo/_base/declare",
"dojo/i18n",
"dojo/i18n!dojox/editor/plugins/nls/TextColor"
], function(dojo, dijit, dojox) {
dojo.experimental("dojox.editor.plugins.TextColor");
dojo.declare("dojox.editor.plugins._TextColorDropDown", [dijit._Widget, dijit._TemplatedMixin, dijit._WidgetsInTemplateMixin], {
	// summary:
	//		A simple widget that uses/creates a dropdown with a dojox.widget.ColorPicker. Also provides
	//		passthroughs to the value of the color picker and convenient hook points.
	// tags:
	//		private

	// templateString: String
	//		The template used to create the ColorPicker.
	templateString: "<div style='display: none; position: absolute; top: -10000; z-index: -10000'>" +
		"<div dojoType='dijit.TooltipDialog' dojoAttachPoint='dialog' class='dojoxEditorColorPicker'>" +
		"<div dojoType='dojox.widget.ColorPicker' dojoAttachPoint='_colorPicker'></div>" +
		"<br>" +
		"<center>" +
		"<button dojoType='dijit.form.Button' type='button' dojoAttachPoint='_setButton'>${setButtonText}</button>" +
		" " +
		"<button dojoType='dijit.form.Button' type='button' dojoAttachPoint='_cancelButton'>${cancelButtonText}</button>" +
		"</center>" +
		"</div>" +
		"</div>",

	// widgetsInTemplate: Boolean
	//		Flag denoting widgets are contained in the template.
	widgetsInTemplate: true,

	constructor: function(){
		// summary:
		//		Constructor over-ride so that the translated strings are mixed in so
		//		the template fills out.
		var strings = dojo.i18n.getLocalization("dojox.editor.plugins", "TextColor");
		dojo.mixin(this, strings);
	},

	startup: function(){
		// summary:
		//		Over-ride of startup to do the basic connect setups and such.
		if(!this._started){
			this.inherited(arguments);
			// Apply button: report the chosen color to listeners.
			this.connect(this._setButton, "onClick", dojo.hitch(this, function(){
				this.onChange(this.get("value"));
			}));
			// Cancel button: dismiss the dialog without changing anything.
			this.connect(this._cancelButton, "onClick", dojo.hitch(this, function(){
				dijit.popup.close(this.dialog);
				this.onCancel();
			}));
			// Fully started, so go ahead and remove the hide.
			dojo.style(this.domNode, "display", "block");
		}
	},

	_setValueAttr: function(value, priorityChange){
		// summary:
		//		Passthrough function for the color picker value.
		// value: String
		//		The value to set in the color picker
		// priorityChange: Boolean
		//		Value to indicate whether or not to trigger an onChange event.
		this._colorPicker.set("value", value, priorityChange);
	},

	_getValueAttr: function(){
		// summary:
		//		Passthrough function for the color picker value.
		return this._colorPicker.get("value");
	},

	onChange: function(value){
		// summary:
		//		Hook point to get the value when the color picker value is selected.
		// value: String
		//		The value from the color picker.
	},

	onCancel: function(){
		// summary:
		//		Hook point to get when the dialog is canceled.
	}
});
dojo.declare("dojox.editor.plugins.TextColor", dijit._editor._Plugin, {
	// summary:
	//		This plugin provides dropdown color pickers for setting text color and background color
	//		and makes use of the nicer-looking (though not entirely accessible), dojox.widget.ColorPicker.
	//
	// description:
	//		The commands provided by this plugin are:
	//		* foreColor - sets the text color
	//		* hiliteColor - sets the background color

	// Override _Plugin.buttonClass to use DropDownButton (with ColorPalette) to control this plugin
	buttonClass: dijit.form.DropDownButton,

	// useDefaultCommand: Boolean
	//		False as we do not use the default editor command/click behavior.
	useDefaultCommand: false,

	constructor: function(){
		// Create the dropdown picker, attach it to the document, and wire its
		// apply/cancel hooks to the editor.
		this._picker = new dojox.editor.plugins._TextColorDropDown();
		dojo.body().appendChild(this._picker.domNode);
		this._picker.startup();
		this.dropDown = this._picker.dialog;
		this.connect(this._picker, "onChange", function(color){
			this.editor.execCommand(this.command, color);
		});
		this.connect(this._picker, "onCancel", function(){
			this.editor.focus();
		});
	},

	updateState: function(){
		// summary:
		//		Overrides _Plugin.updateState(). This updates the ColorPalette
		//		to show the color of the currently selected text.
		// tags:
		//		protected
		var _e = this.editor;
		var _c = this.command;
		if(!_e || !_e.isLoaded || !_c.length){
			return;
		}
		var disabled = this.get("disabled");
		var value;
		if(this.button){
			this.button.set("disabled", disabled);
			if(disabled){
				return;
			}
			try{
				value = _e.queryCommandValue(_c)|| "";
			}catch(e){
				//Firefox may throw error above if the editor is just loaded, ignore it
				value = "";
			}
		}
		// Normalize the queried color to a "#rrggbb" hex string.
		if(value == ""){
			value = "#000000";
		}
		if(value == "transparent"){
			value = "#ffffff";
		}
		if(typeof value == "string"){
			//if RGB value, convert to hex value
			if(value.indexOf("rgb")> -1){
				value = dojo.colorFromRgb(value).toHex();
			}
		}else{ //it's an integer(IE returns an MS access #)
			// IE reports a BGR-ordered integer: swap the red/blue bytes, then
			// left-pad the hex form to six digits.
			value =((value & 0x0000ff)<< 16)|(value & 0x00ff00)|((value & 0xff0000)>>> 16);
			value = value.toString(16);
			value = "#000000".slice(0, 7 - value.length)+ value;
		}
		// Only push the value into the picker when it actually changed.
		if(value !== this._picker.get('value')){
			this._picker.set('value', value, false);
		}
	},

	destroy: function(){
		// summary:
		//		Over-ride cleanup function.
		this.inherited(arguments);
		this._picker.destroyRecursive();
		delete this._picker;
	}
});
// Register this plugin. Uses the same name as the dijit one, so you
// use one or the other, not both.
dojo.subscribe(dijit._scopeName + ".Editor.getPlugin", null, function(o){
	// Another plugin has already claimed this slot.
	if(o.plugin){ return; }
	var name = o.args.name;
	if(name === "foreColor" || name === "hiliteColor"){
		o.plugin = new dojox.editor.plugins.TextColor({ command: name });
	}
});
return dojox.editor.plugins.TextColor;
}); | PypiClean |
/Lasagne-0.1.tar.gz/Lasagne-0.1/lasagne/regularization.py | import theano.tensor as T
from .layers import Layer, get_all_params
def l1(x):
    """Return the L1 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        The sum of the absolute values of the elements of ``x``.
    """
    absolute_values = abs(x)
    return T.sum(absolute_values)
def l2(x):
    """Return the squared L2 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        The sum of the squared elements of ``x``.
    """
    squared = x ** 2
    return T.sum(squared)
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    """Compute the total cost of applying a penalty to one or more tensors.

    Parameters
    ----------
    tensor_or_tensors : Theano tensor or list of tensors
        The tensor(s) to penalize.
    penalty : callable
        Function mapping a single tensor to a scalar cost.
    **kwargs
        Extra keyword arguments forwarded to ``penalty``.

    Returns
    -------
    Theano scalar
        The summed penalty over all given tensors (0 for an empty list).
    """
    try:
        # EAFP: first treat the argument as a collection of tensors.
        costs = [penalty(x, **kwargs) for x in tensor_or_tensors]
        return sum(costs)
    except (TypeError, ValueError):
        # Not iterable (or rejected as such): penalize it as a single tensor.
        return penalty(tensor_or_tensors, **kwargs)
def regularize_layer_params(layer, penalty,
                            tags={'regularizable': True}, **kwargs):
    """Penalize the parameters of a layer or group of layers.

    Parameters
    ----------
    layer : a :class:`Layer` instance or list of layers.
    penalty : callable
        Function mapping a single parameter tensor to a scalar cost.
    tags : dict
        Tag specifications which filter the penalized parameters; by default
        only parameters tagged ``regularizable`` are included.
    **kwargs
        Extra keyword arguments forwarded to ``penalty``.

    Returns
    -------
    Theano scalar
        A scalar expression for the total penalty cost.
    """
    # Normalize the argument to a list of layers.
    if isinstance(layer, Layer):
        layers = [layer]
    else:
        layers = layer
    # Gather every matching parameter across all layers.
    all_params = []
    for single_layer in layers:
        all_params.extend(single_layer.get_params(**tags))
    return apply_penalty(all_params, penalty, **kwargs)
def regularize_layer_params_weighted(layers, penalty,
                                     tags={'regularizable': True}, **kwargs):
    """Penalize layer parameters with a per-layer weighting coefficient.

    Parameters
    ----------
    layers : dict
        A mapping from :class:`Layer` instances to scalar coefficients.
    penalty : callable
        Function mapping a single parameter tensor to a scalar cost.
    tags : dict
        Tag specifications which filter the penalized parameters; by default
        only parameters tagged ``regularizable`` are included.
    **kwargs
        Extra keyword arguments forwarded to ``penalty``.

    Returns
    -------
    Theano scalar
        A scalar expression for the total weighted penalty cost.
    """
    total = 0
    for single_layer, coeff in layers.items():
        params = single_layer.get_params(**tags)
        total = total + coeff * apply_penalty(params, penalty, **kwargs)
    return total
def regularize_network_params(layer, penalty,
                              tags={'regularizable': True}, **kwargs):
    """Penalize the parameters of all layers in a network.

    Parameters
    ----------
    layer : a :class:`Layer` instance.
        Parameters of this layer and all layers below it are penalized.
    penalty : callable
        Function mapping a single parameter tensor to a scalar cost.
    tags : dict
        Tag specifications which filter the penalized parameters; by default
        only parameters tagged ``regularizable`` are included.
    **kwargs
        Extra keyword arguments forwarded to ``penalty``.

    Returns
    -------
    Theano scalar
        A scalar expression for the total penalty cost.
    """
    all_params = get_all_params(layer, **tags)
    return apply_penalty(all_params, penalty, **kwargs)
/GraphQL_core_next-1.1.1-py3-none-any.whl/graphql/validation/__init__.py | from .validate import validate
from .validation_context import (
ASTValidationContext,
SDLValidationContext,
ValidationContext,
)
from .rules import ValidationRule, ASTValidationRule, SDLValidationRule
# All validation rules in the GraphQL Specification.
from .specified_rules import specified_rules
# Spec Section: "Executable Definitions"
from .rules.executable_definitions import ExecutableDefinitionsRule
# Spec Section: "Field Selections on Objects, Interfaces, and Unions Types"
from .rules.fields_on_correct_type import FieldsOnCorrectTypeRule
# Spec Section: "Fragments on Composite Types"
from .rules.fragments_on_composite_types import FragmentsOnCompositeTypesRule
# Spec Section: "Argument Names"
from .rules.known_argument_names import KnownArgumentNamesRule
# Spec Section: "Directives Are Defined"
from .rules.known_directives import KnownDirectivesRule
# Spec Section: "Fragment spread target defined"
from .rules.known_fragment_names import KnownFragmentNamesRule
# Spec Section: "Fragment Spread Type Existence"
from .rules.known_type_names import KnownTypeNamesRule
# Spec Section: "Lone Anonymous Operation"
from .rules.lone_anonymous_operation import LoneAnonymousOperationRule
# Spec Section: "Fragments must not form cycles"
from .rules.no_fragment_cycles import NoFragmentCyclesRule
# Spec Section: "All Variable Used Defined"
from .rules.no_undefined_variables import NoUndefinedVariablesRule
# Spec Section: "Fragments must be used"
from .rules.no_unused_fragments import NoUnusedFragmentsRule
# Spec Section: "All Variables Used"
from .rules.no_unused_variables import NoUnusedVariablesRule
# Spec Section: "Field Selection Merging"
from .rules.overlapping_fields_can_be_merged import OverlappingFieldsCanBeMergedRule
# Spec Section: "Fragment spread is possible"
from .rules.possible_fragment_spreads import PossibleFragmentSpreadsRule
# Spec Section: "Argument Optionality"
from .rules.provided_required_arguments import ProvidedRequiredArgumentsRule
# Spec Section: "Leaf Field Selections"
from .rules.scalar_leafs import ScalarLeafsRule
# Spec Section: "Subscriptions with Single Root Field"
from .rules.single_field_subscriptions import SingleFieldSubscriptionsRule
# Spec Section: "Argument Uniqueness"
from .rules.unique_argument_names import UniqueArgumentNamesRule
# Spec Section: "Directives Are Unique Per Location"
from .rules.unique_directives_per_location import UniqueDirectivesPerLocationRule
# Spec Section: "Fragment Name Uniqueness"
from .rules.unique_fragment_names import UniqueFragmentNamesRule
# Spec Section: "Input Object Field Uniqueness"
from .rules.unique_input_field_names import UniqueInputFieldNamesRule
# Spec Section: "Operation Name Uniqueness"
from .rules.unique_operation_names import UniqueOperationNamesRule
# Spec Section: "Variable Uniqueness"
from .rules.unique_variable_names import UniqueVariableNamesRule
# Spec Section: "Value Type Correctness"
from .rules.values_of_correct_type import ValuesOfCorrectTypeRule
# Spec Section: "Variables are Input Types"
from .rules.variables_are_input_types import VariablesAreInputTypesRule
# Spec Section: "All Variable Usages Are Allowed"
from .rules.variables_in_allowed_position import VariablesInAllowedPositionRule
__all__ = [
"validate",
"ASTValidationContext",
"ASTValidationRule",
"SDLValidationContext",
"SDLValidationRule",
"ValidationContext",
"ValidationRule",
"specified_rules",
"ExecutableDefinitionsRule",
"FieldsOnCorrectTypeRule",
"FragmentsOnCompositeTypesRule",
"KnownArgumentNamesRule",
"KnownDirectivesRule",
"KnownFragmentNamesRule",
"KnownTypeNamesRule",
"LoneAnonymousOperationRule",
"NoFragmentCyclesRule",
"NoUndefinedVariablesRule",
"NoUnusedFragmentsRule",
"NoUnusedVariablesRule",
"OverlappingFieldsCanBeMergedRule",
"PossibleFragmentSpreadsRule",
"ProvidedRequiredArgumentsRule",
"ScalarLeafsRule",
"SingleFieldSubscriptionsRule",
"UniqueArgumentNamesRule",
"UniqueDirectivesPerLocationRule",
"UniqueFragmentNamesRule",
"UniqueInputFieldNamesRule",
"UniqueOperationNamesRule",
"UniqueVariableNamesRule",
"ValuesOfCorrectTypeRule",
"VariablesAreInputTypesRule",
"VariablesInAllowedPositionRule",
] | PypiClean |
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/toolkits/data_matching/autotagger.py | from graphlab.toolkits._model import CustomModel as _CustomModel
import graphlab as _gl
import graphlab.toolkits._internal_utils as _tkutl
import graphlab.connect as _mt
def _preprocess(column):
    """
    Compute basic string features from a column of text.

    Three feature sets are produced: unigram counts with common
    stopwords removed, bigram counts, and counts of character
    shingles (ngrams) of length 4.

    Parameters
    ----------
    column : SArray
        A column of data from which to extract various string features.

    Returns
    -------
    out : SFrame
        An SFrame with three columns of features: "unigrams",
        "bigrams", and "4_shingles".

    See Also
    --------
    graphlab.text_analytics.count_words
    graphlab.text_analytics.count_ngrams
    """
    if not isinstance(column, _gl.SArray):
        raise TypeError("column parameter must be an SArray")

    # Unigram counts, with stopwords filtered out of each count dict.
    unigrams = _gl.text_analytics.count_words(column)
    unigrams = unigrams.dict_trim_by_keys(
        _gl.text_analytics.stopwords(), exclude=True)

    # Assemble the output SFrame column by column.
    features = _gl.SFrame()
    features["unigrams"] = unigrams
    features["bigrams"] = _gl.text_analytics.count_ngrams(column, n=2)
    features["4_shingles"] = _gl.text_analytics.count_ngrams(
        column, n=4, method="character")
    return features
def create(dataset, tag_name=None, features=None, verbose=True):
    """
    Create an autotagger model, which quickly applies tags from a
    reference set of text labels to new query data via the model's
    ``tag`` method.

    Parameters
    ----------
    dataset : SFrame
        Reference data. This SFrame must contain at least one column. By
        default, only the ``tag_name`` column is used as the basis for
        tagging; additional columns may be included with the
        ``features`` parameter.

    tag_name : string, optional
        Name of the column in ``dataset`` with the tags. This column must
        contain string values. If ``dataset`` contains more than one
        column, ``tag_name`` must be specified.

    features : list[string], optional
        Names of the columns with features to use as the basis for
        tagging. 'None' (the default) indicates that only the column
        specified by ``tag_name`` should be used. Only str or list
        fields are allowed. If a column of type list is specified, all
        values must be either of type string or convertible to string.

    verbose : bool, optional
        If True, print verbose output during model creation.

    Returns
    -------
    out : model
        A model for quickly tagging new query observations with entries
        from `dataset`. Currently, the only implementation is:

        - :class:`~graphlab.data_matching.autotagger.NearestNeighborAutoTagger`

    See Also
    --------
    graphlab.nearest_neighbors.NearestNeighborsModel
    graphlab.data_matching.nearest_neighbor_autotagger.NearestNeighborAutoTagger

    Examples
    --------
    Build a model from a reference set of actor names, then tag text
    queries with it. The score in the output is a similarity score
    indicating the strength of each match; spurious matches can be
    filtered out with the ``tag`` method's ``similarity_threshold``
    parameter.

    >>> actors_sf = gl.SFrame({"actor": ["Will Smith", "Tom Hanks",
    ...                                  "Bradley Cooper", "Tom Cruise"]})
    >>> m = gl.data_matching.autotagger.create(actors_sf, tag_name="actor")
    >>> m.tag(reviews_sf, query_name="review", similarity_threshold=.8)
    """
    # Record a usage metric for this entry point.
    _mt._get_metric_tracker().track(__name__ + '.create')

    # Deferred import of the concrete implementation; nearest-neighbor
    # tagging is currently the only autotagger available.
    from . import nearest_neighbor_autotagger as _implementation
    return _implementation.create(dataset, tag_name, features, verbose)
class _AutoTagger(_CustomModel):
    """
    Abstract class for GraphLab Create AutoTagger models. This class defines
    methods common to all autotagger models but leaves unique details to
    separate model classes.

    All methods here raise NotImplementedError; concrete subclasses must
    override them. Do not instantiate this class directly.
    """

    def get_current_options(self):
        """
        Return a dictionary with the options used to define and create the
        current AutoTagger model.

        Raises
        ------
        NotImplementedError
            Always; subclasses must provide the implementation.
        """
        raise NotImplementedError("The 'get_current_options' method has not " \
            "been implemented for this model.")

    def tag(self, dataset, query_name=None):
        """
        Match the reference tags passed when a model is created to a new set of
        queries. This is a many-to-many match: each query may have any number of
        occurrences of a reference tag.

        Parameters
        ----------
        dataset : SFrame
            Query data to be tagged.

        query_name : string, optional
            Name of the column in ``dataset`` to be auto-tagged. If ``dataset``
            has more than one column, ``query_name`` must be specified.

        Returns
        -------
        out : SFrame
            An SFrame with four columns:

            - row ID
            - column name specified as `tag_name` parameter to `create` method
            - column name specified as `query_name` parameter to `tag` method
            - a similarity score between 0 and 1, indicating the strength of the
              match between the query data and the suggested reference tag,
              where a score of zero indicates a poor match and a strength of 1
              corresponds to a perfect match

        Raises
        ------
        NotImplementedError
            Always; subclasses must provide the implementation.
        """
        raise NotImplementedError(
            "_AutoTagger should not be instantiated directly. This method " \
            "is intended to be implemented in subclasses.")

    def evaluate(self, dataset):
        """
        Match the reference tags to a set of queries labeled with their true
        tags, and then evaluate the model's performance on those queries.

        The true tags should be provided as an additional column in ``dataset``,
        and that column's name should be the same as the ``tag_name`` parameter
        specified when the model was created. The type of the tags column should
        be either string or list (of strings).

        Parameters
        ----------
        dataset : SFrame
            Query data to be tagged, including a column of true tags named
            after the model's ``tag_name``.

        Returns
        -------
        out : dict
            A dictionary containing the following evaluation metrics:

            - Precision
            - Recall
            - F1 score

        Raises
        ------
        NotImplementedError
            Always; subclasses must provide the implementation.
        """
        raise NotImplementedError(
            "_AutoTagger should not be instantiated directly. This method " \
            "is intended to be implemented in subclasses.")
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/lang/uk.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['uk']={"editor":"Текстовий редактор","editorPanel":"Панель розширеного текстового редактора","common":{"editorHelp":"натисніть ALT 0 для довідки","browseServer":"Огляд Сервера","url":"URL","protocol":"Протокол","upload":"Надіслати","uploadSubmit":"Надіслати на сервер","image":"Зображення","flash":"Flash","form":"Форма","checkbox":"Галочка","radio":"Кнопка вибору","textField":"Текстове поле","textarea":"Текстова область","hiddenField":"Приховане поле","button":"Кнопка","select":"Список","imageButton":"Кнопка із зображенням","notSet":"<не визначено>","id":"Ідентифікатор","name":"Ім'я","langDir":"Напрямок мови","langDirLtr":"Зліва направо (LTR)","langDirRtl":"Справа наліво (RTL)","langCode":"Код мови","longDescr":"Довгий опис URL","cssClass":"Клас CSS","advisoryTitle":"Заголовок","cssStyle":"Стиль CSS","ok":"ОК","cancel":"Скасувати","close":"Закрити","preview":"Попередній перегляд","resize":"Потягніть для зміни розмірів","generalTab":"Основне","advancedTab":"Додаткове","validateNumberFailed":"Значення не є цілим числом.","confirmNewPage":"Всі незбережені зміни будуть втрачені. Ви впевнені, що хочете завантажити нову сторінку?","confirmCancel":"Деякі опції змінено. 
Закрити вікно без збереження змін?","options":"Опції","target":"Ціль","targetNew":"Нове вікно (_blank)","targetTop":"Поточне вікно (_top)","targetSelf":"Поточний фрейм/вікно (_self)","targetParent":"Батьківський фрейм/вікно (_parent)","langDirLTR":"Зліва направо (LTR)","langDirRTL":"Справа наліво (RTL)","styles":"Стиль CSS","cssClasses":"Клас CSS","width":"Ширина","height":"Висота","align":"Вирівнювання","left":"По лівому краю","right":"По правому краю","center":"По центру","justify":"По ширині","alignLeft":"По лівому краю","alignRight":"По правому краю","alignCenter":"По центру","alignTop":"По верхньому краю","alignMiddle":"По середині","alignBottom":"По нижньому краю","alignNone":"Нема","invalidValue":"Невірне значення.","invalidHeight":"Висота повинна бути цілим числом.","invalidWidth":"Ширина повинна бути цілим числом.","invalidLength":"Вказане значення для поля \"%1\" має бути позитивним числом без або з коректним символом одиниці виміру (%2).","invalidCssLength":"Значення, вказане для \"%1\" в полі повинно бути позитивним числом або без дійсного виміру CSS блоку (px, %, in, cm, mm, em, ex, pt або pc).","invalidHtmlLength":"Значення, вказане для \"%1\" в полі повинно бути позитивним числом або без дійсного виміру HTML блоку (px або %).","invalidInlineStyle":"Значення, вказане для вбудованого стилю повинне складатися з одного чи кількох кортежів у форматі \"ім'я : значення\", розділених крапкою з комою.","cssLengthTooltip":"Введіть номер значення в пікселях або число з дійсною одиниці CSS (px, %, in, cm, mm, em, ex, pt або pc).","unavailable":"%1<span class=\"cke_accessibility\">, не 
доступне</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Пробіл","35":"End","36":"Home","46":"Видалити","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Сполучення клавіш","optionDefault":"Типово"},"about":{"copy":"Copyright © $1. Всі права застережено.","dlgTitle":"Про CKEditor 4","moreInfo":"Щодо інформації з ліцензування завітайте на наш сайт:"},"basicstyles":{"bold":"Жирний","italic":"Курсив","strike":"Закреслений","subscript":"Нижній індекс","superscript":"Верхній індекс","underline":"Підкреслений"},"blockquote":{"toolbar":"Цитата"},"notification":{"closed":"Сповіщення закрито."},"toolbar":{"toolbarCollapse":"Згорнути панель інструментів","toolbarExpand":"Розгорнути панель інструментів","toolbarGroups":{"document":"Документ","clipboard":"Буфер обміну / Скасувати","editing":"Редагування","forms":"Форми","basicstyles":"Основний Стиль","paragraph":"Параграф","links":"Посилання","insert":"Вставити","styles":"Стилі","colors":"Кольори","tools":"Інструменти"},"toolbars":"Панель інструментів редактора"},"clipboard":{"copy":"Копіювати","copyError":"Налаштування безпеки Вашого браузера не дозволяють редактору автоматично виконувати операції копіювання. Будь ласка, використовуйте клавіатуру для цього (Ctrl/Cmd+C).","cut":"Вирізати","cutError":"Налаштування безпеки Вашого браузера не дозволяють редактору автоматично виконувати операції вирізування. Будь ласка, використовуйте клавіатуру для цього (Ctrl/Cmd+X)","paste":"Вставити","pasteNotification":"Натисніть %1, щоб вставити. 
Ваш браузер не підтримує вставку за допомогою кнопки панелі інструментів або пункту контекстного меню.","pasteArea":"Область вставки","pasteMsg":"Вставте вміст у область нижче та натисніть OK."},"contextmenu":{"options":"Опції контекстного меню"},"elementspath":{"eleLabel":"Шлях","eleTitle":"%1 елемент"},"filetools":{"loadError":"Виникла помилка під час читання файлу","networkError":"Під час завантаження файлу виникла помилка мережі.","httpError404":"Під час завантаження файлу виникла помилка HTTP (404: Файл не знайдено).","httpError403":"Під час завантаження файлу виникла помилка HTTP (403: Доступ заборонено).","httpError":"Під час завантаження файлу виникла помилка (статус помилки: %1).","noUrlError":"URL завантаження не визначений.","responseError":"Невірна відповідь сервера."},"format":{"label":"Форматування","panelTitle":"Форматування параграфа","tag_address":"Адреса","tag_div":"Нормальний (div)","tag_h1":"Заголовок 1","tag_h2":"Заголовок 2","tag_h3":"Заголовок 3","tag_h4":"Заголовок 4","tag_h5":"Заголовок 5","tag_h6":"Заголовок 6","tag_p":"Нормальний","tag_pre":"Форматований"},"horizontalrule":{"toolbar":"Горизонтальна лінія"},"image":{"alt":"Альтернативний текст","border":"Рамка","btnUpload":"Надіслати на сервер","button2Img":"Бажаєте перетворити обрану кнопку-зображення на просте зображення?","hSpace":"Гориз. відступ","img2Button":"Бажаєте перетворити обране зображення на кнопку-зображення?","infoTab":"Інформація про зображення","linkTab":"Посилання","lockRatio":"Зберегти пропорції","menu":"Властивості зображення","resetSize":"Очистити поля розмірів","title":"Властивості зображення","titleButton":"Властивості кнопки із зображенням","upload":"Надіслати","urlMissing":"Вкажіть URL зображення.","vSpace":"Верт. відступ","validateBorder":"Ширина рамки повинна бути цілим числом.","validateHSpace":"Гориз. відступ повинен бути цілим числом.","validateVSpace":"Верт. 
відступ повинен бути цілим числом."},"indent":{"indent":"Збільшити відступ","outdent":"Зменшити відступ"},"fakeobjects":{"anchor":"Якір","flash":"Flash-анімація","hiddenfield":"Приховані Поля","iframe":"IFrame","unknown":"Невідомий об'єкт"},"link":{"acccessKey":"Гаряча клавіша","advanced":"Додаткове","advisoryContentType":"Тип вмісту","advisoryTitle":"Заголовок","anchor":{"toolbar":"Вставити/Редагувати якір","menu":"Властивості якоря","title":"Властивості якоря","name":"Ім'я якоря","errorName":"Будь ласка, вкажіть ім'я якоря","remove":"Прибрати якір"},"anchorId":"За ідентифікатором елементу","anchorName":"За ім'ям елементу","charset":"Кодування","cssClasses":"Клас CSS","download":"Завантажити як файл","displayText":"Відображуваний текст","emailAddress":"Адреса ел. пошти","emailBody":"Тіло повідомлення","emailSubject":"Тема листа","id":"Ідентифікатор","info":"Інформація посилання","langCode":"Код мови","langDir":"Напрямок мови","langDirLTR":"Зліва направо (LTR)","langDirRTL":"Справа наліво (RTL)","menu":"Вставити посилання","name":"Ім'я","noAnchors":"(В цьому документі немає якорів)","noEmail":"Будь ласка, вкажіть адрес ел. 
пошти","noUrl":"Будь ласка, вкажіть URL посилання","noTel":"Будь ласка, введіть номер телефону","other":"<інший>","phoneNumber":"Номер телефону","popupDependent":"Залежний (Netscape)","popupFeatures":"Властивості випливаючого вікна","popupFullScreen":"Повний екран (IE)","popupLeft":"Позиція зліва","popupLocationBar":"Панель локації","popupMenuBar":"Панель меню","popupResizable":"Масштабоване","popupScrollBars":"Стрічки прокрутки","popupStatusBar":"Рядок статусу","popupToolbar":"Панель інструментів","popupTop":"Позиція зверху","rel":"Зв'язок","selectAnchor":"Оберіть якір","styles":"Стиль CSS","tabIndex":"Послідовність переходу","target":"Ціль","targetFrame":"<фрейм>","targetFrameName":"Ім'я цільового фрейму","targetPopup":"<випливаюче вікно>","targetPopupName":"Ім'я випливаючого вікна","title":"Посилання","toAnchor":"Якір на цю сторінку","toEmail":"Ел. пошта","toUrl":"URL","toPhone":"Телефон","toolbar":"Вставити/Редагувати посилання","type":"Тип посилання","unlink":"Видалити посилання","upload":"Надіслати"},"list":{"bulletedlist":"Маркірований список","numberedlist":"Нумерований список"},"magicline":{"title":"Вставити абзац"},"maximize":{"maximize":"Максимізувати","minimize":"Мінімізувати"},"pastetext":{"button":"Вставити тільки текст","pasteNotification":"Натисніть %1, щоб вставити. Ваш браузер не підтримує вставку за допомогою кнопки панелі інструментів або пункту контекстного меню.","title":"Вставити тільки текст"},"pastefromword":{"confirmCleanup":"Текст, що Ви намагаєтесь вставити, схожий на скопійований з Word. 
Бажаєте очистити його форматування перед вставлянням?","error":"Неможливо очистити форматування через внутрішню помилку.","title":"Вставити з Word","toolbar":"Вставити з Word"},"removeformat":{"toolbar":"Видалити форматування"},"sourcearea":{"toolbar":"Джерело"},"specialchar":{"options":"Опції","title":"Оберіть спеціальний символ","toolbar":"Спеціальний символ"},"scayt":{"btn_about":"Про SCAYT","btn_dictionaries":"Словники","btn_disable":"Вимкнути SCAYT","btn_enable":"Ввімкнути SCAYT","btn_langs":"Мови","btn_options":"Опції","text_title":"Перефірка орфографії по мірі набору"},"stylescombo":{"label":"Стиль","panelTitle":"Стилі форматування","panelTitle1":"Блочні стилі","panelTitle2":"Рядкові стилі","panelTitle3":"Об'єктні стилі"},"table":{"border":"Розмір рамки","caption":"Заголовок таблиці","cell":{"menu":"Комірки","insertBefore":"Вставити комірку перед","insertAfter":"Вставити комірку після","deleteCell":"Видалити комірки","merge":"Об'єднати комірки","mergeRight":"Об'єднати справа","mergeDown":"Об'єднати донизу","splitHorizontal":"Розділити комірку по горизонталі","splitVertical":"Розділити комірку по вертикалі","title":"Властивості комірки","cellType":"Тип комірки","rowSpan":"Об'єднання рядків","colSpan":"Об'єднання стовпців","wordWrap":"Автоперенесення тексту","hAlign":"Гориз. вирівнювання","vAlign":"Верт. вирівнювання","alignBaseline":"По базовій лінії","bgColor":"Колір фону","borderColor":"Колір рамки","data":"Дані","header":"Заголовок","yes":"Так","no":"Ні","invalidWidth":"Ширина комірки повинна бути цілим числом.","invalidHeight":"Висота комірки повинна бути цілим числом.","invalidRowSpan":"Кількість об'єднуваних рядків повинна бути цілим числом.","invalidColSpan":"Кількість об'єднуваних стовбців повинна бути цілим числом.","chooseColor":"Обрати"},"cellPad":"Внутр. 
відступ","cellSpace":"Проміжок","column":{"menu":"Стовбці","insertBefore":"Вставити стовбець перед","insertAfter":"Вставити стовбець після","deleteColumn":"Видалити стовбці"},"columns":"Стовбці","deleteTable":"Видалити таблицю","headers":"Заголовки стовбців/рядків","headersBoth":"Стовбці і рядки","headersColumn":"Стовбці","headersNone":"Без заголовків","headersRow":"Рядки","heightUnit":"height unit","invalidBorder":"Розмір рамки повинен бути цілим числом.","invalidCellPadding":"Внутр. відступ комірки повинен бути цілим числом.","invalidCellSpacing":"Проміжок між комірками повинен бути цілим числом.","invalidCols":"Кількість стовбців повинна бути більшою 0.","invalidHeight":"Висота таблиці повинна бути цілим числом.","invalidRows":"Кількість рядків повинна бути більшою 0.","invalidWidth":"Ширина таблиці повинна бути цілим числом.","menu":"Властивості таблиці","row":{"menu":"Рядки","insertBefore":"Вставити рядок перед","insertAfter":"Вставити рядок після","deleteRow":"Видалити рядки"},"rows":"Рядки","summary":"Детальний опис заголовку таблиці","title":"Властивості таблиці","toolbar":"Таблиця","widthPc":"відсотків","widthPx":"пікселів","widthUnit":"Одиниці вимір."},"undo":{"redo":"Повторити","undo":"Повернути"},"widget":{"move":"Клікніть і потягніть для переміщення","label":"%1 віджет"},"uploadwidget":{"abort":"Завантаження перервано користувачем.","doneOne":"Файл цілком завантажено.","doneMany":"Цілком завантажено %1 файлів.","uploadOne":"Завантаження файлу ({percentage}%)...","uploadMany":"Завантажено {current} із {max} файлів завершено на ({percentage}%)..."},"wsc":{"btnIgnore":"Пропустити","btnIgnoreAll":"Пропустити все","btnReplace":"Замінити","btnReplaceAll":"Замінити все","btnUndo":"Назад","changeTo":"Замінити на","errorLoading":"Помилка завантаження : %s.","ieSpellDownload":"Модуль перевірки орфографії не встановлено. 
Бажаєте завантажити його зараз?","manyChanges":"Перевірку орфографії завершено: 1% слів(ова) змінено","noChanges":"Перевірку орфографії завершено: жодне слово не змінено","noMispell":"Перевірку орфографії завершено: помилок не знайдено","noSuggestions":"- немає варіантів -","notAvailable":"Вибачте, але сервіс наразі недоступний.","notInDic":"Немає в словнику","oneChange":"Перевірку орфографії завершено: змінено одне слово","progress":"Виконується перевірка орфографії...","title":"Перевірка орфографії","toolbar":"Перевірити орфографію"}}; | PypiClean |
/NREL_shift-0.1.0a0-py3-none-any.whl/shift/exporter/opendss.py |
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" This module consists of class and helper functions
to export distribution model in opendss format.
"""
from typing import List
import os
from abc import ABC
from shift.exporter.base import BaseExporter
from shift.load import Load
from shift.transformer import Transformer
from shift.line_section import Line
from shift.exceptions import FolderNotFoundError
from shift.enums import Phase, NumPhase
from shift.constants import VALID_FREQUENCIES
# pylint: disable=redefined-builtin
from shift.exceptions import UnsupportedFrequencyError, NotImplementedError
# Translation table mapping characters that OpenDSS does not accept in
# element/bus names to underscores. Built once at import time.
_INVALID_CHAR_TABLE = str.maketrans({".": "_", " ": "_", "!": "_"})


def remove_invalid_chars(name: str) -> str:
    """Return ``name`` with invalid OpenDSS characters replaced by ``_``.

    The characters ``.``, space, and ``!`` are replaced. Non-string
    inputs (e.g. numeric coordinates) are converted with ``str`` first.

    Args:
        name (str): Raw name; any object convertible with ``str`` is
            accepted.

    Returns:
        str: Sanitized name safe for use in OpenDSS identifiers.
    """
    # str.translate performs all replacements in a single pass instead
    # of one .replace() scan per character.
    return str(name).translate(_INVALID_CHAR_TABLE)
class DSSWriter(ABC):
    """Abstract base for OpenDSS writers.

    Subclasses populate ``files`` with the names of the .dss files they
    export and ``coord_dict`` with bus-name to coordinate mappings.

    Attributes:
        files (List[str]): Names of the OpenDSS files exported so far.
        coord_dict (dict): Mapping between bus name and coordinates.
    """

    def __init__(self) -> None:
        """Initialize an empty writer with no files and no coordinates."""
        self.files: List[str] = []
        self.coord_dict: dict = {}

    def get_filenames(self) -> List[str]:
        """Return the dss file names exported so far (subclasses are
        expected to keep this attribute up to date)."""
        return self.files

    def get_coords(self) -> dict:
        """Return the coordinate mapping for all buses."""
        return self.coord_dict

    def write(self, folder_location: str) -> None:
        """Validate the export folder before models are written.

        Args:
            folder_location (str): Valid folder path.

        Raises:
            FolderNotFoundError: If the folder path does not exist.
        """
        if os.path.exists(folder_location):
            return
        raise FolderNotFoundError(folder_location)
class LoadWriter(DSSWriter):
    """Base class for writers that export `Load` objects to OpenDSS.

    Attributes:
        loads (List[Load]): Loads to be exported.
        file_name (str): Target OpenDSS file name for the loads.
    """

    def __init__(self, loads: List[Load], file_name: str) -> None:
        """Store the loads and the target file name.

        Args:
            loads (List[Load]): Loads to be exported.
            file_name (str): Target OpenDSS file name for the loads.
        """
        super().__init__()
        self.file_name = file_name
        self.loads = loads
class ConstantPowerFactorLoadWriter(LoadWriter):
    """Writes constant power factor loads to an OpenDSS file.

    Refer to `LoadWriter` for the inherited attributes.

    Attributes:
        mapping_dict (dict): Load name to bus name mapping.
    """

    def __init__(
        self, loads: List[Load], mapping_dict: dict, file_name: str
    ) -> None:
        """Store loads, load-to-bus mapping, and target file name.

        Refer to `LoadWriter` for the base arguments.

        Args:
            mapping_dict (dict): Load name to bus name mapping.
        """
        super().__init__(loads, file_name)
        self.mapping_dict = mapping_dict

    def write(self, folder_location: str) -> None:
        """Write one ``new load.`` statement per load; see base class."""
        super().write(folder_location)

        contents = []
        for ld in self.loads:
            # Bus name comes from the load-to-bus mapping; the phase
            # suffix selects the connected phases.
            # pylint: disable-next=line-too-long
            bus = f"{remove_invalid_chars(self.mapping_dict[ld.name])}.{ld.phase.value}"
            contents.append(
                f"new load.{remove_invalid_chars(ld.name)} "
                f"phases={ld.num_phase.value} bus1={bus} "
                f"kv={ld.kv} kw={ld.kw} pf={ld.pf} "
                f"conn={ld.conn_type.value}\n\n"
            )
            self.coord_dict[bus.split(".")[0]] = (ld.longitude, ld.latitude)

        # Only create the file (and record its name) if there is content.
        if not contents:
            return
        with open(
            os.path.join(folder_location, self.file_name),
            "w",
            encoding="utf-8",
        ) as fpointer:
            fpointer.writelines(contents)
        self.files.append(self.file_name)
class TransformerWriter(DSSWriter):
    """Base class for writers that export `Transformer` objects to OpenDSS.

    Attributes:
        transformers (List[Transformer]): Transformers to be exported.
        file_name (str): Target OpenDSS file name for the transformers.
    """

    def __init__(self, transformers: List[Transformer], file_name: str) -> None:
        """Store the transformers and the target file name.

        Args:
            transformers (List[Transformer]): Transformers to be exported.
            file_name (str): Target OpenDSS file name for the transformers.
        """
        super().__init__()
        self.file_name = file_name
        self.transformers = transformers
class TwoWindingSimpleTransformerWriter(TransformerWriter):
    """Writes simple two-winding transformers to an OpenDSS file."""

    def write(self, folder_location: str) -> None:
        """Write one ``new transformer.`` statement per transformer;
        refer to the base class for argument details."""
        super().write(folder_location)

        contents = []
        for xfmr in self.transformers:
            # HT and LT buses share a base name derived from the
            # transformer's location.
            base = (
                f"{remove_invalid_chars(xfmr.longitude)}_"
                f"{remove_invalid_chars(xfmr.latitude)}"
            )
            ht_bus = f"{base}_htnode.{xfmr.primary_phase.value}"
            lt_bus = f"{base}_ltnode.{xfmr.secondary_phase.value}"
            contents.append(
                f"new transformer.{remove_invalid_chars(xfmr.name)} "
                f"phases={xfmr.num_phase.value} buses=[{ht_bus}, {lt_bus}] "
                f"conns=[{xfmr.primary_con.value}, {xfmr.secondary_con.value}] "
                f"kvs=[{xfmr.primary_kv}, {xfmr.secondary_kv}] "
                f"kvas=[{xfmr.kva}, {xfmr.kva}] xhl={xfmr.xhl} "
                f"%noloadloss={xfmr.pct_noloadloss} %r={xfmr.pct_r} leadlag=lead\n\n"
            )
            # Both windings sit at the transformer's coordinates.
            location = (xfmr.longitude, xfmr.latitude)
            self.coord_dict[ht_bus.split(".")[0]] = location
            self.coord_dict[lt_bus.split(".")[0]] = location

        # Only create the file (and record its name) if there is content.
        if not contents:
            return
        with open(
            os.path.join(folder_location, self.file_name),
            "w",
            encoding="utf-8",
        ) as fpointer:
            fpointer.writelines(contents)
        self.files.append(self.file_name)
class LineWriter(DSSWriter):
    """Base class for writers that export `Line` objects to OpenDSS.

    Attributes:
        lines (List[Line]): Line segments to be exported.
        file_name (str): Target OpenDSS file name for the line segments.
    """

    def __init__(self, lines: List[Line], file_name: str) -> None:
        """Store the line segments and the target file name.

        Args:
            lines (List[Line]): Line segments to be exported.
            file_name (str): Target OpenDSS file name for the line segments.
        """
        super().__init__()
        self.file_name = file_name
        self.lines = lines
class GeometryBasedLineWriter(LineWriter):
    """Writer for geometry based line segments.

    Emits four OpenDSS files: line segments, line geometries, wire data and
    concentric-neutral cable data.  Refer to the base class for the base
    class attributes.

    Attributes:
        geometry_file_name (str): OpenDSS file name for writing line geometries
        wire_file_name (str): OpenDSS file name for writing wires
        cable_file_name (str): OpenDSS file name for writing cables
    """
    def __init__(
        self,
        lines: List[Line],
        line_file_name: str,
        geometry_file_name: str,
        wire_file_name: str,
        cable_file_name: str,
    ) -> None:
        """Constructor for `GeometryBasedLineWriter` class.

        Refer to base class for base class arguments.

        Args:
            geometry_file_name (str): OpenDSS file name
                for writing line geometries
            wire_file_name (str): OpenDSS file name for writing wires
            cable_file_name (str): OpenDSS file name for writing cables
        """
        super().__init__(lines, line_file_name)
        self.geometry_file_name = geometry_file_name
        self.wire_file_name = wire_file_name
        self.cable_file_name = cable_file_name
    def write(self, folder_location: str) -> None:
        """Refer to base class for more details.

        Raises:
            NotImplementedError: If conductor type passed is tap shielded cable.
        """
        super().write(folder_location)
        # Accumulators for the four output files.
        line_contents, geometry_contents, wire_contents, cable_contents = (
            [],
            [],
            [],
            [],
        )
        # Deduplicate geometry objects per geometry class, so identical
        # geometries share one OpenDSS linegeometry definition.
        geom_objects_dict = {}
        for line in self.lines:
            geom = line.geometry
            gk = geom.__class__
            if gk not in geom_objects_dict:
                geom_objects_dict[gk] = []
            if geom not in geom_objects_dict[gk]:
                geom_objects_dict[gk].append(geom)
            else:
                # Reuse the previously registered, equal geometry instance.
                geom = geom_objects_dict[gk][geom_objects_dict[gk].index(geom)]
            # pylint: disable-next=line-too-long
            bus1 = f"{remove_invalid_chars(line.fromnode)}.{line.fromnode_phase.value}"
            bus2 = (
                f"{remove_invalid_chars(line.tonode)}.{line.tonode_phase.value}"
            )
            # A zero length is replaced by a tiny positive value; presumably
            # zero-length segments are not acceptable downstream -- confirm.
            line_contents.append(
                f"new line.{remove_invalid_chars(line.name)} "
                + f"bus1={bus1} "
                + f"bus2={bus2} "
                + f"length={line.length if line.length != 0 else 0.0001}"
                + f" geometry={geom.name} units={line.length_unit}\n\n"
            )
            # Node names encode coordinates as "<lon>_<lat>" -- TODO confirm.
            self.coord_dict[bus1.split(".")[0]] = (
                line.fromnode.split("_")[0],
                line.fromnode.split("_")[1],
            )
            self.coord_dict[bus2.split(".")[0]] = (
                line.tonode.split("_")[0],
                line.tonode.split("_")[1],
            )
        geom_object_list = [
            obj for _, obj_arr in geom_objects_dict.items() for obj in obj_arr
        ]
        # Deduplicate conductors across all geometries.
        wire_object_list, cable_object_list = [], []
        for geom in geom_object_list:
            if hasattr(geom, "phase_wire"):
                wire_attr = "wire"
                if geom.phase_wire in wire_object_list:
                    phase_cond = wire_object_list[
                        wire_object_list.index(geom.phase_wire)
                    ]
                else:
                    phase_cond = geom.phase_wire
                    wire_object_list.append(phase_cond)
                # NOTE(review): 'neutral_wire' is only (re)bound inside this
                # branch; a cable-type geometry exposing 'neutral_wire' would
                # reuse a stale value from a previous iteration -- confirm the
                # data model rules that combination out.
                if hasattr(geom, "neutral_wire"):
                    if geom.neutral_wire in wire_object_list:
                        neutral_wire = wire_object_list[
                            wire_object_list.index(geom.neutral_wire)
                        ]
                    else:
                        neutral_wire = geom.neutral_wire
                        wire_object_list.append(neutral_wire)
            else:
                wire_attr = "cncable"
                if geom.phase_cable in cable_object_list:
                    phase_cond = cable_object_list[
                        cable_object_list.index(geom.phase_cable)
                    ]
                else:
                    phase_cond = geom.phase_cable
                    cable_object_list.append(phase_cond)
            geom_x_array = geom.configuration.get_x_array()
            geom_h_array = geom.configuration.get_h_array()
            geom_content = (
                f"new linegeometry.{geom.name} "
                + f"nconds={geom.num_conds} nphases={geom.num_phase.value} "
                + "reduce=no\n"
            )
            # BUG FIX: the loop variable was named 'id', shadowing the
            # builtin; renamed to 'cond_idx'.
            for cond_idx, items in enumerate(zip(geom_x_array, geom_h_array)):
                # The last conductor position carries the neutral wire when
                # the geometry defines one.
                if cond_idx == len(geom_x_array) - 1 and hasattr(
                    geom, "neutral_wire"
                ):
                    geom_content += (
                        f"~ cond={cond_idx+1} {wire_attr}={neutral_wire.name} "
                        # pylint: disable-next=line-too-long
                        + f"x={items[0]} h={items[1]} units={geom.configuration.unit}\n"
                    )
                else:
                    geom_content += (
                        f"~ cond={cond_idx+1} {wire_attr}={phase_cond.name} "
                        # pylint: disable-next=line-too-long
                        + f"x={items[0]} h={items[1]} units={geom.configuration.unit}\n"
                    )
            geom_content += "\n"
            geometry_contents.append(geom_content)
        # Emit the deduplicated wire and cable definitions.
        for wire in wire_object_list:
            wire_contents.append(
                f"new wiredata.{remove_invalid_chars(wire.name)} "
                + f"diam={wire.diam} gmrac={wire.gmrac} "
                + f"gmrunits={wire.gmrunits} normamps={wire.normamps} "
                + f"rac={wire.rac} runits={wire.runits} "
                + f"radunits={wire.radunits}\n\n"
            )
        for wire in cable_object_list:
            # Only concentric-neutral cables are supported; tape-shielded
            # cables (having 'taplayer') are rejected below.
            if not hasattr(wire, "taplayer"):
                cable_contents.append(
                    f"new CNData.{remove_invalid_chars(wire.name)}\n"
                    # pylint: disable-next=line-too-long
                    f"~ runits={wire.runits} radunits={wire.radunits} gmrunits={wire.gmrunits}\n"
                    + f"~ inslayer={wire.inslayer} diains={wire.diains}"
                    + f" diacable={wire.diacable} epsr=2.3\n"
                    + f"~ rac={wire.rac} gmrac={wire.gmrac} diam={wire.diam}\n"
                    + f"~ rstrand={wire.rstrand} gmrstrand={wire.gmrstrand}"
                    + f" diastrand={wire.diastrand} k={wire.k}"
                    + f" normamps={wire.normamps}\n\n"
                )
            else:
                raise NotImplementedError(
                    f"Writer for this type {wire} is not developed yet!"
                )
        # Write each non-empty content list to its file and record the name.
        for file_, contents in zip(
            [
                self.wire_file_name,
                self.cable_file_name,
                self.geometry_file_name,
                self.file_name,
            ],
            [wire_contents, cable_contents, geometry_contents, line_contents],
        ):
            if contents:
                with open(
                    os.path.join(folder_location, file_), "w", encoding="utf-8"
                ) as fpointer:
                    fpointer.writelines(contents)
                self.files.append(file_)
class OpenDSSExporter(BaseExporter):
    """OpenDSS Exporter Class.

    Runs every configured writer, collects the produced file names and bus
    coordinates, then writes a bus-coordinate file and a master .dss file
    that redirects to all generated files.

    Attributes:
        writers (List[DSSWriter]): List of `DSSWriter` Instances
        folder_location (str): Path to a folder for writing OpenDSS files
        master_file_name (str): OpenDSS file name for master file
        circuit_name (str): OpenDSS circuit name
        circuit_kv (float): OpenDSS circuit voltage in kV
        circuit_freq (float): OpenDSS circuit base frequency in Hz
        kv_arrays (List[float]): List of base voltage levels
        circuit_phase (Phase): Phase instance for OpenDSS circuit
        circuit_bus (str): Bus name for OpenDSS circuit
        circuit_num_phase (NumPhase): NumPhase instance for OpenDSS circuit
        circuit_z1 (List[float]): List of positive sequence impedance values
        circuit_z0 (List[float]): List of zero sequence impedance values
    """
    def __init__(
        self,
        writers: List[DSSWriter],
        folder_location: str,
        master_file_name: str,
        circuit_name: str,
        circuit_kv: float,
        circuit_freq: float,
        circuit_phase: Phase,
        circuit_num_phase: NumPhase,
        circuit_bus: str,
        circuit_z1: List[float],
        circuit_z0: List[float],
        kv_arrays: List[float],
    ) -> None:
        """Constructor for `OpenDSSExporter` class.

        Args:
            writers (List[DSSWriter]): List of `DSSWriter` Instances
            folder_location (str): Path to a folder for writing OpenDSS files
            master_file_name (str): OpenDSS file name for master file
            circuit_name (str): OpenDSS circuit name
            circuit_kv (float): OpenDSS circuit voltage in kV
            circuit_freq (float): OpenDSS circuit base frequency in Hz
            circuit_phase (Phase): Phase instance for OpenDSS circuit
            circuit_num_phase (NumPhase): NumPhase instance for OpenDSS circuit
            circuit_bus (str): Bus name for OpenDSS circuit
            circuit_z1 (List[float]): List of positive sequence impedance values
            circuit_z0 (List[float]): List of zero sequence impedance values
            kv_arrays (List[float]): List of base voltage levels

        Raises:
            UnsupportedFrequencyError: If invalid frequency is passed.
            FolderNotFoundError: If folder path is not found.
        """
        # Validate inputs before storing anything.
        if circuit_freq not in VALID_FREQUENCIES:
            raise UnsupportedFrequencyError(circuit_freq)
        self.writers = writers
        self.folder_location = folder_location
        self.master_file_name = master_file_name
        self.circuit_name = circuit_name
        self.circuit_kv = circuit_kv
        self.circuit_freq = circuit_freq
        self.kv_arrays = kv_arrays
        self.circuit_phase = circuit_phase
        self.circuit_bus = circuit_bus
        self.circuit_num_phase = circuit_num_phase
        self.circuit_z1 = circuit_z1
        self.circuit_z0 = circuit_z0
        if not os.path.exists(self.folder_location):
            raise FolderNotFoundError(folder_location)
    def export(self):
        """Refer to base class for more details."""
        # Run every writer and gather generated file names plus coordinates.
        files = []
        coord_dict = {}
        for writer in self.writers:
            writer.write(self.folder_location)
            files += writer.get_filenames()
            coord_dict.update(writer.get_coords())
        # One "bus, x, y" line per bus.
        coord_content = ""
        for key, vals in coord_dict.items():
            coord_content += f"{key}, {vals[0]}, {vals[1]}\n"
        with open(
            os.path.join(self.folder_location, "buscoords.dss"),
            "w",
            encoding="utf-8",
        ) as fpointer:
            # writelines over a str writes it character by character; the
            # final file content is the same as a single write.
            fpointer.writelines(coord_content)
        # Build the master file: circuit definition, redirects to each
        # generated file, voltage bases, bus coordinates, then solve.
        master_file_content = (
            "clear\n\n"
            + f"new circuit.{self.circuit_name} basekv={self.circuit_kv} "
            # pylint: disable-next=line-too-long
            + f"basefreq={self.circuit_freq} pu=1.0 phases={self.circuit_num_phase.value} "
            + f"Z1={self.circuit_z1} Z0={self.circuit_z0} "
            # pylint: disable-next=line-too-long
            + f"bus1={remove_invalid_chars(self.circuit_bus)}.{self.circuit_phase.value} \n\n"
        )
        for file in files:
            master_file_content += f"redirect {file}\n\n"
        master_file_content += (
            f"set voltagebases={self.kv_arrays}\n\nCalcvoltagebases\n\n"
        )
        master_file_content += "Buscoords buscoords.dss\n\nsolve"
        with open(
            os.path.join(self.folder_location, self.master_file_name),
            "w",
            encoding="utf-8",
        ) as fpointer:
            fpointer.writelines(master_file_content)
/MLplayground-1.0.1.tar.gz/MLplayground-1.0.1/miniml/minisvm/AlgebraicSVC.py | from .Plot import *
class AlgebraicSVC(Plot):
    """Hard-margin SVM classifier solved algebraically through its KKT system.

    Instead of an iterative optimizer, the Lagrange multipliers are obtained
    by solving the linear system A*rho = b assembled from the KKT conditions.
    """
    def __init__(self, z=None, u=None, b=1):
        """Store the data set and initialise solver state.

        Args:
            z: class labels, only two values allowed: 1 or -1.
            u: data points.
            b: margin value; also the right-hand side entries of A*rho = b.
        """
        # BUG FIX: the original signature used mutable defaults (z=[], u=[]).
        z = [] if z is None else z
        u = [] if u is None else u
        self.z = np.array(z)  # classes, only for two: 1 or -1
        self.u = np.array(u)  # data
        self.N = len(z)       # number of points
        # parameters for calculating lambda
        self.A = []           # A in A*rho = b
        self.b = b            # margin, also one value for b in A*rho = b
        self.lambda_ = []     # Lagrange multipliers
        self.miu = None       # mu (equality-constraint multiplier)
        # parameters for calculating w
        self.w = []           # optimal w
        self.w0 = None        # optimal bias term w0
        # fixed, do not change
        self.ani = False
        self.ims = []
        self.kernel = 'linear'
    def cal_A_b(self):
        """Build (and cache) the KKT system matrix A and vector b."""
        # No data supplied yet: nothing to assemble.
        if isinstance(self.z, list) and isinstance(self.u, list):
            return None, None
        # Return the cached system once both parts have been computed.
        if not isinstance(self.A, list) and not isinstance(self.b, list):
            return self.A, self.b
        A = []
        for i in range(self.N):
            Ai = [
                self.z[j] * self.z[i] * np.matmul(self.u[j], self.u[i])
                for j in range(self.N)
            ]
            Ai.append(-self.z[i])
            A.append(Ai)
        # Final row encodes the constraint sum(z_i * lambda_i) = 0.
        A.append([self.z[i] for i in range(self.N)] + [0])
        self.A = np.array(A)
        self.b = np.full(self.N + 1, self.b)
        self.b[-1] = 0  # RHS entry for sum(z_i * lambda_i) = 0
        return self.A, self.b
    def calc_lambda_miu(self):
        """Solve A*rho = b where rho = [lambda_1..lambda_N, miu]."""
        # BUG FIX: '!= None' replaced by identity comparison.
        if self.miu is not None:
            return self.lambda_, self.miu
        A, b = self.cal_A_b()
        if A is None or b is None:
            return None, None
        rho = np.matmul(np.linalg.inv(A), b)
        self.lambda_, self.miu = rho[:-1], rho[-1]
        return self.lambda_, self.miu
    def calc_lambda_miu_with_zero_lambda(self, zeros=None):
        """Re-solve the KKT system with the multipliers at `zeros` pinned to 0.

        Args:
            zeros: indices of the Lagrange multipliers forced to zero.
        """
        # BUG FIX: the original used a mutable default (zeros=[]) and mutated
        # the caller's list via zeros.sort(); work on a sorted copy instead.
        zeros = sorted(zeros) if zeros is not None else []
        # After deleting, A becomes NxN and b becomes Nx1.
        A, b = np.delete(self.A, zeros, axis=1), np.delete(self.b, zeros)
        A = np.delete(A, zeros, axis=0)
        if A is None or b is None:
            return None, None
        rho = np.matmul(np.linalg.inv(A), b)
        self.lambda_, self.miu = rho[:-1], rho[-1]
        # Re-insert the pinned zero multipliers at their original positions.
        for loc in zeros:
            self.lambda_ = np.insert(self.lambda_, loc, 0)
        return self.lambda_, self.miu
    def calc_w_w0(self):
        """Recover the optimal weight vector w and bias w0 from the multipliers."""
        self.w = np.full(2, 0.0)
        for i in range(self.N):
            self.w += self.lambda_[i] * self.z[i] * self.u[i]
        # Any support vector (lambda_i != 0) determines w0.
        for i in range(self.N):
            if self.lambda_[i] != 0:
                self.w0 = self.b[0] / self.z[i] - np.matmul(self.w, self.u[i])
                break
        return self.w, self.w0
    def check_KKT_lambda(self):
        """Check that the multipliers satisfy the KKT conditions."""
        sum_lambda_z = 0
        for i in range(self.N):
            sum_lambda_z += self.lambda_[i] * self.z[i]
            if self.lambda_[i] < 0:
                print(f"[Check Error] Lambda {self.lambda_[i]} is less than zero!!!")
                return False
        if abs(sum_lambda_z) > 1e-10:
            print(f"[Check Error] Sum of product of lambda and z ({sum_lambda_z}) is not zero!!!")
            return False
        return True
    def check_KKT_w_w0(self):
        """Check that the resulting w and w0 satisfy the KKT conditions."""
        sum_lambda_z_u = np.full(2, 0.0)
        for i in range(self.N):
            check1 = self.lambda_[i] * (self.z[i] * (np.matmul(self.w, self.u[i]) + self.w0) - self.b[0])
            if abs(check1) > 1e-10:
                print(f"[Check Error] λi*[Zi(w*ui + w0)-1] != 0 ({check1})!!!")
                return False
            check2 = self.z[i] * (np.matmul(self.w, self.u[i]) + self.w0) - self.b[0]
            check2 = 0 if abs(check2) < 1e-10 else check2
            if check2 < 0:
                print(f"[Check Error] Zi(w*ui + w0)-1 < 0 ({check2})!!!")
                return False
            sum_lambda_z_u += self.lambda_[i] * self.z[i] * self.u[i]
        # BUG FIX: 'sum(...)' on a boolean array replaced by the explicit
        # np.any, which expresses the intent directly.
        if np.any(sum_lambda_z_u != self.w):
            print(f"[Check Error] W* != λi*Zi*ui {sum_lambda_z_u} != {self.w}!!!")
            return False
        return True
/Dulcinea-0.11.tar.gz/Dulcinea-0.11/lib/permission.py | from dulcinea.spec import instance, sequence, anything, spec
from dulcinea.base import DulcineaPersistent
from sets import Set
def new_permissions(connection):
    """One-off data migration (Python 2): copy grant information off every
    PermissionManager onto per-user Permissions objects, then drop the old
    'permissions' storage and the legacy admin entry.
    """
    from durus.connection import gen_every_instance
    from dulcinea.user import DulcineaUser, Permissions
    from dulcinea.contact import ContactAdmin
    from dulcinea.permission import PermissionManager
    # Give every user a fresh, empty Permissions container.
    users = list(gen_every_instance(connection, DulcineaUser))
    for user in users:
        user.permissions = Permissions()
    permission_managers = list(gen_every_instance(connection, PermissionManager))
    for permission_manager in permission_managers:
        if isinstance(permission_manager, ContactAdmin):
            granter = True
            # NOTE(review): 'user' here is the leftover loop variable from the
            # loop above (i.e. the last user) -- this looks unintended;
            # confirm whether a class-level attribute was meant instead.
            valid_permissions = user.global_permissions
        else:
            granter = permission_manager
            valid_permissions = permission_manager.valid_permissions
        for permission in valid_permissions:
            # Direct grants copy straight over to the grantee user.
            for grantee in permission_manager.get_direct_grantees(permission):
                if isinstance(grantee, DulcineaUser):
                    grantee.permissions.grant(permission, granter)
                else:
                    print ("Found direct grantee that's not a user!",
                           permission_manager, grantee, permission)
            permission_set = permission_manager.permissions.get(permission)
            # Indirect grants: skip the known ContactAdmin edit-resources
            # chain, report anything else, then materialize the effective
            # grants per user.
            if permission_set and permission_set.indirect_grantees:
                if len(permission_set.indirect_grantees) == 1:
                    other, other_permission = list(
                        permission_set.indirect_grantees)[0]
                    if (permission == 'edit-resources' and
                        other_permission == 'edit-resources' and
                        isinstance(other, ContactAdmin)):
                        continue
                print permission, permission_manager, list(
                    permission_set.indirect_grantees)
                for user in users:
                    if permission_manager.grants(permission, user):
                        user.permissions.grant(permission, granter)
    # Remove the migrated storage from every manager.
    for permission_manager in permission_managers:
        if hasattr(permission_manager, 'permissions'):
            del permission_manager.permissions
        else:
            print repr(permission_manager), 'has no permissions attr'
    del connection.get_root()['user_db'].admin
class PermissionSet(DulcineaPersistent):
    """Persistent record of who holds one permission.

    A permission is held either directly (the object is in
    direct_grantees) or indirectly (some PermissionManager grants a
    prerequisite permission to the object).
    """
    direct_grantees_is = spec(
        sequence(anything, Set),
        "This permission is granted to any object in this set.")
    indirect_grantees_is = spec(
        sequence((instance('PermissionManager'), str), Set),
        "This permission is granted if 'pm' grants the permission "
        "'prerequisite' to the object.")
    def get_items(self):
        """Return (grantee, None) pairs followed by (manager, prerequisite) pairs."""
        entries = [(grantee, None) for grantee in self.direct_grantees]
        entries.extend(self.indirect_grantees)
        return entries
    def get_direct_grantees(self):
        """Return the direct grantees as a plain list."""
        return [grantee for grantee in self.direct_grantees]
    def grants(self, obj, _covered=None):
        """Return true if 'obj' holds this permission, directly or indirectly."""
        if obj in self.direct_grantees:
            return True
        return any(
            manager.grants(prerequisite, obj, _covered)
            for manager, prerequisite in self.indirect_grantees)
class PermissionManager:
    """
    Class attributes:
      valid_permissions : { permission:string : description:string }
        Subclasses should set this value to list and describe the permissions
        they may grant.
    """
    valid_permissions = None
    def get_direct_grantees(self, permission):
        """(permission: str) -> [obj]
        Return the list of objects that are directly granted 'permission'.
        """
        permission_set = self.permissions.get(permission)
        return [] if permission_set is None else permission_set.get_direct_grantees()
    def grants(self, permission, obj, _covered=None):
        """(permission:str, obj, _covered:Set=None) -> bool
        Return true if 'permission' is granted (directly or indirectly)
        to 'obj'.
        """
        if permission not in self.permissions:
            return False
        # '_covered' tracks managers already visited, guarding against
        # cycles between permission managers.
        covered = _covered or Set()
        if self in covered:
            return False
        covered.add(self)
        return self.permissions[permission].grants(obj, covered)
/ORCID-Hub-4.16.6.tar.gz/ORCID-Hub-4.16.6/docs/installation.rst | .. _installation:
ORCID Hub Installation
======================
Python Version
--------------
We recommend using the latest version of Python 3: Python 3.6.
Dependencies
------------
In order to enable authentication via ORCID you need to acquire ORCID API credentials:
- Create or login with an existing account at https://orcid.org (or https://sandbox.orcid.org)
- Navigate to "Developer Tools" (https://orcid.org/developer-tools or https://sandbox.orcid.org/developer-tools);
- Add http://127.0.0.1:5000/auth to redirect URLs and hit *Save*;
- Copy CLIENT_ID and CLIENT_SECRET and set up environment variables ORCID_CLIENT_ID and ORCID_CLIENT_SECRET with these values;
- run the application (see *Minimal Deployment* below).

TODO: acquire ORCID API credentials, add the link
Sign up at ORCID with your email address.
Optional dependencies
~~~~~~~~~~~~~~~~~~~~~
For the integration with Shibboleth, the application should be deployed on Apache web server with ...
TODO: set up SENTRY account...
Minimal Deployment
------------------
A minimal running ORCID Hub (assuming you have created and activated a Python 3.6 virtual environment):
.. code-block:: bash
virtualenv -p python3.6 venv
. ./venv/bin/activate
pip install -U 'orcid-hub'
orcidhub cradmin myadmin@mydomain.net --orcid YOUR-ORCID-ID -O YOUR-ORGANISATION-NAME
orcidhub run
By default the application will create an SQLite3 database.
You can customize the backend by specifying *DATABASE_URL* (default: *sqlite:///orcidhub.db*), for example:
.. code-block:: bash
export DATABASE_URL=postgresql://test.orcidhub.org.nz:5432/orcidhub
# OR
export DATABASE_URL=sqlite:///data.db
It is possible to run the application as stand-alone Python Flask application using another remote
application instance for Tuakiri user authentication. For example, if the remote
(another application instance) url is https://dev.orcidhub.org.nz, all you need is to set up
environment variable `export EXTERNAL_SP=https://dev.orcidhub.org.nz/Tuakiri/SP`.
.. code-block:: bash
export EXTERNAL_SP=https://dev.orcidhub.org.nz/Tuakiri/SP
export DATABASE_URL=sqlite:///data.db
export FLASK_ENV=development
orcidhub run
To connect to the PostgreSQL node (if you are using the Docker image):
.. code-block:: bash
export PGHOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker-compose ps -q db))
export DATABASE_URL=postgresql://orcidhub:p455w0rd@${PGHOST}:5432/orcidhub
| PypiClean |
/Furious-GUI-0.2.4.tar.gz/Furious-GUI-0.2.4/Furious/Utility/Theme.py | from PySide6 import QtCore
from PySide6.QtGui import QColor, QFont, QSyntaxHighlighter, QTextCharFormat
class HighlightRules:
    """Bundle a regular expression with the QTextCharFormat used to paint it.

    Args:
        regex: pattern string compiled into a QRegularExpression.
        color: color name/hex applied as the foreground.
        isBold: when True, the format also uses a bold font weight.
        isJSONKey: marks JSON-key rules, whose trailing ':' the consumer
            excludes from highlighting.
    """
    def __init__(self, regex, color, isBold=False, isJSONKey=False):
        self.regex = QtCore.QRegularExpression(regex)
        self.color = QColor(color)
        self.isJSONKey = isJSONKey
        char_format = QTextCharFormat()
        char_format.setForeground(self.color)
        if isBold:
            char_format.setFontWeight(QFont.Weight.Bold)
        self.rules = char_format
class Theme(QSyntaxHighlighter):
    """Abstract base class for syntax-highlighting themes.

    Concrete themes implement ``getStyleSheet`` plus the ``highlightBlock``
    hook inherited from QSyntaxHighlighter.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @staticmethod
    def getStyleSheet(*args, **kwargs):
        # Subclasses must return a Qt style sheet string for the widget.
        raise NotImplementedError
class DraculaTheme(Theme):
    """Dracula color scheme: highlight rules for JSON text plus the editor
    style sheet.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Rule order matters: later rules repaint over earlier matches.
        self.highlightRules = [
            # true/false/null keywords -> pink, bold.
            HighlightRules(r'\b(true|false|null)\b', '#FF79C6', isBold=True),
            # Numeric literals -> purple.
            HighlightRules(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', '#BD93F9'),
            # Structural symbols (: , [ ] { }) -> white.
            HighlightRules(r'[:,\[\]\{\}]', '#F8F8F2'),
            # Double-quoted strings -> yellow.
            HighlightRules(r'"[^"\\]*(\\.[^"\\]*)*"', '#F1FA8C'),
            # JSON keys -> green (trailing ':' excluded when painting).
            HighlightRules(r'"([^"\\]*(\\.[^"\\]*)*)"\s*:', '#50FA7B', isJSONKey=True),
            # Lines starting with '#' (display-only hints) -> grey.
            HighlightRules(r'^#.*', '#6272a4'),
        ]
    def highlightBlock(self, text):
        """Apply every highlight rule to one block (line) of text."""
        for rule in self.highlightRules:
            matches = rule.regex.globalMatch(text)
            while matches.hasNext():
                found = matches.next()
                if rule.isJSONKey:
                    # Skip the ':' the key pattern also captures.
                    span = found.capturedLength() - 1
                else:
                    span = found.capturedLength()
                self.setFormat(found.capturedStart(), span, rule.rules)
    @staticmethod
    def getStyleSheet(widgetName, fontFamily):
        """Build the Dracula style sheet for the given widget and font."""
        parts = [
            f"{widgetName} {{",
            " background-color: #282A36;",
            " color: #F8F8F2;",
            f" font-family: '{fontFamily}';",
            "}",
        ]
        return "".join(parts)
/Blue-DiscordBot-3.2.0.tar.gz/Blue-DiscordBot-3.2.0/bluebot/core/drivers/red_mongo.py | import re
from getpass import getpass
from typing import Match, Pattern, Tuple
from urllib.parse import quote_plus
import motor.core
import motor.motor_asyncio
from motor.motor_asyncio import AsyncIOMotorCursor
from .red_base import BaseDriver, IdentifierData
__all__ = ["Mongo"]
_conn = None
def _initialize(**kwargs):
    """Create the module-level Motor client from connection settings.

    Expected kwargs: URI (scheme, default "mongodb"), HOST, PORT (0 means
    "no explicit port", e.g. for mongodb+srv), USERNAME, PASSWORD and
    DB_NAME (default "default_db").
    """
    uri = kwargs.get("URI", "mongodb")
    host = kwargs["HOST"]
    port = kwargs["PORT"]
    admin_user = kwargs["USERNAME"]
    admin_pass = kwargs["PASSWORD"]
    db_name = kwargs.get("DB_NAME", "default_db")
    # BUG FIX: the original used "port is 0", an identity test that is not
    # guaranteed to hold for equal integers; compare with == instead.
    if port == 0:
        ports = ""
    else:
        ports = ":{}".format(port)
    # Credentials are URL-quoted so special characters survive the URI.
    if admin_user is not None and admin_pass is not None:
        url = "{}://{}:{}@{}{}/{}".format(
            uri, quote_plus(admin_user), quote_plus(admin_pass), host, ports, db_name
        )
    else:
        url = "{}://{}{}/{}".format(uri, host, ports, db_name)
    global _conn
    _conn = motor.motor_asyncio.AsyncIOMotorClient(url)
class Mongo(BaseDriver):
    """
    Subclass of :py:class:`.red_base.BaseDriver`.

    Stores config data in MongoDB via Motor.  Document ids are composite
    (``{"RED_uuid": ..., "RED_primary_key": [...]}``) and key names are
    escaped because MongoDB forbids ``.`` and ``$`` in field names.
    """
    def __init__(self, cog_name, identifier, **kwargs):
        super().__init__(cog_name, identifier)
        # The Motor client is module-wide; create it lazily on first use.
        if _conn is None:
            _initialize(**kwargs)
    async def has_valid_connection(self) -> bool:
        # Maybe fix this?
        # NOTE(review): always reports a valid connection without probing
        # the server -- confirm whether a real ping is wanted here.
        return True
    @property
    def db(self) -> motor.core.Database:
        """
        Gets the mongo database for this cog's name.
        .. warning::
            Right now this will cause a new connection to be made every time the
            database is accessed. We will want to create a connection pool down the
            line to limit the number of connections.
        :return:
            PyMongo Database object.
        """
        return _conn.get_database()
    def get_collection(self, category: str) -> motor.core.Collection:
        """
        Gets a specified collection within the PyMongo database for this cog.
        Unless you are doing custom stuff ``category`` should be one of the class
        attributes of :py:class:`core.config.Config`.
        :param str category:
        :return:
            PyMongo collection object.
        """
        # Collections are namespaced per cog: db[cog_name][category].
        return self.db[self.cog_name][category]
    def get_primary_key(self, identifier_data: IdentifierData) -> Tuple[str, ...]:
        # Thin accessor kept for symmetry with the other drivers.
        # noinspection PyTypeChecker
        return identifier_data.primary_key
    async def rebuild_dataset(self, identifier_data: IdentifierData, cursor: AsyncIOMotorCursor):
        """Reassemble a nested dict from the documents yielded by ``cursor``,
        nesting by each document's primary-key path."""
        ret = {}
        async for doc in cursor:
            pkeys = doc["_id"]["RED_primary_key"]
            del doc["_id"]
            doc = self._unescape_dict_keys(doc)
            if len(pkeys) == 0:
                # Global data
                ret.update(**doc)
            elif len(pkeys) > 0:
                # All other data
                partial = ret
                # Walk/create the nesting path, skipping the keys already
                # fixed by the query's own primary key.
                for key in pkeys[:-1]:
                    if key in identifier_data.primary_key:
                        continue
                    if key not in partial:
                        partial[key] = {}
                    partial = partial[key]
                if pkeys[-1] in identifier_data.primary_key:
                    partial.update(**doc)
                else:
                    partial[pkeys[-1]] = doc
        return ret
    async def get(self, identifier_data: IdentifierData):
        """Fetch the value stored under ``identifier_data``.

        Raises KeyError when no matching document/field exists, which is
        what Config expects.
        """
        mongo_collection = self.get_collection(identifier_data.category)
        pkey_filter = self.generate_primary_key_filter(identifier_data)
        if len(identifier_data.identifiers) > 0:
            # Full path known: project just the requested dotted field.
            dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
            proj = {"_id": False, dot_identifiers: True}
            partial = await mongo_collection.find_one(filter=pkey_filter, projection=proj)
        else:
            # The case here is for partial primary keys like all_members()
            cursor = mongo_collection.find(filter=pkey_filter)
            partial = await self.rebuild_dataset(identifier_data, cursor)
        if partial is None:
            raise KeyError("No matching document was found and Config expects a KeyError.")
        # Descend to the requested leaf (raises KeyError when absent).
        for i in identifier_data.identifiers:
            partial = partial[i]
        if isinstance(partial, dict):
            return self._unescape_dict_keys(partial)
        return partial
    async def set(self, identifier_data: IdentifierData, value=None):
        """Upsert ``value`` at the location described by ``identifier_data``."""
        uuid = self._escape_key(identifier_data.uuid)
        primary_key = list(map(self._escape_key, self.get_primary_key(identifier_data)))
        dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
        if isinstance(value, dict):
            # Setting an empty dict is equivalent to clearing the location.
            if len(value) == 0:
                await self.clear(identifier_data)
                return
            value = self._escape_dict_keys(value)
        mongo_collection = self.get_collection(identifier_data.category)
        if len(dot_identifiers) > 0:
            update_stmt = {"$set": {dot_identifiers: value}}
        else:
            update_stmt = {"$set": value}
        await mongo_collection.update_one(
            {"_id": {"RED_uuid": uuid, "RED_primary_key": primary_key}},
            update=update_stmt,
            upsert=True,
        )
    def generate_primary_key_filter(self, identifier_data: IdentifierData):
        """Build the Mongo filter matching the documents addressed by
        ``identifier_data`` (exact, partial, or any primary key)."""
        uuid = self._escape_key(identifier_data.uuid)
        primary_key = list(map(self._escape_key, self.get_primary_key(identifier_data)))
        ret = {"_id.RED_uuid": uuid}
        if len(identifier_data.identifiers) > 0:
            # Full primary key: match it exactly.
            ret["_id.RED_primary_key"] = primary_key
        elif len(identifier_data.primary_key) > 0:
            # Partial primary key: match the known prefix positionally.
            for i, key in enumerate(primary_key):
                keyname = f"_id.RED_primary_key.{i}"
                ret[keyname] = key
        else:
            # No primary key at all: match every document for this uuid.
            ret["_id.RED_primary_key"] = {"$exists": True}
        return ret
    async def clear(self, identifier_data: IdentifierData):
        # There are three cases here:
        # 1) We're clearing out a subset of identifiers (aka identifiers is NOT empty)
        # 2) We're clearing out full primary key and no identifiers
        # 3) We're clearing out partial primary key and no identifiers
        # 4) Primary key is empty, should wipe all documents in the collection
        mongo_collection = self.get_collection(identifier_data.category)
        pkey_filter = self.generate_primary_key_filter(identifier_data)
        if len(identifier_data.identifiers) == 0:
            # This covers cases 2-4
            await mongo_collection.delete_many(pkey_filter)
        else:
            # Case 1: unset just the dotted field inside matching documents.
            dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
            await mongo_collection.update_one(pkey_filter, update={"$unset": {dot_identifiers: 1}})
    @staticmethod
    def _escape_key(key: str) -> str:
        """Escape '.' and '$' (and pre-escaped sequences) in a field name."""
        return _SPECIAL_CHAR_PATTERN.sub(_replace_with_escaped, key)
    @staticmethod
    def _unescape_key(key: str) -> str:
        """Reverse :meth:`_escape_key` on a field name."""
        return _CHAR_ESCAPE_PATTERN.sub(_replace_with_unescaped, key)
    @classmethod
    def _escape_dict_keys(cls, data: dict) -> dict:
        """Recursively escape all keys in a dict."""
        ret = {}
        for key, value in data.items():
            key = cls._escape_key(key)
            if isinstance(value, dict):
                value = cls._escape_dict_keys(value)
            ret[key] = value
        return ret
    @classmethod
    def _unescape_dict_keys(cls, data: dict) -> dict:
        """Recursively unescape all keys in a dict."""
        ret = {}
        for key, value in data.items():
            key = cls._unescape_key(key)
            if isinstance(value, dict):
                value = cls._unescape_dict_keys(value)
            ret[key] = value
        return ret
_SPECIAL_CHAR_PATTERN: Pattern[str] = re.compile(r"([.$]|\\U0000002E|\\U00000024)")
_SPECIAL_CHARS = {
".": "\\U0000002E",
"$": "\\U00000024",
"\\U0000002E": "\\U&0000002E",
"\\U00000024": "\\U&00000024",
}
def _replace_with_escaped(match: Match[str]) -> str:
return _SPECIAL_CHARS[match[0]]
_CHAR_ESCAPE_PATTERN: Pattern[str] = re.compile(r"(\\U0000002E|\\U00000024)")
_CHAR_ESCAPES = {
"\\U0000002E": ".",
"\\U00000024": "$",
"\\U&0000002E": "\\U0000002E",
"\\U&00000024": "\\U00000024",
}
def _replace_with_unescaped(match: Match[str]) -> str:
return _CHAR_ESCAPES[match[0]]
def get_config_details():
    """Interactively prompt for MongoDB connection settings.

    Returns a dict with HOST, PORT, USERNAME, PASSWORD, DB_NAME and URI
    keys, suitable for passing to ``_initialize``.
    """
    while True:
        uri = input("Enter URI scheme (mongodb or mongodb+srv): ")
        # BUG FIX: the original compared strings with "is"; identity tests
        # on user input are unreliable because input() strings are not
        # interned, so the default/port branches could silently misfire.
        if uri == "":
            uri = "mongodb"  # default scheme
        if uri in ("mongodb", "mongodb+srv"):
            break
        print("Invalid URI scheme")
    host = input("Enter host address: ")
    if uri == "mongodb":
        port = int(input("Enter host port: "))
    else:
        # mongodb+srv URIs carry no explicit port (see _initialize).
        port = 0
    admin_uname = input("Enter login username: ")
    admin_password = getpass("Enter login password: ")
    db_name = input("Enter mongodb database name: ")
    # An empty username means "no authentication".
    if admin_uname == "":
        admin_uname = admin_password = None
    return {
        "HOST": host,
        "PORT": port,
        "USERNAME": admin_uname,
        "PASSWORD": admin_password,
        "DB_NAME": db_name,
        "URI": uri,
    }
/FinvoiceLib-0.1.13.tar.gz/FinvoiceLib-0.1.13/finvoicelib/elements/epi.py |
from finvoicelib.elements import AccountElement
from finvoicelib.elements import Element
from finvoicelib.elements import ReferenceNumberElement
class EpiAccountID(AccountElement):
    """
    Element for the Finvoice ``EpiAccountID`` XML tag (account-typed).
    """
    tag = 'EpiAccountID'
class EpiBei(Element):
    """
    Element for the Finvoice ``EpiBei`` XML tag.
    """
    tag = 'EpiBei'
class EpiBfiIdentifier(Element):
    """
    Element for the Finvoice ``EpiBfiIdentifier`` XML tag.
    """
    tag = 'EpiBfiIdentifier'
class EpiBfiPartyDetails(Element):
    """
    Element for the Finvoice ``EpiBfiPartyDetails`` XML tag.
    Aggregates: ``EpiBfiIdentifier``.
    """
    tag = 'EpiBfiPartyDetails'
    aggregate = [EpiBfiIdentifier]
class EpiCharge(Element):
    """
    Element for the Finvoice ``EpiCharge`` XML tag.
    """
    tag = 'EpiCharge'
class EpiDate(Element):
    """
    Element for the Finvoice ``EpiDate`` XML tag.
    """
    tag = 'EpiDate'
class EpiDateOptionDate(Element):
    """
    Element for the Finvoice ``EpiDateOptionDate`` XML tag.
    """
    tag = 'EpiDateOptionDate'
class EpiInstructedAmount(Element):
    """
    Element for the Finvoice ``EpiInstructedAmount`` XML tag.
    """
    tag = 'EpiInstructedAmount'
class EpiReference(Element):
    """
    Element for the Finvoice ``EpiReference`` XML tag.
    """
    tag = 'EpiReference'
class EpiRemittanceInfoIdentifier(ReferenceNumberElement):
    """
    Element for the Finvoice ``EpiRemittanceInfoIdentifier`` XML tag
    (reference-number-typed); marked as required.
    """
    tag = 'EpiRemittanceInfoIdentifier'
    required = True
class EpiNameAddressDetails(Element):
    """
    Element for the Finvoice ``EpiNameAddressDetails`` XML tag.
    """
    tag = 'EpiNameAddressDetails'
class EpiBeneficiaryPartyDetails(Element):
    """
    Element for the Finvoice ``EpiBeneficiaryPartyDetails`` XML tag.
    Aggregates: ``EpiNameAddressDetails``, ``EpiBei``, ``EpiAccountID``.
    """
    tag = 'EpiBeneficiaryPartyDetails'
    aggregate = [EpiNameAddressDetails, EpiBei, EpiAccountID]
class EpiIdentificationDetails(Element):
    """
    Element for the Finvoice ``EpiIdentificationDetails`` XML tag.
    Aggregates: ``EpiDate``, ``EpiReference``.
    """
    tag = 'EpiIdentificationDetails'
    aggregate = [EpiDate, EpiReference]
class EpiPartyDetails(Element):
    """
    Element for the Finvoice ``EpiPartyDetails`` XML tag.
    Aggregates: ``EpiBfiPartyDetails``, ``EpiBeneficiaryPartyDetails``.
    """
    tag = 'EpiPartyDetails'
    aggregate = [EpiBfiPartyDetails,
                 EpiBeneficiaryPartyDetails]
class EpiPaymentInstructionDetails(Element):
    """
    Element for the Finvoice ``EpiPaymentInstructionDetails`` XML tag.
    Aggregates: ``EpiRemittanceInfoIdentifier``, ``EpiInstructedAmount``,
    ``EpiCharge``, ``EpiDateOptionDate``.
    """
    tag = 'EpiPaymentInstructionDetails'
    aggregate = [
        EpiRemittanceInfoIdentifier,
        EpiInstructedAmount,
        EpiCharge,
        EpiDateOptionDate, ]
class EpiDetails(Element):
    """
    Element for the Finvoice ``EpiDetails`` XML tag (top-level EPI block).
    Aggregates: ``EpiIdentificationDetails``, ``EpiPartyDetails``,
    ``EpiPaymentInstructionDetails``.
    """
    tag = 'EpiDetails'
    aggregate = [
        EpiIdentificationDetails,
        EpiPartyDetails,
        EpiPaymentInstructionDetails, ]
/DicksonUI-Micro-2.0.0.tar.gz/DicksonUI-Micro-2.0.0/README.md | # DicksonUI - The Best GUI Library For Python

With DicksonUI, you can make Graphical User Interfaces with python with just few lines of code. DicksonUI is super easy to use and handles everything for you. Just write your code easily
or import any HTML code.
## Overview
The DicksonUI Python GUI Library was written with lightweight use in mind. It provides the following key features
- lightweight
- few dependencies (all designed by me) — and the micro version is fully independent
- Cross-Platform(Windows, Linux, Mac)
- No Runtime Installer(Runtime is Browser)
- Low Ram Usage(less on your script, all used by browser)
- full featured(Many features of html,css,js)
- only Python knowledge required (knowledge of web technologies is a plus)
- browser based (works on any device with a browser installed)
- powerful(power of bootstrap/AngularJS/React Coming Soon)
- Extensible(write your own plugin and share)
- HTML support - not just web pages - with js, css or any library(eg :-bootstap).
- The most common ui widgets available
- Events - with wide range of event data(all event is handling in own thread so no errors)
- never wait - everything is threaded
## Usage
In the following paragraphs, I am going to describe how you can get and use DicksonUI for your own projects.
### Getting it
To download dicksonui, either fork this Github repo or simply use Pypi via pip.
DicksonUI is available for both Python 2 and 3. It doesn't require additional dependencies.
```sh
$ pip install dicksonui
```
If you use easy_install, `easy_install dicksonui`.
If you don't like package managers, just download from Github and unzip and run
```sh
$ python setup.py install
```
## Initialize a Window
First, let's create a new Application.
```python
from dicksonui import Application, window
mywindow=window()
document=mywindow.document
App = Application(('',1024))
App.Add(mywindow)
print("Navigate To - "+App.location)
```
#### Run!!!
Run your code.
For Python 3
```sh
python3 myscript.py
```
Or, For Python 2
```sh
python myscript.py
```
This will print a link
`http://localhost:<port>`
Run your favorite browser
```sh
chromium-browser
```
And then navigate to above link.
😥😥😥 Nothing!!! Just a blank page.
#### Add items to form
Okay, now that we will learn about Controls
```Python
from dicksonui import Application, window
mywindow=window()
document=mywindow.document
App = Application(('',1024))
App.Add(mywindow)
heading=document.createElement('h1')
heading.innerHTML='Hello World!'
document.body.appendChild(heading)
print("Navigate To - "+App.location)
```
Run it
View wiki for more info
## alternatives?
-[RemI](https://github.com/dddomodossola/remi), which has exactly the same idea (build a GUI in Python, run it in a browser). Definitely worth a look. It is a little heavy and uses WebSockets, so it cannot run on older browsers. We use both WebSockets and long polling.
-[tkinter](https://docs.python.org/3/library/tkinter.html#module-tkinter) (standard library)
Advantages: it's well-known. Lots of people have written tutorials and documentation for it.
Disadvantages: it feels like a wrapper around Tk, because it is. This gives good performance and detailed control, but writing it feels unintuitive (to me). It isn't based on browsers and has limited features.
-[flexx](https://github.com/zoofIO/flexx) is very large and has more dependencies; it uses the Tornado server, whereas we use our own library. It has limited features, but you can easily mix server-side and client-side code.
-eel is an alternative to Electron, but it is based on the Bottle server, and it is not a Pythonic approach.
## OK, until next time — bye!
| PypiClean |
/MUGAlyser-1.0.6a0.tar.gz/MUGAlyser-1.0.6a0/mugalyser/gdrive.py | from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from apiclient.http import MediaFileUpload
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
#SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
homedir=os.getenv( "HOME")
CLIENT_SECRET_FILE = os.path.join( homedir, 'pydrive_auth.json' )
class GDrive( object ):
    """Thin wrapper around the Google Drive v2 API (oauth2client flow).

    Caches OAuth credentials under ``~/<credentials_dir>`` and exposes
    helpers to upload files into a given Drive folder.
    """

    def __init__(self, application_name = "gdrive", client_secret_file=CLIENT_SECRET_FILE, credentials_dir=".credentials" ):
        # NOTE(review): client_secret_file is stored but never used below --
        # get_credentials() reads the hard-coded "pydrive_auth.json" from the
        # credentials dir instead (self._secret_path). Confirm which is intended.
        self._client_secret_file = client_secret_file
        self._application_name = application_name
        # Request per-file Drive access only (least privilege).
        self._scopes = 'https://www.googleapis.com/auth/drive.file'
        home_dir = os.path.expanduser('~')
        self._credential_dir = os.path.join(home_dir, credentials_dir )
        # NOTE(review): stored but unused; _credential_dir above is the one used.
        self._credentials_dir = credentials_dir
        if not os.path.exists( self._credential_dir):
            os.makedirs( self._credential_dir)
        # Cached OAuth token file, named after the application.
        self._credential_path = os.path.join( self._credential_dir,
                                              self._application_name + ".json" )
        self._secret_path = os.path.join( self._credential_dir, "pydrive_auth.json")
        self._credentials = None
        self._http = None
        self._service = None
        # Authenticate eagerly so the service handle is ready for uploads.
        self.get_credentials()

    def get_credentials( self ):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns:
            Credentials, the obtained credential.
        """
        store = Storage(self._credential_path)
        self._credentials = store.get()
        if not self._credentials or self._credentials.invalid:
            # No (valid) cached token: run the interactive OAuth2 flow.
            flow = client.flow_from_clientsecrets( self._secret_path, self._scopes )
            flow.user_agent = self._application_name
            self._credentials = tools.run_flow(flow, store )
        # Build an authorized HTTP client and the Drive v2 service handle.
        self._http = self._credentials.authorize(httplib2.Http())
        self._service = discovery.build('drive', 'v2', http=self._http)
        return self._credentials

    def upload_csvFile( self, folder_id, source_filename, target_filename=None ):
        # Upload a local CSV, converting it to a Google Sheet on the way in.
        return self.upload_file( folder_id, source_filename,
                                 source_mimetype="text/csv",
                                 target_mimetype="application/vnd.google-apps.spreadsheet",
                                 target_filename=target_filename )

    def upload_file( self, folder_id, source_filename, source_mimetype, target_mimetype, target_filename=None ):
        # Upload source_filename into Drive folder folder_id and return
        # (title, file_id). Target title defaults to the source basename
        # without its extension.
        if target_filename is None :
            target_filename = os.path.basename( source_filename )
        target_filename = os.path.splitext( target_filename )[0]
        file_metadata = {
            'title' : target_filename,
            'mimeType' : target_mimetype,
            'parents': [{ 'id': folder_id }]
        }
        media = MediaFileUpload( source_filename,
                                 mimetype= source_mimetype,
                                 resumable=True)
        file_obj = self._service.files().insert(body=file_metadata,
                                                media_body=media,
                                                fields='id').execute()
        return ( target_filename, file_obj.get( "id"))
        #print( 'File ID: %s' % file_obj.get( "id"))

    def service(self):
        # Accessor for the underlying googleapiclient service object.
        return self._service
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/tools/testing/Valgrind.py | import sys
from nuitka.Tracing import my_print
from nuitka.utils.Execution import check_output, executeProcess
from nuitka.utils.FileOperations import (
copyFile,
getFileContentByLine,
withTemporaryFile,
)
from nuitka.utils.Utils import isWin32Windows
def runValgrind(descr, tool, args, include_startup, save_logfilename=None):
    """Run *args* under valgrind and return the measured metric.

    For tool "callgrind" the summary instruction count is returned; for
    "massif" the peak heap size in bytes. Exits the process on any error
    (Windows, unknown tool, valgrind failure, unparsable log).
    """
    # Many cases to deal with, pylint: disable=too-many-branches
    if isWin32Windows():
        sys.exit("Error, valgrind is not available on Windows.")

    if descr:
        my_print(descr, tool, file=sys.stderr, end="... ")

    with withTemporaryFile() as log_file:
        log_filename = log_file.name

        command = ["valgrind", "-q"]

        if tool == "callgrind":
            command += ("--tool=callgrind", "--callgrind-out-file=%s" % log_filename)
        elif tool == "massif":
            command += ("--tool=massif", "--massif-out-file=%s" % log_filename)
        else:
            sys.exit("Error, no support for tool '%s' yet." % tool)

        # Do not count things before main module starts its work.
        # Several spellings are passed to cover different Python versions'
        # module-init symbol names.
        if not include_startup:
            command += (
                "--zero-before=init__main__()",
                "--zero-before=init__main__",
                "--zero-before=PyInit___main__",
                "--zero-before=PyInit___main__()",
            )

        command.extend(args)

        _stdout_valgrind, stderr_valgrind, exit_valgrind = executeProcess(command)

        assert exit_valgrind == 0, stderr_valgrind
        if descr:
            my_print("OK", file=sys.stderr)

        if save_logfilename is not None:
            copyFile(log_filename, save_logfilename)

        max_mem = None

        for line in getFileContentByLine(log_filename):
            # callgrind: a single "summary:" line carries the metric.
            if tool == "callgrind" and line.startswith("summary:"):
                return int(line.split()[1])
            # massif: track the maximum heap snapshot seen.
            elif tool == "massif" and line.startswith("mem_heap_B="):
                mem = int(line.split("=")[1])
                if max_mem is None:
                    max_mem = 0
                max_mem = max(mem, max_mem)

        if tool == "massif" and max_mem is not None:
            return max_mem

        sys.exit("Error, didn't parse Valgrind log file successfully.")
def getBinarySizes(filename):
    """Return the (text, data) segment sizes of *filename*.

    Runs the "size" tool and parses the last output line, which holds the
    numeric columns.
    """
    output = check_output(["size", filename]).strip()
    columns = output.split(b"\n")[-1].replace(b"\t", b"").split()
    return int(columns[0]), int(columns[1])
/FishTaco-1.1.6-py3-none-any.whl/fishtaco/learn_non_neg_elastic_net_with_prior.py | from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import enet_path, ElasticNet
__author__ = 'Ohad Manor'
__email__ = 'omanor@gmail.com'
__status__ = "Development"
def learn(cov_train, res_train, params=None):
    """
    Learns a cross-validation Non Negative Elastic Net model with a Prior
    from given features.

    Parameters
    ----------
    cov_train:
        the covariate matrix of size NxP, with N samples and P covariates
    res_train:
        the response vector of size N, with N samples
    params: dict, optional
        optional settings (an empty dict is assumed when omitted):
        params['covariates_prior']: a vector of priors for the different
        features in the range [0,1]; covariates are scaled by it.
        params['class_subfeatures']: a vector of binary case/control labels.
        When given, the covariate matrix is doubled to 2*P columns: the
        first P carry the case rows (zero for controls), the second P carry
        the control rows (zero for cases), so the model has 2*P weights.
        params['num_cv']: folds in the internal cross-validation (default: 5)
        params['l1_ratio']: L1 penalty ratio in [0,1] (default: 0.5)

    Returns
    -------
    enet:
        the elastic-net model, refit on all training data with the alpha
        averaged over the per-fold best alphas
    best_validation_rsqr:
        per-fold best validation r^2 values
    """
    # Bug fix: 'params={}' was a mutable default argument -- one shared dict
    # object across all calls. Use None as the sentinel instead.
    if params is None:
        params = {}

    # nicer output
    # NOTE(review): mutates global numpy print options as a side effect of
    # calling learn(); kept for backward compatibility.
    np.set_printoptions(precision=2, suppress=False, linewidth=200)

    # update train covariates by the given params
    if params.get('covariates_prior') is not None:
        cov_train = cov_train * params['covariates_prior']

    if params.get('class_subfeatures') is not None:
        # cases subfeatures
        cases = params['class_subfeatures'] > 0
        case_subfeatures = np.zeros_like(cov_train)
        case_subfeatures[cases, :] = cov_train[cases, :]
        # control subfeatures
        controls = params['class_subfeatures'] == 0
        control_subfeatures = np.zeros_like(cov_train)
        control_subfeatures[controls, :] = cov_train[controls, :]
        # update cov_train
        cov_train = np.hstack((case_subfeatures, control_subfeatures))

    # more params
    num_cv = params.get('num_cv', 5)
    l1_ratio = params.get('l1_ratio', 0.5)

    k_fold = KFold(n_splits=num_cv, shuffle=True).split(cov_train, res_train)

    best_validation_rsqr = np.zeros(num_cv)
    best_validation_alpha = np.zeros(num_cv)

    for inner_k, (inner_train, inner_validation) in enumerate(k_fold):

        cov_inner_train = cov_train[inner_train, :]
        cov_inner_validation = cov_train[inner_validation, :]
        response_inner_train = res_train[inner_train]
        response_inner_validation = res_train[inner_validation]

        # Fit the whole regularization path on this fold's training split.
        alphas_positive_enet, coefs_positive_enet, \
            _ = enet_path(cov_inner_train, response_inner_train,
                          l1_ratio=l1_ratio, fit_intercept=False,
                          normalize=False, positive=True,
                          return_models=False)

        num_alphas = len(alphas_positive_enet)

        # Predictions for every alpha at once: (num_alphas x num_validation).
        prediction_validation = np.dot(coefs_positive_enet.transpose(),
                                       cov_inner_validation.transpose())

        rep_res_val = np.repeat(response_inner_validation,
                                num_alphas).reshape(len(
                                    response_inner_validation), num_alphas).transpose()

        rep_mean_val = np.repeat(np.mean(response_inner_validation),
                                 len(response_inner_validation)*num_alphas).\
            reshape(len(response_inner_validation), num_alphas).transpose()

        # Validation r^2 per alpha; pick the alpha with the best r^2.
        sos_residual = np.sum((prediction_validation-rep_res_val) ** 2, axis=1)
        sos_original = np.sum((rep_res_val - rep_mean_val) ** 2, axis=1)
        rep_validation_rsqr = np.array(1 - (sos_residual / sos_original))

        sorted_ind = np.argsort(rep_validation_rsqr)[::-1]
        best_validation_rsqr[inner_k] = rep_validation_rsqr[sorted_ind[0]]
        best_validation_alpha[inner_k] = alphas_positive_enet[sorted_ind[0]]

    mean_best_alpha = np.mean(best_validation_alpha)

    # now learn one unified model on the given data using the mean_best_alpha
    enet = ElasticNet(l1_ratio=l1_ratio, alpha=mean_best_alpha,
                      fit_intercept=False, normalize=False,
                      positive=True)
    enet.fit(cov_train, res_train)

    return enet, best_validation_rsqr
def test(enet_model, cov_test, res_test, params=None):
    """
    Evaluates a given Elastic Net model on test data.

    Parameters
    ----------
    enet_model:
        the elastic-net model to evaluate (anything with a predict() method)
    cov_test:
        the covariate matrix of size NxP, with N samples and P covariates
    res_test:
        the response vector of size N, with N samples
    params: dict, optional
        optional settings, interpreted exactly as in learn() (an empty dict
        is assumed when omitted): 'covariates_prior' scales the covariates;
        'class_subfeatures' doubles the covariate matrix to 2*P columns
        (case columns then control columns) to match a model trained the
        same way.

    Returns
    -------
    prediction: vector
        the predicted values
    test_rsqr: real
        the value of the test r^2
    """
    # Bug fix: 'params={}' was a mutable default argument shared across
    # calls; use None as the sentinel instead.
    if params is None:
        params = {}

    # update test covariates by the given params (must mirror learn()).
    if params.get('covariates_prior') is not None:
        cov_test = cov_test * params['covariates_prior']

    if params.get('class_subfeatures') is not None:
        # cases subfeatures
        cases = params['class_subfeatures'] > 0
        case_subfeatures = np.zeros_like(cov_test)
        case_subfeatures[cases, :] = cov_test[cases, :]
        # control subfeatures
        controls = params['class_subfeatures'] == 0
        control_subfeatures = np.zeros_like(cov_test)
        control_subfeatures[controls, :] = cov_test[controls, :]
        # update cov_train
        cov_test = np.hstack((case_subfeatures, control_subfeatures))

    prediction = enet_model.predict(cov_test)

    # r^2 = 1 - SS_residual / SS_total
    sos_residual = np.sum((prediction - res_test) ** 2)
    sos_original = np.sum((res_test - np.mean(res_test)) ** 2)
    test_rsqr = 1 - (sos_residual / sos_original)

    return prediction, test_rsqr
/FanFicFare-4.27.0.tar.gz/FanFicFare-4.27.0/fanficfare/adapters/adapter_adultfanfictionorg.py | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import re
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
# py2 vs py3 transition
from ..six import text_type as unicode
from .base_adapter import BaseSiteAdapter, makeDate
################################################################################
def getClass():
    """Return the adapter class for this site module (fanficfare plugin hook)."""
    return AdultFanFictionOrgAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class AdultFanFictionOrgAdapter(BaseSiteAdapter):
    def __init__(self, config, url):
        """Normalize the story URL and seed per-site metadata."""
        BaseSiteAdapter.__init__(self, config, url)

        # logger.debug("AdultFanFictionOrgAdapter.__init__ - url='{0}'".format(url))

        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
        self.password = ""
        self.is_adult=False

        # get storyId from url
        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])

        #Setting the 'Zone' for each "Site"
        self.zone = self.parsedUrl.netloc.split('.')[0]

        # normalized story URL.(checking self.zone against list
        # removed--it was redundant w/getAcceptDomains and
        # getSiteURLPattern both)
        self._setURL('https://{0}.{1}/story.php?no={2}'.format(self.zone, self.getBaseDomain(), self.story.getMetadata('storyId')))
        #self._setURL('https://' + self.zone + '.' + self.getBaseDomain() + '/story.php?no='+self.story.getMetadata('storyId'))

        # Each adapter needs to have a unique site abbreviation.
        #self.story.setMetadata('siteabbrev',self.getSiteAbbrev())

        # Each adapter needs to have a unique site abbreviation.
        # Prefixing with the zone keeps it unique across AFF subdomains.
        self.story.setMetadata('siteabbrev',self.zone+'aff')

        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%Y-%m-%d"
    ## Added because adult-fanfiction.org does send you to
    ## www.adult-fanfiction.org when you go to it and it also moves
    ## the site & examples down the web service front page so the
    ## first screen isn't dominated by 'adult' links.
    def getBaseDomain(self):
        """Base domain shared by all AFF zone subdomains."""
        return 'adult-fanfiction.org'
    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        """Canonical site domain (with www) used for display/matching."""
        # The site domain. Does have www here, if it uses it.
        return 'www.adult-fanfiction.org'
@classmethod
def getAcceptDomains(cls):
# mobile.fimifction.com isn't actually a valid domain, but we can still get the story id from URLs anyway
return ['anime.adult-fanfiction.org',
'anime2.adult-fanfiction.org',
'bleach.adult-fanfiction.org',
'books.adult-fanfiction.org',
'buffy.adult-fanfiction.org',
'cartoon.adult-fanfiction.org',
'celeb.adult-fanfiction.org',
'comics.adult-fanfiction.org',
'ff.adult-fanfiction.org',
'games.adult-fanfiction.org',
'hp.adult-fanfiction.org',
'inu.adult-fanfiction.org',
'lotr.adult-fanfiction.org',
'manga.adult-fanfiction.org',
'movies.adult-fanfiction.org',
'naruto.adult-fanfiction.org',
'ne.adult-fanfiction.org',
'original.adult-fanfiction.org',
'tv.adult-fanfiction.org',
'xmen.adult-fanfiction.org',
'ygo.adult-fanfiction.org',
'yuyu.adult-fanfiction.org']
@classmethod
def getSiteExampleURLs(self):
return ("https://anime.adult-fanfiction.org/story.php?no=123456789 "
+ "https://anime2.adult-fanfiction.org/story.php?no=123456789 "
+ "https://bleach.adult-fanfiction.org/story.php?no=123456789 "
+ "https://books.adult-fanfiction.org/story.php?no=123456789 "
+ "https://buffy.adult-fanfiction.org/story.php?no=123456789 "
+ "https://cartoon.adult-fanfiction.org/story.php?no=123456789 "
+ "https://celeb.adult-fanfiction.org/story.php?no=123456789 "
+ "https://comics.adult-fanfiction.org/story.php?no=123456789 "
+ "https://ff.adult-fanfiction.org/story.php?no=123456789 "
+ "https://games.adult-fanfiction.org/story.php?no=123456789 "
+ "https://hp.adult-fanfiction.org/story.php?no=123456789 "
+ "https://inu.adult-fanfiction.org/story.php?no=123456789 "
+ "https://lotr.adult-fanfiction.org/story.php?no=123456789 "
+ "https://manga.adult-fanfiction.org/story.php?no=123456789 "
+ "https://movies.adult-fanfiction.org/story.php?no=123456789 "
+ "https://naruto.adult-fanfiction.org/story.php?no=123456789 "
+ "https://ne.adult-fanfiction.org/story.php?no=123456789 "
+ "https://original.adult-fanfiction.org/story.php?no=123456789 "
+ "https://tv.adult-fanfiction.org/story.php?no=123456789 "
+ "https://xmen.adult-fanfiction.org/story.php?no=123456789 "
+ "https://ygo.adult-fanfiction.org/story.php?no=123456789 "
+ "https://yuyu.adult-fanfiction.org/story.php?no=123456789")
    def getSiteURLPattern(self):
        """Regex matching any accepted zone's story URL (must mirror getAcceptDomains)."""
        return r'https?://(anime|anime2|bleach|books|buffy|cartoon|celeb|comics|ff|games|hp|inu|lotr|manga|movies|naruto|ne|original|tv|xmen|ygo|yuyu)\.adult-fanfiction\.org/story\.php\?no=\d+$'
##This is not working right now, so I'm commenting it out, but leaving it for future testing
## Login seems to be reasonably standard across eFiction sites.
#def needToLoginCheck(self, data):
##This adapter will always require a login
# return True
# <form name="login" method="post" action="">
# <div class="top">E-mail: <span id="sprytextfield1">
# <input name="email" type="text" id="email" size="20" maxlength="255" />
# <span class="textfieldRequiredMsg">Email is required.</span><span class="textfieldInvalidFormatMsg">Invalid E-mail.</span></span></div>
# <div class="top">Password: <span id="sprytextfield2">
# <input name="pass1" type="password" id="pass1" size="20" maxlength="32" />
# <span class="textfieldRequiredMsg">password is required.</span><span class="textfieldMinCharsMsg">Minimum 8 characters8.</span><span class="textfieldMaxCharsMsg">Exceeded 32 characters.</span></span></div>
# <div class="top"><br /> <input name="loginsubmittop" type="hidden" id="loginsubmit" value="TRUE" />
# <input type="submit" value="Login" />
# </div>
# </form>
##This is not working right now, so I'm commenting it out, but leaving it for future testing
#def performLogin(self, url, soup):
# params = {}
# if self.password:
# params['email'] = self.username
# params['pass1'] = self.password
# else:
# params['email'] = self.getConfig("username")
# params['pass1'] = self.getConfig("password")
# params['submit'] = 'Login'
# # copy all hidden input tags to pick up appropriate tokens.
# for tag in soup.findAll('input',{'type':'hidden'}):
# params[tag['name']] = tag['value']
# logger.debug("Will now login to URL {0} as {1} with password: {2}".format(url, params['email'],params['pass1']))
# d = self.post_request(url, params, usecache=False)
# d = self.post_request(url, params, usecache=False)
# soup = self.make_soup(d)
#if not (soup.find('form', {'name' : 'login'}) == None):
# logger.info("Failed to login to URL %s as %s" % (url, params['email']))
# raise exceptions.FailedToLogin(url,params['email'])
# return False
#else:
# return True
## Getting the chapter list and the meta data, plus 'is adult' checking.
def doExtractChapterUrlsAndMetadata(self, get_cover=True):
## You need to have your is_adult set to true to get this story
if not (self.is_adult or self.getConfig("is_adult")):
raise exceptions.AdultCheckRequired(self.url)
url = self.url
logger.debug("URL: "+url)
data = self.get_request(url)
if "The dragons running the back end of the site can not seem to find the story you are looking for." in data:
raise exceptions.StoryDoesNotExist("{0}.{1} says: The dragons running the back end of the site can not seem to find the story you are looking for.".format(self.zone, self.getBaseDomain()))
soup = self.make_soup(data)
##This is not working right now, so I'm commenting it out, but leaving it for future testing
#self.performLogin(url, soup)
## Title
## Some of the titles have a backslash on the story page, but not on the Author's page
## So I am removing it from the title, so it can be found on the Author's page further in the code.
## Also, some titles may have extra spaces ' ', and the search on the Author's page removes them,
## so I have to here as well. I used multiple replaces to make sure, since I did the same below.
a = soup.find('a', href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(a).replace('\\','').replace(' ',' ').replace(' ',' ').replace(' ',' ').strip())
# Find the chapters:
chapters = soup.find('ul',{'class':'dropdown-content'})
for i, chapter in enumerate(chapters.findAll('a')):
self.add_chapter(chapter,self.url+'&chapter='+unicode(i+1))
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"profile.php\?no=\d+"))
if a == None:
# I know that the original author of fanficfare wants to always have metadata,
# but I posit that if the story is there, even if we can't get the metadata from the
# author page, the story should still be able to be downloaded, which is what I've done here.
self.story.setMetadata('authorId','000000000')
self.story.setMetadata('authorUrl','https://www.adult-fanfiction.org')
self.story.setMetadata('author','Unknown')
logger.warning('There was no author found for the story... Metadata will not be retreived.')
self.setDescription(url,'>>>>>>>>>> No Summary Given <<<<<<<<<<')
else:
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl',a['href'])
self.story.setMetadata('author',stripHTML(a))
##The story page does not give much Metadata, so we go to the Author's page
##Get the first Author page to see if there are multiple pages.
##AFF doesn't care if the page number is larger than the actual pages,
##it will continue to show the last page even if the variable is larger than the actual page
author_Url = '{0}&view=story&zone={1}&page=1'.format(self.story.getMetadata('authorUrl'), self.zone)
#author_Url = self.story.getMetadata('authorUrl')+'&view=story&zone='+self.zone+'&page=1'
##I'm resetting the author page to the zone for this story
self.story.setMetadata('authorUrl',author_Url)
logger.debug('Getting the author page: {0}'.format(author_Url))
adata = self.get_request(author_Url)
if "The member you are looking for does not exist." in adata:
raise exceptions.StoryDoesNotExist("{0}.{1} says: The member you are looking for does not exist.".format(self.zone, self.getBaseDomain()))
#raise exceptions.StoryDoesNotExist(self.zone+'.'+self.getBaseDomain() +" says: The member you are looking for does not exist.")
asoup = self.make_soup(adata)
##Getting the number of author pages
pages = 0
pagination=asoup.find('ul',{'class' : 'pagination'})
if pagination:
pages = pagination.findAll('li')[-1].find('a')
if not pages == None:
pages = pages['href'].split('=')[-1]
else:
pages = 0
storya = None
##If there is only 1 page of stories, check it to get the Metadata,
if pages == 0:
a = asoup.findAll('li')
for lc2 in a:
if lc2.find('a', href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$")):
storya = lc2
break
## otherwise go through the pages
else:
page=1
i=0
while i == 0:
##We already have the first page, so if this is the first time through, skip getting the page
if page != 1:
author_Url = '{0}&view=story&zone={1}&page={2}'.format(self.story.getMetadata('authorUrl'), self.zone, unicode(page))
logger.debug('Getting the author page: {0}'.format(author_Url))
adata = self.get_request(author_Url)
##This will probably never be needed, since AFF doesn't seem to care what number you put as
## the page number, it will default to the last page, even if you use 1000, for an author
## that only hase 5 pages of stories, but I'm keeping it in to appease Saint Justin Case (just in case).
if "The member you are looking for does not exist." in adata:
raise exceptions.StoryDoesNotExist("{0}.{1} says: The member you are looking for does not exist.".format(self.zone, self.getBaseDomain()))
# we look for the li element that has the story here
asoup = self.make_soup(adata)
a = asoup.findAll('li')
for lc2 in a:
if lc2.find('a', href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$")):
i=1
storya = lc2
break
page = page + 1
if page > int(pages):
break
##Split the Metadata up into a list
##We have to change the soup type to a string, then remove the newlines, and double spaces,
##then changes the <br/> to '-:-', which seperates the different elemeents.
##Then we strip the HTML elements from the string.
##There is also a double <br/>, so we have to fix that, then remove the leading and trailing '-:-'.
##They are always in the same order.
## EDIT 09/26/2016: Had some trouble with unicode errors... so I had to put in the decode/encode parts to fix it
liMetadata = unicode(storya).replace('\n','').replace('\r','').replace('\t',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ')
liMetadata = stripHTML(liMetadata.replace(r'<br/>','-:-').replace('<!-- <br /-->','-:-'))
liMetadata = liMetadata.strip('-:-').strip('-:-').encode('utf-8')
for i, value in enumerate(liMetadata.decode('utf-8').split('-:-')):
if i == 0:
# The value for the title has been manipulated, so may not be the same as gotten at the start.
# I'm going to use the href from the storya retrieved from the author's page to determine if it is correct.
if storya.find('a', href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$"))['href'] != url:
raise exceptions.StoryDoesNotExist('Did not find story in author story list: {0}'.format(author_Url))
elif i == 1:
##Get the description
self.setDescription(url,stripHTML(value.strip()))
else:
# the rest of the values can be missing, so instead of hardcoding the numbers, we search for them.
if 'Located :' in value:
self.story.setMetadata('category',value.replace(r'>',r'>').replace(r'Located :',r'').strip())
elif 'Category :' in value:
# Get the Category
self.story.setMetadata('category',value.replace(r'>',r'>').replace(r'Located :',r'').strip())
elif 'Content Tags :' in value:
# Get the Erotic Tags
value = stripHTML(value.replace(r'Content Tags :',r'')).strip()
for code in re.split(r'\s',value):
self.story.addToList('eroticatags',code)
elif 'Posted :' in value:
# Get the Posted Date
value = value.replace(r'Posted :',r'').strip()
if value.startswith('008'):
# It is unknown how the 200 became 008, but I'm going to change it back here
value = value.replace('008','200')
elif value.startswith('0000'):
# Since the date is showing as 0000,
# I'm going to put the memberdate here
value = asoup.find('div',{'id':'contentdata'}).find('p').get_text(strip=True).replace('Member Since','').strip()
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
elif 'Edited :' in value:
# Get the 'Updated' Edited date
# AFF has the time for the Updated date, and we only want the date,
# so we take the first 10 characters only
value = value.replace(r'Edited :',r'').strip()[0:10]
if value.startswith('008'):
# It is unknown how the 200 became 008, but I'm going to change it back here
value = value.replace('008','200')
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
elif value.startswith('0000') or '-00-' in value:
# Since the date is showing as 0000,
# or there is -00- in the date,
# I'm going to put the Published date here
self.story.setMetadata('dateUpdated', self.story.getMetadata('datPublished'))
else:
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
else:
# This catches the blank elements, and the Review and Dragon Prints.
# I am not interested in these, so do nothing
zzzzzzz=0
# grab the text for an individual chapter.
def getChapterText(self, url):
#Since each chapter is on 1 page, we don't need to do anything special, just get the content of the page.
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self.get_request(url))
chaptertag = soup.find('ul',{'class':'pagination'}).parent.parent.parent.findNextSibling('li')
if None == chaptertag:
raise exceptions.FailedToDownload("Error downloading Chapter: {0}! Missing required element!".format(url))
# Change td to a div.
chaptertag.name='div'
return self.utf8FromSoup(url,chaptertag) | PypiClean |
/Flask-Pretty-0.2.0.tar.gz/Flask-Pretty-0.2.0/docs/index.rst | Flask-Pretty
============
.. toctree::
:maxdepth: 2
:caption: Contents:
.. module:: flask_pretty
Flask-Pretty is a Flask extension to output prettified HTML pages to ease the
development process of HTML templates.
However, HTML prettifying should be used for development purposes only.
For production purposes, HTML minifying should be used instead (for instance by using Flask-HTMLmin_).
The underlying HTML prettifying process is provided by BeautifulSoup_.
.. _Flask-HTMLmin: https://github.com/hamidfzm/Flask-HTMLmin
.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup
Installation
------------
Install the extension with with pipenv_ (recommended)::
$ pipenv install flask-pretty
Or with pip_::
$ pip install flask-pretty
.. _pip: https://pip.pypa.io
.. _pipenv: https://docs.pipenv.org
Usage
-----
Using Flask-Pretty is really simple:
.. code-block:: python
from flask import Flask
from flask_pretty import Prettify
app = Flask(__name__)
prettify = Prettify(app)
Or if you are using the Flask `Application Factories`_ pattern:
.. code-block:: python
from flask import Flask
from flask_pretty import Prettify
prettify = Prettify()
def create_app():
    app = Flask(__name__)
    prettify.init_app(app)
    return app
Flask-Pretty is configurable via the following configuration variables:
- `PRETTIFY`: enable Flask-Pretty for all routes (default: False)
.. _Application Factories: http://flask.pocoo.org/docs/0.12/patterns/appfactories/
API
---
.. autoclass:: flask_pretty.Prettify
:members:
| PypiClean |
/EditorConfig-0.12.3-py3-none-any.whl/editorconfig/__main__.py | import getopt
import sys
from editorconfig import VERSION, __version__
from editorconfig.compat import force_unicode
from editorconfig.exceptions import ParsingError, PathError, VersionError
from editorconfig.handler import EditorConfigHandler
from editorconfig.versiontools import split_version
def version():
    """Print the EditorConfig Python Core version banner to stdout."""
    banner = "EditorConfig Python Core Version {0}".format(__version__)
    print(banner)
def usage(command, error=False):
    """Write the command-line help text for *command*.

    Help goes to stderr when *error* is True (bad invocation),
    otherwise to stdout.
    """
    stream = sys.stderr if error else sys.stdout
    stream.write("%s [OPTIONS] FILENAME\n" % command)
    stream.write('-f '
                 'Specify conf filename other than ".editorconfig".\n')
    stream.write("-b "
                 "Specify version (used by devs to test compatibility).\n")
    stream.write("-h OR --help Print this help message.\n")
    stream.write("-v OR --version Display version information.\n")
def main():
    """Command-line entry point: print EditorConfig properties for files.

    Parses -f/-b/-h/-v options, then for each filename prints the
    resolved key=value pairs (with a [filename] header when more than
    one file was given). Exits 2 on usage or processing errors.
    """
    command_name = sys.argv[0]
    try:
        opts, args = getopt.getopt(list(map(force_unicode, sys.argv[1:])),
                                   "vhb:f:", ["version", "help"])
    except getopt.GetoptError as e:
        print(str(e))
        usage(command_name, error=True)
        sys.exit(2)

    # Defaults: library version, standard conf file name.
    version_tuple = VERSION
    conf_filename = '.editorconfig'

    for option, arg in opts:
        if option in ('-h', '--help'):
            usage(command_name)
            sys.exit()
        if option in ('-v', '--version'):
            version()
            sys.exit()
        if option == '-f':
            conf_filename = arg
        if option == '-b':
            # -b lets developers emulate an older core version.
            version_tuple = split_version(arg)
            if version_tuple is None:
                sys.exit("Invalid version number: %s" % arg)

    if len(args) < 1:
        usage(command_name, error=True)
        sys.exit(2)
    filenames = args
    multiple_files = len(args) > 1

    for filename in filenames:
        handler = EditorConfigHandler(filename, conf_filename, version_tuple)
        try:
            options = handler.get_configurations()
        except (ParsingError, PathError, VersionError) as e:
            print(str(e))
            sys.exit(2)
        # Header disambiguates output when several files are queried.
        if multiple_files:
            print("[%s]" % filename)
        for key, value in options.items():
            print("%s=%s" % (key, value))
print("%s=%s" % (key, value))
# Allow running directly (python -m editorconfig / python __main__.py).
if __name__ == "__main__":
    main()
/Amino.py-3.0.10-py3-none-any.whl/amino/async_client.py | import json
import base64
import aiohttp
import asyncio
import threading
from uuid import uuid4
from time import timezone, sleep
from typing import BinaryIO, Union
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .async_socket import AsyncCallbacks, AsyncSocketHandler
# Default device profile used by all clients.
# NOTE(review): this rebinds the name "device", shadowing the imported
# module of the same name for the rest of this file.
device = device.DeviceGenerator()
class AsyncClient(AsyncCallbacks, AsyncSocketHandler):
    def __init__(self, deviceId: str = None, socketDebugging = False):
        """
        Async Amino client.

        **Parameters**
            - **deviceId** : device identifier to present to the API;
              a generated one is used when omitted.
            - **socketDebugging** : enable websocket debug output.
        """
        self.api = "https://service.narvii.com/api/v1"
        self.authenticated = False
        self.configured = False
        self.user_agent = device.user_agent

        # Use the caller-supplied device id, falling back to the generated one.
        if deviceId is not None: self.device_id = deviceId
        else: self.device_id = device.device_id

        # Initialise the websocket handler and its event callbacks.
        AsyncSocketHandler.__init__(self, self, debug=socketDebugging)
        AsyncCallbacks.__init__(self, self)

        # Session/auth state; populated after a successful login.
        self.json = None
        self.sid = None
        self.userId = None
        self.secret = None

        self.account: objects.UserProfile = objects.UserProfile(None)
        self.profile: objects.UserProfile = objects.UserProfile(None)

        # Shared aiohttp session, closed in __del__/_close_session.
        self.session = aiohttp.ClientSession()
    def __del__(self):
        # Best-effort cleanup of the aiohttp session at garbage collection.
        try:
            # A loop is available: schedule the close without blocking.
            loop = asyncio.get_event_loop()
            loop.create_task(self._close_session())
        except RuntimeError:
            # No usable event loop (e.g. interpreter shutdown): run a
            # temporary loop just to close the session cleanly.
            loop = asyncio.new_event_loop()
            loop.run_until_complete(self._close_session())
async def _close_session(self):
if not self.session.closed: await self.session.close()
def parse_headers(self, data = None):
if data:
return headers.Headers(data=data, deviceId=self.device_id).headers
else:
return headers.Headers(deviceId=self.device_id).headers
async def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
await self.send(data)
async def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def run_vc(self, comId: str, chatId: str, joinType: str):
while self.active:
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
sleep(1)
async def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
await self.send(data)
self.active = True
threading.Thread(target=self.run_vc, args=[comId, chatId, joinType])
async def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def login_sid(self, SID: str):
"""
Login into an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = await self.get_user_info(uId)
self.profile: objects.UserProfile = await self.get_user_info(uId)
headers.sid = self.sid
await self.startup()
async def login(self, email: str, password: str, secret: str = None):
"""
Login into an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **secret** : Secret of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}" if secret is None else secret,
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
self.authenticated = True
self.json = json.loads(await response.text())
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.secret = self.json["secret"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
await self.startup()
return response.status
async def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/register", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
self.account: None
self.profile: None
headers.sid = None
await self.close()
await self.session.close()
return response.status
async def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/persona/profile/basic", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def request_verify_code(self, email: str, resetPassword: bool = False):
"""
Request an verification code to the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
async with self.session.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
async def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
async with self.session.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
async with self.session.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_account_info(self):
async with self.session.get(f"{self.api}/g/s/account", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfile(json.loads(await response.text())["account"]).UserProfile
async def upload_media(self, file: BinaryIO, fileType: str):
"""
Upload file to the amino servers.
**Parameters**
- **file** : File to be uploaded.
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
async with self.session.post(f"{self.api}/g/s/media/upload", headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["mediaValue"]
    def handle_socket_message(self, data):
        """Dispatch a raw socket payload to the callback resolver."""
        return self.resolve(data)
async def get_eventlog(self, language: str = "en"):
async with self.session.get(f"{self.api}/g/s/eventlog/profile?language={language}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())
async def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["communityList"]).CommunityList
async def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["communityList"]
async def get_user_info(self, userId: str):
"""
Information of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfile(json.loads(await response.text())["userProfile"]).UserProfile
async def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.ThreadList(json.loads(await response.text())["threadList"]).ThreadList
async def get_chat_thread(self, chatId: str):
"""
Get the Chat Object from an Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Thread(json.loads(await response.text())["thread"]).Thread
async def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["memberList"]).UserProfileList
async def join_chat(self, chatId: str):
"""
Join an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def leave_chat(self, chatId: str):
"""
Leave an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def start_chat(self, userId: Union[str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
Start an Chat with an User or List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/chat/thread", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Thread(json.loads(await response.text())["thread"]).Thread
async def invite_to_chat(self, userId: Union[str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
if allowRejoin: allowRejoin = 1
if not allowRejoin: allowRejoin = 0
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
List of Messages from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
async with self.session.get(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetMessages(json.loads(await response.text())).GetMessages
async def get_message_info(self, chatId: str, messageId: str):
"""
Information of an Message from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Message(json.loads(await response.text())["message"]).Message
async def get_community_info(self, comId: str):
"""
Information of an Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Community(json.loads(await response.text())["community"]).Community
async def search_community(self, aminoId: str):
"""
Search a Community byt its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
response = json.loads(await response.text())["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
async def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.VisitorsList(json.loads(await response.text())).VisitorsList
async def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
async with self.session.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetBlogInfo(json.loads(await response.text())).GetBlogInfo
elif wikiId:
async with self.session.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetWikiInfo(json.loads(await response.text())).GetWikiInfo
elif fileId:
async with self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.SharedFolderFile(json.loads(await response.text())["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
async def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
url = f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}"
elif fileId: url = f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}"
else: raise exceptions.SpecifyType()
async with self.session.get(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommentList(json.loads(await response.text())["commentList"]).CommentList
async def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["blockerUidList"]
async def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of an User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommentList(json.loads(await response.text())["commentList"]).CommentList
async def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/{flg}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
    async def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
        """
        Send a Message to a Chat.

        **Parameters**
            - **message** : Message to be sent
            - **chatId** : ID of the Chat.
            - **file** : File to be sent.
            - **fileType** : Type of the file.
                - ``audio``, ``image``, ``gif``
            - **messageType** : Type of the Message.
            - **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
            - **replyTo** : Message ID to reply to.
            - **stickerId** : Sticker ID to be sent.
            - **embedTitle** : Title of the Embed.
            - **embedContent** : Content of the Embed.
            - **embedLink** : Link of the Embed.
            - **embedImage** : Image of the Embed.
            - **embedId** : ID of the Embed.

        **Returns**
            - **Success** : 200 (int)
            - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
        """

        # Strip the "<$...$>" mention markers from plain text messages only;
        # file messages carry no text content.
        if message is not None and file is None:
            message = message.replace("<$", "").replace("$>", "")

        # Build the mention descriptor list the API expects.
        mentions = []
        if mentionUserIds:
            for mention_uid in mentionUserIds:
                mentions.append({"uid": mention_uid})

        # An embed image is uploaded first; the payload references its URL.
        if embedImage:
            embedImage = [[100, await self.upload_media(embedImage, "image"), None]]

        data = {
            "type": messageType,
            "content": message,
            "clientRefId": int(timestamp() / 10 % 1000000000),
            "attachedObject": {
                "objectId": embedId,
                "objectType": embedType,
                "link": embedLink,
                "title": embedTitle,
                "content": embedContent,
                "mediaList": embedImage
            },
            "extensions": {"mentionedArray": mentions},
            "timestamp": int(timestamp() * 1000)
        }

        if replyTo: data["replyMessageId"] = replyTo

        # A sticker overrides the text content and forces message type 3.
        if stickerId:
            data["content"] = None
            data["stickerId"] = stickerId
            data["type"] = 3

        # A file attachment likewise clears the text content; the media type
        # and content-type fields depend on the declared fileType.
        if file:
            data["content"] = None
            if fileType == "audio":
                data["type"] = 2
                data["mediaType"] = 110

            elif fileType == "image":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/jpg"
                data["mediaUhqEnabled"] = True

            elif fileType == "gif":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/gif"
                data["mediaUhqEnabled"] = True

            else: raise exceptions.SpecifyType()

            # File bytes travel inline, base64-encoded, in the JSON payload.
            data["mediaUploadValue"] = base64.b64encode(file.read()).decode()

        data = json.dumps(data)
        async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data) as response:
            if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
            else: return response.status
async def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff:
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: BinaryIO = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
Send a Message to a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Background Image of the Chat.
- **announcement** : Announcement of the Chat.
- **pinAnnouncement** : If the Chat Announcement should Pinned or not.
- **coHosts** : List of User IDS to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
- **doNotDisturb** : If the Chat should Do Not Disturb or not.
- **pinChat** : If the Chat should Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"] = {"announcement": announcement}
if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
if doNotDisturb:
data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not doNotDisturb:
data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if pinChat is not None:
if pinChat:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not pinChat:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if backgroundImage is not None:
data = json.dumps({"media": [100, await self.upload_media(backgroundImage, "image"), None], "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if coHosts is not None:
data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if viewOnly is not None:
#fixed by Minori#6457
if viewOnly:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", headers=self.parse_headers()) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not viewOnly:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", headers=self.parse_headers()) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if canInvite is not None:
if canInvite:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not canInvite:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if canTip is not None:
if canTip:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not canTip:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
return res
async def visit(self, userId: str):
"""
Visit an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(uuid4())
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
async with self.session.post(url, headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def follow(self, userId: Union[str, list]):
"""
Follow an User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.WrongType(type(userId))
async def unfollow(self, userId: str):
"""
Unfollow an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def block(self, userId: str):
"""
Block an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def unblock(self, userId: str):
"""
Unblock an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def join_community(self, comId: str, invitationCode: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationCode** : Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationCode: data["invitationId"] = await self.link_identify(invitationCode)
data = json.dumps(data)
async with self.session.post(f"{self.api}/x{comId}/s/community/join", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/x{comId}/s/community/membership-request", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/x{comId}/s/community/leave", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
async with self.session.post(f"{self.api}/x{comId}/s/{flg}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = await self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if backgroundImage: data["extensions"] = {"style": {"backgroundMediaList": [[100, backgroundImage, None, None, None]]}}
if defaultBubbleId: data["extensions"] = {"defaultBubbleId": defaultBubbleId}
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if not isAnonymous: data["privacyMode"] = 1
if isAnonymous: data["privacyMode"] = 2
if not getNotifications: data["notificationStatus"] = 2
if getNotifications: data["privacyMode"] = 1
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/account/change-amino-id", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_linked_communities(self, userId: str):
"""
Get a List of Linked Communities of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["linkedCommunityList"]).CommunityList
async def get_unlinked_communities(self, userId: str):
"""
Get a List of Unlinked Communities of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["unlinkedCommunityList"]).CommunityList
async def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
- **comIds** : IDS of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def remove_linked_community(self, comId: str):
"""
Remove a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
async def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: url = f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}"
elif blogId: url = f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def like_blog(self, blogId: Union[str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/feed/g-vote", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
async def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: url = f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
async def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: url = f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView"
elif blogId: url = f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/membership?force=true", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Membership(json.loads(await response.text())).Membership
async def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in self.get_supported_languages(): raise exceptions.UnsupportedLanguage(language)
async with self.session.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.BlogList(json.loads(await response.text())["blogList"]).BlogList
async def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/wallet", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.WalletInfo(json.loads(await response.text())["wallet"]).WalletInfo
async def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.WalletHistory(json.loads(await response.text())["coinHistoryList"]).WalletHistory
async def get_from_deviceid(self, deviceId: str):
"""
Get the User ID from an Device ID.
**Parameters**
- **deviceID** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/auid?deviceId={deviceId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["auid"]
async def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/link-resolution?q={code}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.FromCode(json.loads(await response.text())["linkInfoV2"]).FromCode
async def get_from_id(self, objectId: str, objectType: int, comId: str = None):
    """
    Get an object's information from its ID and type.

    **Parameters**
        - **objectId** : ID of the Object (User ID, Blog ID, etc.).
        - **objectType** : Type of the Object.
        - *comId* : ID of the Community. Use if the Object is in a Community.

    **Returns**
        - **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    payload = json.dumps({
        "objectId": objectId,
        "targetCode": 1,
        "objectType": objectType,
        "timestamp": int(timestamp() * 1000),
    })
    # Community-scoped objects are resolved through the community endpoint.
    base = f"{self.api}/g/s-x{comId}" if comId else f"{self.api}/g/s"
    async with self.session.post(f"{base}/link-resolution",
                                 headers=self.parse_headers(data=payload),
                                 data=payload) as response:
        body = json.loads(await response.text())
        if response.status != 200:
            return exceptions.CheckException(body)
        return objects.FromCode(body["linkInfoV2"]).FromCode
async def get_supported_languages(self):
    """
    Get the list of languages supported by Amino.

    **Returns**
        - **Success** : :meth:`List of Supported Languages <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    url = f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100"
    async with self.session.get(url, headers=self.parse_headers()) as response:
        body = json.loads(await response.text())
        if response.status != 200:
            return exceptions.CheckException(body)
        return body["supportedLanguages"]
async def claim_new_user_coupon(self):
    """
    Claim the New User Coupon available when a new account is created.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    url = f"{self.api}/g/s/coupon/new-user-coupon/claim"
    async with self.session.post(url, headers=self.parse_headers()) as response:
        if response.status != 200:
            return exceptions.CheckException(json.loads(await response.text()))
        return response.status
async def get_subscriptions(self, start: int = 0, size: int = 25):
    """
    Get information about the account's subscriptions.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`List <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    url = f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}"
    async with self.session.get(url, headers=self.parse_headers()) as response:
        body = json.loads(await response.text())
        if response.status != 200:
            return exceptions.CheckException(body)
        return body["storeSubscriptionItemList"]
async def get_all_users(self, start: int = 0, size: int = 25):
    """
    Get a list of recently active Amino users.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    url = f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}"
    async with self.session.get(url, headers=self.parse_headers()) as response:
        body = json.loads(await response.text())
        if response.status != 200:
            return exceptions.CheckException(body)
        return objects.UserProfileCountList(body).UserProfileCountList
async def accept_host(self, chatId: str, requestId: str):
    """
    Accept a pending chat organizer (host) transfer request.

    **Parameters**
        - **chatId** : ID of the Chat.
        - **requestId** : ID of the transfer-organizer request.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.parse_headers()) as response:
        if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
        else: return response.status
async def accept_organizer(self, chatId: str, requestId: str):
    """
    Alias of :meth:`accept_host`; accept a pending chat organizer transfer request.

    **Parameters**
        - **chatId** : ID of the Chat.
        - **requestId** : ID of the transfer-organizer request.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    # Bug fix: the result of accept_host() was previously awaited but
    # discarded, so callers always received None instead of the HTTP
    # status (or the CheckException object on failure).
    return await self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
async def link_identify(self, code: str):
    """
    Resolve an Amino invite code into community link information.

    **Parameters**
        - **code** : Invite code (the part after ``aminoapps.com/invite/``).

    **Returns**
        - **Success** : dict with the raw link-identify response
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    async with self.session.get(f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}", headers=self.parse_headers()) as response:
        if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
        else: return json.loads(await response.text())
async def invite_to_vc(self, chatId: str, userId: str):
    """
    Invite a user to a voice chat.

    **Parameters**
        - **chatId** : ID of the Chat.
        - **userId** : ID of the User.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    payload = json.dumps({"uid": userId})
    url = f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite"
    async with self.session.post(url, headers=self.parse_headers(data=payload),
                                 data=payload) as response:
        if response.status != 200:
            return exceptions.CheckException(json.loads(await response.text()))
        return response.status
async def wallet_config(self, level: int):
    """
    Change the wallet's ads configuration.

    **Parameters**
        - **level** : Level of the ads (``1`` or ``2``).

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    payload = json.dumps({
        "adsLevel": level,
        "timestamp": int(timestamp() * 1000),
    })
    url = f"{self.api}/g/s/wallet/ads/config"
    async with self.session.post(url, headers=self.parse_headers(data=payload),
                                 data=payload) as response:
        if response.status != 200:
            return exceptions.CheckException(json.loads(await response.text()))
        return response.status
async def get_avatar_frames(self, start: int = 0, size: int = 25):
    """
    Get the list of available avatar frames.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`Avatar Frame List <amino.lib.util.objects.AvatarFrameList>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    async with self.session.get(f"{self.api}/g/s/avatar-frame?start={start}&size={size}", headers=self.parse_headers()) as response:
        if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
        else: return objects.AvatarFrameList(json.loads(await response.text())["avatarFrameList"]).AvatarFrameList
async def subscribe_amino_plus(self, transactionId="", sku="d940cf4a-6cf2-4737-9f3d-655234a92ea5"):
    """
    Subscribes to Amino+.

    **Parameters**
        - **transactionId** : The transaction Id as a uuid4; a random one is
          generated when empty.
        - **sku** : Product SKU of the Amino+ membership.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    # Bug fix: the payload dict was wrapped in an extra pair of braces,
    # i.e. json.dumps({ {...} }), which builds a *set* containing a dict
    # and raises TypeError (dicts are unhashable) before any request is made.
    data = json.dumps({
        "sku": sku,
        "packageName": "com.narvii.amino.master",
        "paymentType": 1,
        "paymentContext": {
            "transactionId": (transactionId or str(uuid4())),
            "isAutoRenew": True
        },
        # Consistency fix: every sibling endpoint sends the timestamp in
        # milliseconds via int(timestamp() * 1000).
        "timestamp": int(timestamp() * 1000)
    })
    # Consistency fix: pass the body to parse_headers(data=...) like the
    # other POST endpoints so the request is signed over the payload.
    async with self.session.post(f"{self.api}/g/s/membership/product/subscribe", headers=self.parse_headers(data=data), data=data) as response:
        if response.status != 200:
            return exceptions.CheckException(json.loads(await response.text()))
        return response.status
/NREL_cloud_fs-0.0.8-py3-none-any.whl/cloud_fs/cloud_fs.py | import inspect
from .filesystems import Local, S3
class FileSystem:
    """
    Abstraction over a file path that dispatches file-system commands to the
    appropriate backend (local disk or S3), so callers can use one API
    regardless of where the file lives.
    """

    def __init__(self, path, anon=False, profile=None, **kwargs):
        """
        Parameters
        ----------
        path : str
            S3 object path or file path
        anon : bool, optional
            Whether to use anonymous credentials, by default False
        profile : str, optional
            AWS credentials profile, by default None
        """
        self._path = path
        self._handler = None  # file-like object opened by the context manager
        # Paths beginning with "s3:" (case-insensitive) go to the S3 backend.
        if path.lower().startswith('s3:'):
            self._fs = S3(path, anon=anon, profile=profile, **kwargs)
        else:
            self._fs = Local(path)

        self._check_operations()

    def __repr__(self):
        msg = ("{} operations on {}"
               .format(self.__class__.__name__, self.path))
        return msg

    def __enter__(self):
        self._handler = self.open()
        return self._handler

    def __exit__(self, type, value, traceback):
        # Close the handler if one was opened; a missing handler is not an
        # error (e.g. open() failed or was never called).
        try:
            self._handler.close()
        except AttributeError:
            pass
        # Returning False propagates any exception raised inside the ``with``
        # block. The previous trailing bare ``raise`` was redundant for this
        # purpose and could itself fail outside an active except block.
        return False

    @property
    def path(self):
        """
        File path to perform filesystem operation on

        Returns
        -------
        str
        """
        return self._path

    def _check_operations(self):
        """
        Check to ensure the File System class being used has all of the
        required file system operations defined.

        Returns
        -------
        list
            Names of the public operations this wrapper exposes.
        """
        operations = [attr for attr, attr_obj
                      in inspect.getmembers(self.__class__)
                      if not attr.startswith('_')
                      and not isinstance(attr_obj, property)
                      and not inspect.ismethod(attr_obj)]
        missing = list(set(operations) - set(self._fs.operations))
        if missing:
            # Bug fix: the backend and the missing-operation list were
            # swapped in the message's format arguments, producing
            # "not defined in [list of names]: <backend>".
            msg = ("The following filesystem operations are not defined in "
                   "{}:\n{}".format(self._fs, missing))
            raise NotImplementedError(msg)

        return operations

    def cp(self, dst, **kwargs):
        """
        Copy file to given destination

        Parameters
        ----------
        dst : str
            Destination path
        kwargs : dict
            kwargs for s3fs.S3FileSystem.copy
        """
        self._fs['cp'](self.path, dst, **kwargs)

    def exists(self):
        """
        Check if file path exists

        Returns
        -------
        bool
        """
        return self._fs['exists'](self.path)

    def isfile(self):
        """
        Check if path is a file

        Returns
        -------
        bool
        """
        return self._fs['isfile'](self.path)

    def isdir(self):
        """
        Check if path is a directory

        Returns
        -------
        bool
        """
        return self._fs['isdir'](self.path)

    def glob(self, **kwargs):
        """
        Find all file paths matching the given pattern

        Parameters
        ----------
        kwargs : dict
            kwargs for s3fs.S3FileSystem.glob

        Returns
        -------
        list
        """
        return self._fs['glob'](self.path, **kwargs)

    def ls(self):
        """
        List everything under given path

        Returns
        -------
        list
        """
        return sorted(self._fs['ls'](self.path))

    def mkdirs(self, **kwargs):
        """
        Make desired directory and any intermediate directories

        Parameters
        ----------
        kwargs : dict
            kwargs for s3fs.S3FileSystem.mkdirs
        """
        self._fs['mkdirs'](self.path, **kwargs)

    def mv(self, dst, **kwargs):
        """
        Move file or all files in directory to given destination

        Parameters
        ----------
        dst : str
            Destination path
        kwargs : dict
            kwargs for s3fs.S3FileSystem.mv
        """
        self._fs['mv'](self.path, dst, **kwargs)

    def open(self, mode='rb', **kwargs):
        """
        Open file (local or S3 object) and return a file-like object

        Parameters
        ----------
        mode : str
            Mode with which to open the file
        kwargs : dict
            kwargs for s3fs.S3FileSystem.open

        Returns
        -------
        Return a file-like object from the filesystem
        """
        return self._fs['open'](self.path, mode=mode, **kwargs)

    def rm(self, **kwargs):
        """
        Delete file or files in given directory

        Parameters
        ----------
        kwargs : dict
            kwargs for s3fs.S3FileSystem.rm
        """
        self._fs['rm'](self.path, **kwargs)

    def size(self):
        """
        Get file size in bytes

        Returns
        -------
        float
        """
        return self._fs['size'](self.path)

    def walk(self):
        """
        Recursively search directory and all sub-directories

        Returns
        -------
        path : str
            Root path
        directory : list
            All directories in path
        file : list
            All files in path
        """
        return self._fs['walk'](self.path)

    @classmethod
    def copy(cls, src_path, dst_path, anon=False, profile=None, **kwargs):
        """
        Copy file(s) from src_path to dst_path. Either can be local or in the
        cloud.

        Parameters
        ----------
        src_path : str
            Source path to copy file(s) from, can be local or in the cloud
        dst_path : str
            Destination path to copy file(s) to, can be local or in the cloud
        anon : bool, optional
            Whether to use anonymous credentials, by default False
        profile : str, optional
            AWS credentials profile, by default None
        """
        # Use the S3 backend if either endpoint lives in the cloud.
        s3 = (src_path.lower().startswith('s3:')
              or dst_path.lower().startswith('s3:'))
        if s3:
            path = 's3:'
        else:
            path = ''

        fs = cls(path, anon=anon, profile=profile, **kwargs)
        fs._fs['cp'](src_path, dst_path)
/Flask-Dance-7.0.0.tar.gz/Flask-Dance-7.0.0/flask_dance/contrib/slack.py | from flask import g
from requests_oauthlib.compliance_fixes.slack import slack_compliance_fix
from werkzeug.local import LocalProxy
from flask_dance.consumer import OAuth2ConsumerBlueprint
__maintainer__ = "David Baumgold <david@davidbaumgold.com>"
class SlackBlueprint(OAuth2ConsumerBlueprint):
    # OAuth2 consumer blueprint specialized for Slack.
    def session_created(self, session):
        """Apply requests-oauthlib's Slack compliance fix to each new session."""
        return slack_compliance_fix(session)
def make_slack_blueprint(
    client_id=None,
    client_secret=None,
    *,
    scope=None,
    redirect_url=None,
    redirect_to=None,
    login_url=None,
    authorized_url=None,
    session_class=None,
    storage=None,
    subdomain=None,
    rule_kwargs=None,
):
    """
    Make a blueprint for authenticating with Slack using OAuth 2. This requires
    a client ID and client secret from Slack. You should either pass them to
    this constructor, or make sure that your Flask application config defines
    them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and
    :envvar:`SLACK_OAUTH_CLIENT_SECRET`.

    Args:
        client_id (str): The client ID for your application on Slack.
        client_secret (str): The client secret for your application on Slack
        scope (str, optional): comma-separated list of scopes for the OAuth token
        redirect_url (str): the URL to redirect to after the authentication
            dance is complete
        redirect_to (str): if ``redirect_url`` is not defined, the name of the
            view to redirect to after the authentication dance is complete.
            The actual URL will be determined by :func:`flask.url_for`
        login_url (str, optional): the URL path for the ``login`` view.
            Defaults to ``/slack``
        authorized_url (str, optional): the URL path for the ``authorized`` view.
            Defaults to ``/slack/authorized``.
        session_class (class, optional): The class to use for creating a
            Requests session. Defaults to
            :class:`~flask_dance.consumer.requests.OAuth2Session`.
        storage: A token storage class, or an instance of a token storage
            class, to use for this blueprint. Defaults to
            :class:`~flask_dance.consumer.storage.session.SessionStorage`.
        subdomain (str, optional): the name of the subdomain under which your
            Slack space is accessed. Providing this may improve the login experience.
        rule_kwargs (dict, optional): Additional arguments that should be passed when adding
            the login and authorized routes. Defaults to ``None``.

    :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
    :returns: A :doc:`blueprint <flask:blueprints>` to attach to your Flask app.
    """
    # Fall back to the default scopes when none (or an empty value) is given.
    if not scope:
        scope = ["identify", "chat:write:bot"]

    # A known workspace subdomain gives users a nicer, pre-scoped login page.
    if subdomain is None:
        authorization_url = "https://slack.com/oauth/authorize"
    else:
        authorization_url = f"https://{subdomain}.slack.com/oauth/authorize"

    slack_bp = SlackBlueprint(
        "slack",
        __name__,
        client_id=client_id,
        client_secret=client_secret,
        scope=scope,
        base_url="https://slack.com/api/",
        authorization_url=authorization_url,
        token_url="https://slack.com/api/oauth.access",
        redirect_url=redirect_url,
        redirect_to=redirect_to,
        login_url=login_url,
        authorized_url=authorized_url,
        session_class=session_class,
        storage=storage,
        rule_kwargs=rule_kwargs,
    )
    # Allow the credentials to be pulled from the Flask app config.
    slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
    slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"

    @slack_bp.before_app_request
    def set_applocal_session():
        g.flask_dance_slack = slack_bp.session

    return slack_bp
slack = LocalProxy(lambda: g.flask_dance_slack) | PypiClean |
/AWS_IFG_distributions-1.0.tar.gz/AWS_IFG_distributions-1.0/AWS_IFG_distributions/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
import numpy as np
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample, by n for a
        # full population.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)

        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
        plt.show()

    def replace_stats_with_data(self, sample=True):
        """Function to calculate mean and standard deviation from the data set.

        Assumes the data has already been loaded (e.g. via read_data_file()
        inherited from Distribution) and refreshes the mean and stdev
        attributes from it.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: the mean value
            float: the stdev value
        """
        self.calculate_mean()
        self.calculate_stdev(sample)
        return self.mean, self.stdev

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / math.sqrt(2*math.pi*(self.stdev**2))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev

        min_range = min(self.data)
        max_range = max(self.data)

        # interval between consecutive x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # sample the pdf across the data range
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: the second subplot was never labelled (axes[0] was
        # labelled twice).
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def create_gaussian_file(self, mu, sigma, size_file):
        """Write ``size_file`` integer-truncated samples drawn from
        N(mu, sigma) to 'numbers_gaussian.txt', one per line.

        Args:
            mu (float): mean of the sampling distribution
            sigma (float): standard deviation of the sampling distribution
            size_file (int): number of samples to write

        Returns:
            None
        """
        s = np.random.normal(mu, sigma, size_file)
        # Robustness fix: a context manager guarantees the file is closed
        # even if a write fails (previously the handle leaked on error).
        with open("numbers_gaussian.txt", "w+") as f:
            for value in s:
                f.write("{} \n".format(str(int(value))))

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, so the stdevs combine in
        # quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/Mopidy-WaitForInternet-0.2.1.tar.gz/Mopidy-WaitForInternet-0.2.1/README.rst | ****************************
Mopidy-WaitForInternet
****************************
.. image:: https://img.shields.io/pypi/v/Mopidy-WaitForInternet.svg?style=flat
:target: https://pypi.org/project/Mopidy-WaitForInternet/
:alt: Latest PyPI version
.. image:: https://img.shields.io/pypi/dm/Mopidy-WaitForInternet.svg?style=flat
:target: https://pypi.org/project/Mopidy-WaitForInternet/
:alt: Number of PyPI downloads
.. image:: https://img.shields.io/github/actions/workflow/status/DavisNT/mopidy-waitforinternet/ci.yml?branch=develop&style=flat
:target: https://github.com/DavisNT/mopidy-waitforinternet/actions/workflows/ci.yml
:alt: GitHub Actions build status
.. image:: https://img.shields.io/coveralls/github/DavisNT/mopidy-waitforinternet.svg?style=flat
:target: https://coveralls.io/github/DavisNT/mopidy-waitforinternet
:alt: Coveralls test coverage
.. image:: https://img.shields.io/github/actions/workflow/status/DavisNT/mopidy-waitforinternet/servers-test.yml?branch=develop&style=flat&label=servers-test
:target: https://github.com/DavisNT/mopidy-waitforinternet/actions/workflows/servers-test.yml
:alt: Weekly build that tests connectivity check servers
`Mopidy <http://www.mopidy.com/>`_ extensions that wait (up to around 5 minutes) for an internet connection (and optionally for time synchronization) during early phase of Mopidy startup (before other extensions start to initialize).
Installation
============
Install by running::
pip install Mopidy-WaitForInternet
Configuration
=============
This package consists of two Mopidy extensions - ``mopidy_waitforinternet`` (enabled by default) that waits **only** for internet connection and ``mopidy_waitfortimesync`` (disabled by default) that waits for internet connection **and** time synchronization. They have no configuration options in ``mopidy.conf`` apart from the default ``enabled`` setting::
# To enable waiting for internet connection and time synchronization
[waitforinternet]
enabled = false
[waitfortimesync]
enabled = true
These extensions don't support proxy servers (they ignore proxy configuration in ``mopidy.conf``).
Usage
=====
Mopidy-WaitForInternet might be useful if other Mopidy extensions (e.g. extensions for online music streaming services) fail to initialize, because they try to connect to internet resources before machine running Mopidy has established an internet connection (e.g. connected to wifi) or synchronized its clock.
``mopidy_waitforinternet`` will delay initialization of other Mopidy extensions until an internet connection has been established (the extension will wait for up to around 5 minutes). It's recommended if:
* the computer running Mopidy has a `real-time clock <https://en.wikipedia.org/wiki/Real-time_clock>`_
* all of the below:
* it is important to minimize Mopidy startup time
* it is acceptable if other Mopidy extensions occasionally (once in several months or so) fail to initialize due to inaccurate date/time
* the computer does not have a real-time clock
* the computer/OS saves the time between reboots (like Raspberry Pi OS does)
* the computer is used often
``mopidy_waitfortimesync`` will delay initialization of other Mopidy extensions until an internet connection has been established and computer's clock has been synchronized (the extension will wait for up to around 5 minutes). It's recommended if:
* prolonged Mopidy startup time is not a problem
* it is important to minimize initialization failures of other Mopidy extensions
* the computer running Mopidy does not have a real-time clock and is used rarely
Local time (computer's clock) is somewhat important for connectivity. Most internet services use HTTPS and HTTPS has certificates that are valid for a specific time period (usually 3 or 13 months). To connect to an HTTPS resource, computer's clock must be within the validity period of the HTTPS certificate used by that particular resource. Some computers (e.g. Raspberry Pi) don't have `real-time clocks <https://en.wikipedia.org/wiki/Real-time_clock>`_ and synchronize their clocks from the internet (via `NTP <https://en.wikipedia.org/wiki/Network_Time_Protocol>`_). In most cases, until the clock of such computer is synchronized it is set to the time saved at previous shutdown, for some computers the clock is set to a constant time/date (e.g. midnight January 1, 2020). As ``mopidy_waitforinternet`` uses HTTPS, it will detect internet connectivity only when computer's clock is within the validity period of the HTTPS certificate of at least one of the URLs used by ``mopidy_waitforinternet``. This guarantees that computer's clock has accuracy of a year or so, however this does not guarantee that computer's clock is accurate enough to allow connectivity (to other HTTPS resources) required by other Mopidy extensions.
Both extensions log information about the introduced startup delay.
Important internals
===================
Mopidy-WaitForInternet uses several different URLs (currently - requests to public `DoH <https://en.wikipedia.org/wiki/DNS_over_HTTPS>`_ servers) to check internet connectivity. As a future-proofing measure there is a `weekly servers-test build <https://github.com/DavisNT/mopidy-waitforinternet/actions/workflows/servers-test.yml>`_ that verifies availability of these URLs.
Time synchronization is checked by comparing local time with the ``Date`` response header of HTTP requests to the internet connectivity check URLs (difference of less than 10 seconds is considered synchronized time).
License
=======
::
Copyright 2022 Davis Mosenkovs
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Project resources
=================
- `Source code <https://github.com/DavisNT/mopidy-waitforinternet>`_
- `Issue tracker <https://github.com/DavisNT/mopidy-waitforinternet/issues>`_
- `Development branch tarball <https://github.com/DavisNT/mopidy-waitforinternet/archive/develop.tar.gz#egg=Mopidy-WaitForInternet-dev>`_
- `Weekly servers-test build that tests URLs used by Mopidy-WaitForInternet for internet connectivity check <https://github.com/DavisNT/mopidy-waitforinternet/actions/workflows/servers-test.yml>`_
Changelog
=========
v0.2.1
----------------------------------------
- Fixed build badges (including servers-test).
v0.2.0
----------------------------------------
- Added second extension (mopidy_waitfortimesync).
- Minor improvements.
v0.1.1
----------------------------------------
- Fixed README formatting.
- Initial release.
v0.1.0 (UNRELEASED)
----------------------------------------
- Initial version.
| PypiClean |
/NlpToolkit-Util-Cy-1.0.9.tar.gz/NlpToolkit-Util-Cy-1.0.9/README.md | For Developers
============
You can also see [Python](https://github.com/starlangsoftware/Util-Py), [Java](https://github.com/starlangsoftware/Util), [C++](https://github.com/starlangsoftware/Util-CPP), [Swift](https://github.com/starlangsoftware/Util-Swift), [Js](https://github.com/starlangsoftware/Util-Js), or [C#](https://github.com/starlangsoftware/Util-CS) repository.
## Requirements
* [Python 3.7 or higher](#python)
* [Git](#git)
### Python
To check if you have a compatible version of Python installed, use the following command:
python -V
You can find the latest version of Python [here](https://www.python.org/downloads/).
### Git
Install the [latest version of Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
## Pip Install
pip3 install NlpToolkit-Util-Cy
## Download Code
In order to work on code, create a fork from GitHub page.
Use Git for cloning the code to your local or below line for Ubuntu:
git clone <your-fork-git-link>
A directory called Util will be created. Or you can use below link for exploring the code:
git clone https://github.com/starlangsoftware/Util-Cy.git
## Open project with Pycharm IDE
Steps for opening the cloned project:
* Start IDE
* Select **File | Open** from main menu
* Choose `Util-Cy` file
* Select open as project option
* Couple of seconds, dependencies will be downloaded.
Detailed Description
============
+ [Interval](#interval)
+ [Subset](#subset)
+ [SubsetFromList](#subsetfromlist)
+ [Permutation](#permutation)
## Interval
Aralık veri yapısını tutmak için Interval sınıfı
a = Interval()
1 ve 4 aralığı eklemek için
a.add(1, 4)
i. aralığın başını getirmek için (yukarıdaki örnekteki 1 gibi)
getFirst(self, index: int) -> int
i. aralığın sonunu getirmek için (yukarıdaki örnekteki 4 gibi)
getLast(self, index: int) -> int
## Subset
Altküme tanımlamak ve tüm altkümelere ulaşmak için Subset ve SubsetFromList sınıfları
Subset veri yapısını tanımlamak için
Subset(self, rangeStart: int, rangeEnd: int, elementCount: int)
Burada elemenCount elemanlı, elemanları rangeStart ile rangeEnd arasında değerler alabilen
tüm altkümeleri gezen bir yapıdan bahsediyoruz. Örneğin
Subset(1, 4, 2), bize iki elemanlı elemanlarını 1 ile 4 arasından gelen tüm alt kümeleri
seçmek için kullanılan bir constructor'dır. Tüm altkümeleri elde etmek için
a = Subset(1, 4, 2);
subset = a.get()
while a.next():
subset = a.get()
....
Burada subset sırasıyla {1, 2}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4} altkümelerini gezer.
## SubsetFromList
Altküme tanımlamak ve tüm altkümelere ulaşmak için Subset ve SubsetFromList sınıfları
SubsetFromList veri yapısını kullanmak için
SubsetFromList(self, _list: list, elementCount: int)
Burada elementCount elemanlı, elemanları list listesinden çekilen değerler olan ve tüm
altkümeleri gezen bir yapıdan bahsediyoruz. Örneğin
SubsetFromList([1, 2, 3, 4], 3), bize üç elemanlı elemanlarını [1, 2, 3, 4] listesinden
seçen ve tüm alt kümeleri gezmekte kullanılan bir constructor'dır. Tüm altkümeleri elde
etmek için
a = SubsetFromList([1, 2, 3, 4], 3)
subset = a.get()
while a.next():
subset = a.get()
....
Burada SubsetFromList sırasıyla {1, 2, 3}, {1, 2, 4}, {1, 3, 4}, {2, 3, 4} altkümelerini
gezer.
## Permutation
Permütasyon tanımlamak ve tüm permütasyonlara ulaşmak için Permutation sınıfı
Permutation(self, n: int)
Burada 0 ile n - 1 arasındaki değerlerin tüm olası n'li permütasyonlarını gezen bir
yapıdan bahsediyoruz. Örneğin
Permutation(5), bize değerleri 0 ile 4 arasında olan tüm 5'li permütasyonları gezmekte
kullanılan bir constructor'dır. Tüm permütasyonları elde etmek için
a = Permutation(5)
permutation = a.get()
while a.next():
permutation = a.get();
...
Burada Permutation sırasıyla {0, 1, 2, 3, 4}, {0, 1, 2, 4, 3} gibi permütasyonları gezer.
| PypiClean |
/KitchenSink-0.1.0.tar.gz/KitchenSink-0.1.0/README.rst | ==================
Kitchen Sink Pager
==================
The Kitchen Sink Pager is a pager that does more, but its primary feature is
adding per file syntax highlighting to git diffs.
Installation
============
pip install KitchenSink
Examples
=========
::
# use it for paging. sometimes, it can even figure out
# the filetype and add syntax highlighting. (press 's')
cat some_file.py | kk
# Use it as a quick file jumper for grep results.
# Press 'lf' to quickly view a file in the current buffer
grep * -Rn my_string | kk
# Use it as a git log viewer.
# press 'lo' to [l]ist all git [o]bjects
# press 'lf' to [list] [f]iles in the buffer
git log | kk
# it does git diff highlighting, too
# press 's' to toggle highlighting
git log --color -n1 -p | kk
# if there are numbers in the buffer,
# the kitchen sink math them with 'm'
cat lots_of_numbers.txt | kk
Screenshots
-------------------
KitchenSink syntax highlighting
.. image:: https://raw.github.com/okayzed/kk.py/master/images/kk.png
KitchenSink syntax highlighting vs. the traditional git diff highlighting
.. image:: https://raw.github.com/okayzed/kk.py/master/images/kk_vs_less.png
Changing Syntax Coloring
------------------------
if the syntax coloring style isn't your style or isn't showing up well, you can
use any of pygments other available styles by setting KK_STYLE environment variable.
# listing the styles
python -c "import pygments.styles; print pygments.styles.STYLE_MAP.keys()"
# changing the style to vim. put this in .bashrc if you always want it
export KK_STYLE=vim
Why another pager?
------------------
Why not? Operating on pipe output is one of the slower parts of my workflow.
This is an attempt to make it more bearable.
| PypiClean |
/flask_kits-0.0.24.tar.gz/flask_kits-0.0.24/flask_kits/common/code.py |
ENCODINGS = ['utf8', 'gbk']
def decode_statement(statement, encodings):
    """Decode a byte string to text using the first encoding that succeeds.

    Already-decoded text is returned unchanged. Returns None (implicitly)
    when every encoding fails.

    NOTE(review): relies on the Python 2 ``unicode`` builtin, so this module
    is Python-2-only as written — confirm before porting to Python 3.
    """
    if isinstance(statement, unicode):
        return statement
    for encoding in encodings:
        try:
            return statement.decode(encoding)
        except UnicodeDecodeError:
            # Try the next candidate encoding.
            pass
def get_initial_letters(statement):
    """Return the concatenated initial letters (pinyin abbreviation) of
    every character in *statement*, or '' if it cannot be decoded."""
    decoded = decode_statement(statement, ENCODINGS)
    if decoded is None:
        return ''
    letters = [get_initial_letter(ch) for ch in decoded]
    return ''.join(letters)
def get_initial_letter(character):
    """Return a single lowercase ASCII letter for one character.

    ASCII characters are returned lowercased.  Chinese characters are mapped
    to the initial of their pinyin via the classic GB2312 code-point range
    table below.  Characters outside the table map to ''.

    NOTE(review): Python 2 only - in Python 3 indexing ``bytes`` yields an
    int and ``ord(int)`` raises TypeError instead of being caught here.
    """
    # Re-encode to GBK so the two-byte GB2312 code can be computed.
    character = character.encode('gbk')
    try:
        # Single-byte (ASCII) character: ord() succeeds.
        ord(character)
        return character.lower()
    except Exception:
        # Multi-byte GBK character: compute the signed GB2312 code and look
        # it up in the standard pinyin-initial range table.
        asc = ord(character[0]) * 256 + ord(character[1]) - 65536
        if -20319 <= asc <= -20284:
            return 'a'
        if -20283 <= asc <= -19776:
            return 'b'
        if -19775 <= asc <= -19219:
            return 'c'
        if -19218 <= asc <= -18711:
            return 'd'
        if -18710 <= asc <= -18527:
            return 'e'
        if -18526 <= asc <= -18240:
            return 'f'
        if -18239 <= asc <= -17923:
            return 'g'
        if -17922 <= asc <= -17418:
            return 'h'
        if -17417 <= asc <= -16475:
            return 'j'
        if -16474 <= asc <= -16213:
            return 'k'
        if -16212 <= asc <= -15641:
            return 'l'
        if -15640 <= asc <= -15166:
            return 'm'
        if -15165 <= asc <= -14923:
            return 'n'
        if -14922 <= asc <= -14915:
            return 'o'
        if -14914 <= asc <= -14631:
            return 'p'
        if -14630 <= asc <= -14150:
            return 'q'
        if -14149 <= asc <= -14091:
            return 'r'
        if -14090 <= asc <= -13119:
            return 's'
        if -13118 <= asc <= -12839:
            return 't'
        if -12838 <= asc <= -12557:
            return 'w'
        if -12556 <= asc <= -11848:
            return 'x'
        if -11847 <= asc <= -11056:
            return 'y'
        if -11055 <= asc <= -10247:
            return 'z'
        # Not a GB2312 character covered by the table.
        return ''
if __name__ == "__main__":
    # Smoke test: print the pinyin initials of a Chinese place name.
    x = u'迦舒布鲁姆Ⅰ峰'
    print(get_initial_letters(x))
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/mode-snippets.js | ace.define("ace/mode/folding/coffee",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode","ace/range"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var Range = require("../../range").Range;
var FoldMode = exports.FoldMode = function() {};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.getFoldWidgetRange = function(session, foldStyle, row) {
var range = this.indentationBlock(session, row);
if (range)
return range;
var re = /\S/;
var line = session.getLine(row);
var startLevel = line.search(re);
if (startLevel == -1 || line[startLevel] != "#")
return;
var startColumn = line.length;
var maxRow = session.getLength();
var startRow = row;
var endRow = row;
while (++row < maxRow) {
line = session.getLine(row);
var level = line.search(re);
if (level == -1)
continue;
if (line[level] != "#")
break;
endRow = row;
}
if (endRow > startRow) {
var endColumn = session.getLine(endRow).length;
return new Range(startRow, startColumn, endRow, endColumn);
}
};
this.getFoldWidget = function(session, foldStyle, row) {
var line = session.getLine(row);
var indent = line.search(/\S/);
var next = session.getLine(row + 1);
var prev = session.getLine(row - 1);
var prevIndent = prev.search(/\S/);
var nextIndent = next.search(/\S/);
if (indent == -1) {
session.foldWidgets[row - 1] = prevIndent!= -1 && prevIndent < nextIndent ? "start" : "";
return "";
}
if (prevIndent == -1) {
if (indent == nextIndent && line[indent] == "#" && next[indent] == "#") {
session.foldWidgets[row - 1] = "";
session.foldWidgets[row + 1] = "";
return "start";
}
} else if (prevIndent == indent && line[indent] == "#" && prev[indent] == "#") {
if (session.getLine(row - 2).search(/\S/) == -1) {
session.foldWidgets[row - 1] = "start";
session.foldWidgets[row + 1] = "";
return "";
}
}
if (prevIndent!= -1 && prevIndent < indent)
session.foldWidgets[row - 1] = "start";
else
session.foldWidgets[row - 1] = "";
if (indent < nextIndent)
return "start";
else
return "";
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/snippets",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/text_highlight_rules","ace/mode/folding/coffee"], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;

// Highlight rules for the body of one snippet: escapes, $VARIABLES,
// ${...} placeholders (nesting depth tracked on the tokenizer stack) and
// embedded substitution regexps.
var SnippetHighlightRules = function() {
    // Built-in TextMate-style variable names.
    var builtins = "SELECTION|CURRENT_WORD|SELECTED_TEXT|CURRENT_LINE|LINE_INDEX|" +
        "LINE_NUMBER|SOFT_TABS|TAB_SIZE|FILENAME|FILEPATH|FULLNAME";
    this.$rules = {
        "start" : [
            {token:"constant.language.escape", regex: /\\[\$}`\\]/},
            {token:"keyword", regex: "\\$(?:TM_)?(?:" + builtins + ")\\b"},
            {token:"variable", regex: "\\$\\w+"},
            // "${" opens a placeholder: push/increment the nesting counter.
            {onMatch: function(value, state, stack) {
                if (stack[1])
                    stack[1]++;
                else
                    stack.unshift(state, 1);
                return this.tokenName;
            }, tokenName: "markup.list", regex: "\\${", next: "varDecl"},
            // "}" closes one nesting level; plain text when nothing is open.
            {onMatch: function(value, state, stack) {
                if (!stack[1])
                    return "text";
                stack[1]--;
                if (!stack[1])
                    stack.splice(0,2);
                return this.tokenName;
            }, tokenName: "markup.list", regex: "}"},
            // "$$-----" separator lines are treated as doc comments.
            {token: "doc.comment", regex:/^\${2}-{5,}$/}
        ],
        "varDecl" : [
            {regex: /\d+\b/, token: "constant.numeric"},
            {token:"keyword", regex: "(?:TM_)?(?:" + builtins + ")\\b"},
            {token:"variable", regex: "\\w+"},
            {regex: /:/, token: "punctuation.operator", next: "start"},
            {regex: /\//, token: "string.regex", next: "regexp"},
            {regex: "", next: "start"}
        ],
        "regexp" : [
            {regex: /\\./, token: "escape"},
            {regex: /\[/, token: "regex.start", next: "charClass"},
            {regex: "/", token: "string.regex", next: "format"},
            {"token": "string.regex", regex:"."}
        ],
        charClass : [
            {regex: "\\.", token: "escape"},
            {regex: "\\]", token: "regex.end", next: "regexp"},
            {"token": "string.regex", regex:"."}
        ],
        "format" : [
            {regex: /\\[ulULE]/, token: "keyword"},
            {regex: /\$\d+/, token: "variable"},
            {regex: "/[gim]*:?", token: "string.regex", next: "start"},
            {"token": "string", regex:"."}
        ]
    };
};
oop.inherits(SnippetHighlightRules, TextHighlightRules);

exports.SnippetHighlightRules = SnippetHighlightRules;

// Highlight rules for a whole .snippets file: directive lines at the top
// level, with tab-indented snippet bodies delegated to SnippetHighlightRules.
var SnippetGroupHighlightRules = function() {
    this.$rules = {
        "start" : [
            {token: "text", regex: "^\\t", next: "sn-start"},
            // Leading spaces are invalid; snippet bodies must be tab-indented.
            {token:"invalid", regex: /^ \s*/},
            {token:"comment", regex: /^#.*/},
            {token:"constant.language.escape", regex: "^regex ", next: "regex"},
            {token:"constant.language.escape", regex: "^(trigger|endTrigger|name|snippet|guard|endGuard|tabTrigger|key)\\b"}
        ],
        "regex" : [
            {token:"text", regex: "\\."},
            {token:"keyword", regex: "/"},
            {token:"empty", regex: "$", next: "start"}
        ]
    };
    this.embedRules(SnippetHighlightRules, "sn-", [
        {token: "text", regex: "^\\t", next: "sn-start"},
        {onMatch: function(value, state, stack) {
            // NOTE(review): splice(stack.length) removes nothing; if the
            // intent was to clear the embedded-rules stack this would be
            // splice(0, stack.length) - confirm against upstream ace.
            stack.splice(stack.length);
            return this.tokenName;
        }, tokenName: "text", regex: "^(?!\t)", next: "start"}
    ])
};

oop.inherits(SnippetGroupHighlightRules, TextHighlightRules);

exports.SnippetGroupHighlightRules = SnippetGroupHighlightRules;

var FoldMode = require("./folding/coffee").FoldMode;

// The snippets editing mode: group highlight rules plus indentation folding.
var Mode = function() {
    this.HighlightRules = SnippetGroupHighlightRules;
    this.foldingRules = new FoldMode();
};
oop.inherits(Mode, TextMode);

(function() {
    this.$indentWithTabs = true;
    this.$id = "ace/mode/snippets";
}).call(Mode.prototype);

exports.Mode = Mode;
});
/Menus-0.2.0.tar.gz/Menus-0.2.0/menus/utils.py |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
import six
from menus.exceptions import MenusError
# Preventing import errors on non windows platforms: msvcrt exists only on
# Windows, while termios/tty exist only on POSIX systems.
if sys.platform == 'win32':
    import msvcrt
else:
    import termios
    import tty

log = logging.getLogger(__name__)

# Shell command used to clear the terminal on the current platform.
if sys.platform == 'win32':
    clear_screen_cmd = 'cls'
else:
    clear_screen_cmd = 'clear'
def check_commands_else_raise(options):
    """Validate a menu *options* sequence, raising MenusError on any problem.

    `options` must be a non-empty list or tuple of 2-tuples of the form
    ("Menu Name", menu_object), with at most 9 entries.

    Returns:
        True when every check passes.

    Raises:
        MenusError: on any invalid structure or content.
    """
    # We need at least one thing to display.
    # Using this as a teaching aid. Also making the use of
    # Engine(example=True) very explicit.
    if len(options) == 0:
        msg = ('You must pass a menus object or initialize '
               'like -> Engine(example=True)')
        raise MenusError(msg, expected=True)
    # We need a list or tuple to loop through.
    if not isinstance(options, (list, tuple)):
        msg = 'You must pass a list or tuple to menus.'
        raise MenusError(msg, expected=True)
    # NOTE(review): the check allows up to 9 entries but the original message
    # claimed a limit of 8; the message now matches the actual behavior.
    if len(options) > 9:
        msg = 'Cannot have more than 9 options per menu'
        raise MenusError(msg, expected=True)
    for o in options:
        # Ensure each item in the list/tuple is a ("name", object) pair.
        if not isinstance(o, tuple):
            raise MenusError('Item must be tuple: {}'.format(o), expected=True)
        if len(o) != 2:
            # NOTE(review): unlike the other checks, this one does not pass
            # expected=True; kept as-is to preserve behavior.
            raise MenusError('Invalid number of tuple '
                             'items:\n\n{}'.format(o))
        # Ensure index 0 is a str.
        if not isinstance(o[0], six.string_types):
            msg = 'Menus are passed as [("Menu Name", MenuObject())]'
            raise MenusError(msg)
    return True
# Gets a single character from standard input. Does not echo to the screen.
class Getch(object):
    """Callable that reads one keypress, dispatching to the platform impl."""

    def __init__(self):
        # Pick the Windows or Unix implementation at construction time.
        if sys.platform == 'win32':
            self.impl = GetchWindows()
        else:
            self.impl = GetchUnix()

    def __call__(self):
        return self.impl()
class GetchUnix(object):
    """Read one raw character from stdin on POSIX terminals."""

    def __init__(self):
        # Not sure if these imports are required here
        # import tty, sys
        pass

    def __call__(self):
        # Switch the terminal to raw mode, read exactly one character, and
        # always restore the previous terminal settings afterwards.
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class GetchWindows(object):
    """Read one raw character from stdin using the Windows console API."""

    def __init__(self):
        # Not sure if this import is required
        # import msvcrt
        pass

    def __call__(self):
        # msvcrt.getch reads a keypress without echoing it.
        return msvcrt.getch()
/Mopidy-Spotmop-2.10.1.tar.gz/Mopidy-Spotmop-2.10.1/mopidy_spotmop/static/vendor/angular-touch.min.js | (function(y,u,z){'use strict';function s(f,k,p){n.directive(f,["$parse","$swipe",function(d,e){return function(l,m,g){function h(a){if(!b)return!1;var c=Math.abs(a.y-b.y);a=(a.x-b.x)*k;return q&&75>c&&0<a&&30<a&&.3>c/a}var c=d(g[f]),b,q,a=["touch"];u.isDefined(g.ngSwipeDisableMouse)||a.push("mouse");e.bind(m,{start:function(a,c){b=a;q=!0},cancel:function(a){q=!1},end:function(a,b){h(a)&&l.$apply(function(){m.triggerHandler(p);c(l,{$event:b})})}},a)}}])}var n=u.module("ngTouch",[]);n.factory("$swipe",
[function(){function f(d){d=d.originalEvent||d;var e=d.touches&&d.touches.length?d.touches:[d];d=d.changedTouches&&d.changedTouches[0]||e[0];return{x:d.clientX,y:d.clientY}}function k(d,e){var l=[];u.forEach(d,function(d){(d=p[d][e])&&l.push(d)});return l.join(" ")}var p={mouse:{start:"mousedown",move:"mousemove",end:"mouseup"},touch:{start:"touchstart",move:"touchmove",end:"touchend",cancel:"touchcancel"}};return{bind:function(d,e,l){var m,g,h,c,b=!1;l=l||["mouse","touch"];d.on(k(l,"start"),function(a){h=
f(a);b=!0;g=m=0;c=h;e.start&&e.start(h,a)});var q=k(l,"cancel");if(q)d.on(q,function(a){b=!1;e.cancel&&e.cancel(a)});d.on(k(l,"move"),function(a){if(b&&h){var d=f(a);m+=Math.abs(d.x-c.x);g+=Math.abs(d.y-c.y);c=d;10>m&&10>g||(g>m?(b=!1,e.cancel&&e.cancel(a)):(a.preventDefault(),e.move&&e.move(d,a)))}});d.on(k(l,"end"),function(a){b&&(b=!1,e.end&&e.end(f(a),a))})}}}]);n.config(["$provide",function(f){f.decorator("ngClickDirective",["$delegate",function(k){k.shift();return k}])}]);n.directive("ngClick",
["$parse","$timeout","$rootElement",function(f,k,p){function d(c,b,d){for(var a=0;a<c.length;a+=2){var e=c[a+1],g=d;if(25>Math.abs(c[a]-b)&&25>Math.abs(e-g))return c.splice(a,a+2),!0}return!1}function e(c){if(!(2500<Date.now()-m)){var b=c.touches&&c.touches.length?c.touches:[c],e=b[0].clientX,b=b[0].clientY;1>e&&1>b||h&&h[0]===e&&h[1]===b||(h&&(h=null),"label"===c.target.tagName.toLowerCase()&&(h=[e,b]),d(g,e,b)||(c.stopPropagation(),c.preventDefault(),c.target&&c.target.blur()))}}function l(c){c=
c.touches&&c.touches.length?c.touches:[c];var b=c[0].clientX,d=c[0].clientY;g.push(b,d);k(function(){for(var a=0;a<g.length;a+=2)if(g[a]==b&&g[a+1]==d){g.splice(a,a+2);break}},2500,!1)}var m,g,h;return function(c,b,h){function a(){n=!1;b.removeClass("ng-click-active")}var k=f(h.ngClick),n=!1,r,s,v,w;b.on("touchstart",function(a){n=!0;r=a.target?a.target:a.srcElement;3==r.nodeType&&(r=r.parentNode);b.addClass("ng-click-active");s=Date.now();a=a.originalEvent||a;a=(a.touches&&a.touches.length?a.touches:
[a])[0];v=a.clientX;w=a.clientY});b.on("touchmove",function(b){a()});b.on("touchcancel",function(b){a()});b.on("touchend",function(c){var k=Date.now()-s,f=c.originalEvent||c,t=(f.changedTouches&&f.changedTouches.length?f.changedTouches:f.touches&&f.touches.length?f.touches:[f])[0],f=t.clientX,t=t.clientY,x=Math.sqrt(Math.pow(f-v,2)+Math.pow(t-w,2));n&&750>k&&12>x&&(g||(p[0].addEventListener("click",e,!0),p[0].addEventListener("touchstart",l,!0),g=[]),m=Date.now(),d(g,f,t),r&&r.blur(),u.isDefined(h.disabled)&&
!1!==h.disabled||b.triggerHandler("click",[c]));a()});b.onclick=function(a){};b.on("click",function(a,b){c.$apply(function(){k(c,{$event:b||a})})});b.on("mousedown",function(a){b.addClass("ng-click-active")});b.on("mousemove mouseup",function(a){b.removeClass("ng-click-active")})}}]);s("ngSwipeLeft",-1,"swipeleft");s("ngSwipeRight",1,"swiperight")})(window,window.angular);
//# sourceMappingURL=angular-touch.min.js.map | PypiClean |
/Moose-0.9.9b3.tar.gz/Moose-0.9.9b3/moose/utils/treelib/node.py | import uuid
from .exceptions import NodePropertyError
class Node(object):
    """
    Nodes are elementary objects which are stored in the `_nodes` dictionary
    of a Tree.  Use the `data` attribute to store node-specific data.
    """

    #: constants used as the `mode` argument of :meth:`update_fpointer`
    (ADD, DELETE, INSERT, REPLACE) = list(range(4))

    def __init__(self, tag=None, identifier=None, expanded=True, data=None):
        """Create a new Node object to be placed inside a Tree object.

        :param tag: human-readable label; defaults to the identifier.
        :param identifier: unique id; a uuid1 string is generated when None.
        :param expanded: whether the node is displayed expanded.
        :param data: arbitrary user payload attached to the node.
        """
        #: if given as a parameter, must be unique
        self._identifier = None
        self._set_identifier(identifier)

        #: when no tag is given, the identifier doubles as the display tag
        if tag is None:
            self._tag = self._identifier
        else:
            self._tag = tag

        #: boolean display state
        self.expanded = expanded

        #: identifier of the parent node (None for a root)
        self._bpointer = None
        #: identifiers of the child nodes
        self._fpointer = list()

        #: None or whatever was given as a parameter
        self.data = data

    def __lt__(self, other):
        # Nodes order by their display tag.
        return self.tag < other.tag

    def _set_identifier(self, nid):
        """Initialize self._identifier, generating a uuid1 string when needed."""
        if nid is None:
            self._identifier = str(uuid.uuid1())
        else:
            self._identifier = nid

    @property
    def bpointer(self):
        """Identifier of this node's parent (None for a root)."""
        return self._bpointer

    @bpointer.setter
    def bpointer(self, nid):
        """Set the parent identifier (None detaches the node)."""
        # Both branches of the original assigned the same value; simplified.
        self._bpointer = nid

    @property
    def fpointer(self):
        """List of identifiers of this node's children."""
        return self._fpointer

    @fpointer.setter
    def fpointer(self, value):
        """Set the children identifiers from a list, dict (keys), or set."""
        if value is None:
            self._fpointer = list()
        elif isinstance(value, list):
            self._fpointer = value
        elif isinstance(value, dict):
            self._fpointer = list(value.keys())
        elif isinstance(value, set):
            self._fpointer = list(value)
        else:  # TODO: add deprecated routine
            # Unsupported types are silently ignored (original behavior).
            pass

    @property
    def identifier(self):
        """Unique identifier of this node."""
        return self._identifier

    @identifier.setter
    def identifier(self, value):
        """Set the identifier; None is rejected with a warning."""
        if value is None:
            print("WARNING: node ID can not be None")
        else:
            self._set_identifier(value)

    def is_leaf(self):
        """Return True if the current node has no children."""
        return len(self._fpointer) == 0

    def is_root(self):
        """Return True if this node has no parent, i.e. is a root."""
        return self._bpointer is None

    @property
    def tag(self):
        """Display label of this node."""
        return self._tag

    @tag.setter
    def tag(self, value):
        """Set the display label."""
        # The original conditional (value if value is not None else None)
        # was a no-op; simplified to a plain assignment.
        self._tag = value

    def update_bpointer(self, nid):
        """Update the parent node identifier."""
        self.bpointer = nid

    def update_fpointer(self, nid, mode=ADD, replace=None):
        """Add, delete, or replace a child identifier.

        :param nid: the child identifier to operate on (ignored when None).
        :param mode: one of ADD, DELETE, INSERT (deprecated), REPLACE.
        :param replace: new identifier, required when mode is REPLACE.
        :raises NodePropertyError: when mode is REPLACE and replace is None.
        """
        if nid is None:
            return
        if mode is self.ADD:
            self._fpointer.append(nid)
        elif mode is self.DELETE:
            if nid in self._fpointer:
                self._fpointer.remove(nid)
        elif mode is self.INSERT:  # deprecated: falls back to ADD mode
            print("WARNING: INSERT is deprecated to ADD mode")
            self.update_fpointer(nid)
        elif mode is self.REPLACE:
            if replace is None:
                raise NodePropertyError(
                    'Argument "replace" should be provided when mode is {}'.format(mode)
                )
            ind = self._fpointer.index(nid)
            self._fpointer[ind] = replace

    def __repr__(self):
        name = self.__class__.__name__
        kwargs = [
            "tag={0}".format(self.tag),
            "identifier={0}".format(self.identifier),
            "data={0}".format(self.data),
        ]
        return "%s(%s)" % (name, ", ".join(kwargs))
/Gbtestapi0.2-0.1a10.tar.gz/Gbtestapi0.2-0.1a10/src/gailbot/services/organizer/settings/interface/whisperInterface.py | from pydantic import BaseModel, ValidationError
from typing import Dict, List, Union
from .engineSettingInterface import EngineSettingInterface
from gailbot.core.utils.logger import makelogger
logger = makelogger("whisperInterface")
class ValidateWhisper(BaseModel):
    """Pydantic schema validating the flat, user-provided whisper setting dict."""
    engine: str
    language: str = None
    detect_speakers: bool = False
class Init(BaseModel):
    """Engine initialization options (whisper takes none)."""
    pass
class TranscribeSetting(BaseModel):
    """Per-transcription options forwarded to the whisper engine."""
    language: str = None
    detect_speakers: bool = False
class WhisperInterface(EngineSettingInterface):
    """
    Interface for the Whisper speech to text engine
    """
    # options applied per transcription (language, speaker detection)
    transcribe: TranscribeSetting
    # engine initialization options; optional since whisper takes none
    init: Init = None
    # engine name; load_whisper_setting only accepts "whisper" here
    engine: str
def load_whisper_setting(
    setting: Dict[str, str]
) -> Union[bool, EngineSettingInterface]:
    """given a dictionary, load the dictionary as a whisper setting

    Args:
        setting (Dict[str, str]): the dictionary that contains the setting data

    Returns:
        Union[bool, EngineSettingInterface]: if the setting dictionary is
                                  validated by the whisper setting interface,
                                  return the whisper setting interface
                                  as an instance of EngineSettingInterface,
                                  else return False
    """
    logger.info("initialize whisper engine")
    # Bail out early when the dict is not a whisper setting at all.
    if not "engine" in setting.keys() or setting["engine"] != "whisper":
        return False
    try:
        logger.info(setting)
        # Work on a copy so the caller's dict is not mutated by pop() below.
        setting = setting.copy()
        # Raises ValidationError when the flat dict has bad keys/values.
        validate = ValidateWhisper(**setting)
        # Re-shape the flat dict into the nested engine/init/transcribe
        # layout expected by WhisperInterface.
        whisper_set = dict()
        whisper_set["engine"] = setting.pop("engine")
        whisper_set["init"] = dict()
        whisper_set["transcribe"] = dict()
        whisper_set["transcribe"].update(setting)
        whisper_set = WhisperInterface(**whisper_set)
        return whisper_set
    except ValidationError as e:
        logger.error(e, exc_info=e)
        logger.error(f"error in validating whisper interface {e}")
        return False
/LFake-18.9.0.tar.gz/LFake-18.9.0/lfake/providers/job/ar_AA/__init__.py | from .. import Provider as BaseProvider
class Provider(BaseProvider):
    """Arabic (ar_AA) job-title provider.

    `jobs` is the pool of job names sampled by the base provider.
    """

    # Source: https://learnenglish100.com/grammar/career-job/
    jobs = (
        "أحيائي",
        "احصائي",
        "اطفائي",
        "بائع",
        "بائع خضار وفاكهة",
        "بائع زهور",
        "بائعة",
        "بواب",
        "تاجر",
        "جزار",
        "جوھري",
        "جيولوجي",
        "حداد",
        "حلاق",
        "خادمة",
        "خباز",
        "خبير اقتصادي",
        "خبير في التراث الشعبي",
        "خبير في عالم الحيوان",
        "خراط",
        "خياط",
        "خياطة",
        "داية",
        "رئيس طهاه",
        "راقصة",
        "راقصة باليه",
        "رجل مباحث",
        "رسام",
        "روائي",
        "سائق",
        "سائق تاكسي",
        "سائق شاحنة",
        "ساعاتي",
        "ساعي بريد",
        "سكرتير",
        "سكرتيرة",
        "سمكري",
        "سياسي",
        "شاعر",
        "شرطي",
        "صائغ",
        "صاحب متجر",
        "صاحب مطبعة",
        "صاحب مكتبة",
        "صانع أدوات بصرية",
        "صباغ",
        "صباغ أحذية",
        "صحافي",
        "صحفي",
        "صراف",
        "صيدلي",
        "ضابط شرطة",
        "ضارب على الآلة الكاتبة",
        "طباخ",
        "طبيب",
        "طبيب أسنان",
        "طبيب جراح",
        "طبيب عيون",
        "طبيب نفساني",
        "طيار",
        "عارضة أزياء",
        "عالم",
        "عالم أرصاد جوية",
        "عالم اثار",
        "عالم رياضيات",
        "عالم فيزياء",
        "عامل",
        "عامل أحذية",
        "عامل بمتجر",
        "عامل بناء",
        "غسالة",
        "فنان",
        "فيلسوف",
        "قائد شرطة",
        "قاضي",
        "كاتب",
        "كاتب مسرحي",
        "لغوي",
        "مؤلف",
        "ماسح احذية",
        "مبرمج",
        "مترجم",
        "مجلد كتب",
        "محاسب",
        "محاضر",
        "محام",
        "محرر",
        "محرر جريدة",
        "مدير",
        "مدير او مخرج",
        "مدير بنك",
        "مدير تسويق",
        "مدير متجر",
        "مدير موظفين",
        "مذيع",
        "مساعد مبيعات",
        "مشتري",
        "مصحح قانوني",
        "مصصم",
        "مصفف شعر",
        "مصمم جرافيك",
        "مصمم ديكور",
        "مصور",
        "مضيفة جوية",
        "مضيفة في الطائرة",
        "مطرب",
        "معالج طبيعي",
        "معلم",
        "مغني",
        "مكوى",
        "ملحن",
        "ممثل",
        "ممثلة",
        "ممرضة",
        "منتج",
        "منجد",
        "منسق ازياء",
        "موزع جرائد",
        "موسيقار",
        "موصل طلبيات",
        "موظف استقبال",
        "موظف بدالة",
        "موظف حكومي",
        "ميكانيكي",
        "مھندس",
        "نادلة",
        "ناشر",
        "نباتي",
        "نجار",
        "نحات",
        "وسيط تأمين",
        "وكيل سفر",
        "وكيل عقارات",
    )
/LAMDA-SSL-1.0.2.tar.gz/LAMDA-SSL-1.0.2/LAMDA_SSL/Algorithm/Classification/S4L.py | from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin
from sklearn.base import ClassifierMixin
import torch
import numpy as np
from LAMDA_SSL.utils import class_status
from LAMDA_SSL.Augmentation.Vision.Rotate import Rotate
from LAMDA_SSL.utils import Bn_Controller
import LAMDA_SSL.Config.S4L as config
from LAMDA_SSL.Loss.Cross_Entropy import Cross_Entropy
from LAMDA_SSL.Loss.Semi_Supervised_Loss import Semi_Supervised_Loss
class S4L(InductiveEstimator, DeepModelMixin, ClassifierMixin):
    """S4L semi-supervised classifier.

    Combines a supervised cross-entropy loss on labeled data with a
    self-supervised rotation-prediction loss: a second network head
    classifies which angle from `rotate_v_list` each image was rotated by.
    """

    def __init__(self,
                 lambda_u=config.lambda_u,
                 num_classes=config.num_classes,
                 p_target=config.p_target,
                 rotate_v_list=config.rotate_v_list,
                 labeled_usp=config.labeled_usp,
                 all_rot=config.all_rot,
                 mu=config.mu,
                 ema_decay=config.ema_decay,
                 weight_decay=config.weight_decay,
                 epoch=config.epoch,
                 num_it_epoch=config.num_it_epoch,
                 num_it_total=config.num_it_total,
                 eval_epoch=config.eval_epoch,
                 eval_it=config.eval_it,
                 device=config.device,
                 train_dataset=config.train_dataset,
                 labeled_dataset=config.labeled_dataset,
                 unlabeled_dataset=config.unlabeled_dataset,
                 valid_dataset=config.valid_dataset,
                 test_dataset=config.test_dataset,
                 train_dataloader=config.train_dataloader,
                 labeled_dataloader=config.labeled_dataloader,
                 unlabeled_dataloader=config.unlabeled_dataloader,
                 valid_dataloader=config.valid_dataloader,
                 test_dataloader=config.test_dataloader,
                 train_sampler=config.train_sampler,
                 train_batch_sampler=config.train_batch_sampler,
                 valid_sampler=config.valid_sampler,
                 valid_batch_sampler=config.valid_batch_sampler,
                 test_sampler=config.test_sampler,
                 test_batch_sampler=config.test_batch_sampler,
                 labeled_sampler=config.labeled_sampler,
                 unlabeled_sampler=config.unlabeled_sampler,
                 labeled_batch_sampler=config.labeled_batch_sampler,
                 unlabeled_batch_sampler=config.unlabeled_batch_sampler,
                 augmentation=config.augmentation,
                 network=config.network,
                 optimizer=config.optimizer,
                 scheduler=config.scheduler,
                 evaluation=config.evaluation,
                 parallel=config.parallel,
                 file=config.file,
                 verbose=config.verbose
                 ):
        # >> Parameter:
        # >> - lambda_u: The weight of unsupervised loss.
        # >> - num_classes: The number of classes.
        # >> - p_target: The target distribution of labeled data.
        # >> - rotate_v_list: A list of rotation angles.
        # >> - labeled_usp: Whether to use labeled data when computing the unsupervised loss.
        # >> - all_rot: Whether to rotate samples by all angles in rotate_v_list.
        # Remaining parameters are forwarded unchanged to DeepModelMixin.
        DeepModelMixin.__init__(self, train_dataset=train_dataset,
                                valid_dataset=valid_dataset,
                                test_dataset=test_dataset,
                                train_dataloader=train_dataloader,
                                valid_dataloader=valid_dataloader,
                                test_dataloader=test_dataloader,
                                augmentation=augmentation,
                                network=network,
                                train_sampler=train_sampler,
                                train_batch_sampler=train_batch_sampler,
                                valid_sampler=valid_sampler,
                                valid_batch_sampler=valid_batch_sampler,
                                test_sampler=test_sampler,
                                test_batch_sampler=test_batch_sampler,
                                labeled_dataset=labeled_dataset,
                                unlabeled_dataset=unlabeled_dataset,
                                labeled_dataloader=labeled_dataloader,
                                unlabeled_dataloader=unlabeled_dataloader,
                                labeled_sampler=labeled_sampler,
                                unlabeled_sampler=unlabeled_sampler,
                                labeled_batch_sampler=labeled_batch_sampler,
                                unlabeled_batch_sampler=unlabeled_batch_sampler,
                                epoch=epoch,
                                num_it_epoch=num_it_epoch,
                                num_it_total=num_it_total,
                                eval_epoch=eval_epoch,
                                eval_it=eval_it,
                                mu=mu,
                                weight_decay=weight_decay,
                                ema_decay=ema_decay,
                                optimizer=optimizer,
                                scheduler=scheduler,
                                device=device,
                                evaluation=evaluation,
                                parallel=parallel,
                                file=file,
                                verbose=verbose
                                )
        self.ema_decay = ema_decay
        self.lambda_u = lambda_u
        self.weight_decay = weight_decay
        self.num_classes = num_classes
        self.rotate_v_list = rotate_v_list
        self.p_model = None
        self.p_target = p_target
        self.labeled_usp = labeled_usp
        self.all_rot = all_rot
        self.bn_controller = Bn_Controller()
        self._estimator_type = ClassifierMixin._estimator_type

    def init_transform(self):
        """Attach the weak augmentation to both labeled and unlabeled pipelines."""
        self._train_dataset.add_transform(self.weak_augmentation, dim=1, x=0, y=0)
        self._train_dataset.add_unlabeled_transform(self.weak_augmentation, dim=1, x=0, y=0)

    def start_fit(self):
        """Infer num_classes from the labeled targets when unset and enter train mode."""
        self.num_classes = self.num_classes if self.num_classes is not None else \
            class_status(self._train_dataset.labeled_dataset.y).num_classes
        self._network.zero_grad()
        self._network.train()

    def train(self, lb_X, lb_y, ulb_X, lb_idx=None, ulb_idx=None, *args, **kwargs):
        """One training step: forward labeled data and a rotated batch.

        Returns (labeled logits, labeled targets, rotation logits, rotation
        targets) for get_loss.
        """
        # Unwrap augmented (tuple/list) inputs down to the raw tensors.
        lb_X = lb_X[0] if isinstance(lb_X, (tuple, list)) else lb_X
        lb_y = lb_y[0] if isinstance(lb_y, (tuple, list)) else lb_y
        ulb_X = ulb_X[0] if isinstance(ulb_X, (tuple, list)) else ulb_X
        # Head 0 of the network produces the class logits.
        lb_logits = self._network(lb_X)[0]
        # Build the rotation batch: each unlabeled image is rotated either by
        # every angle in rotate_v_list (all_rot) or by one random angle; the
        # angle's index in the list is the rotation-class target.
        rot_X = torch.Tensor().to(self.device)
        rot_y = []
        for item in ulb_X:
            if self.all_rot:
                for _v in self.rotate_v_list:
                    rot_X = torch.cat((rot_X, Rotate(v=_v).fit_transform(item).unsqueeze(0)), dim=0)
                    rot_y.append(self.rotate_v_list.index(_v))
            else:
                _v = np.random.choice(self.rotate_v_list, 1).item()
                rot_X = torch.cat((rot_X, Rotate(v=_v).fit_transform(item).unsqueeze(0)), dim=0)
                rot_y.append(self.rotate_v_list.index(_v))
        # Optionally include rotated labeled images in the rotation task too.
        if self.labeled_usp:
            for item in lb_X:
                if self.all_rot:
                    for _v in self.rotate_v_list:
                        rot_X = torch.cat((rot_X, Rotate(v=_v).fit_transform(item).unsqueeze(0)), dim=0)
                        rot_y.append(self.rotate_v_list.index(_v))
                else:
                    _v = np.random.choice(self.rotate_v_list, 1).item()
                    rot_X = torch.cat((rot_X, Rotate(v=_v).fit_transform(item).unsqueeze(0)), dim=0)
                    rot_y.append(self.rotate_v_list.index(_v))
        rot_y = torch.LongTensor(rot_y).to(self.device)
        # Head 1 of the network predicts the rotation class.
        rot_logits = self._network(rot_X)[1]
        return lb_logits, lb_y, rot_logits, rot_y

    def get_loss(self, train_result, *args, **kwargs):
        """Combine supervised CE and rotation CE via Semi_Supervised_Loss."""
        lb_logits, lb_y, rot_logits, rot_y = train_result
        sup_loss = Cross_Entropy(reduction='mean')(lb_logits, lb_y)  # CE_loss for labeled data
        rot_loss = Cross_Entropy(reduction='mean')(rot_logits, rot_y)
        # Total loss = sup_loss + lambda_u * rot_loss.
        loss = Semi_Supervised_Loss(self.lambda_u)(sup_loss, rot_loss)
        return loss

    def predict(self, X=None, valid=None):
        """Delegate prediction to the shared DeepModelMixin implementation."""
        return DeepModelMixin.predict(self, X=X, valid=valid)
/JES12132018_AssetInspectionApps-1.0.tar.gz/JES12132018_AssetInspectionApps-1.0/JES12132018_AssetInspectionApps/main_Script_stormdrain.py | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
from PyQt5.QtCore import QCoreApplication
import gui_Project_stormdrain, core_function_stormdrain
# =======================================
# GUI event handler and related functions OpenManholeShapefileLE
# =======================================
#
# query and direct input functions
def selectStormdrainShapefile():
    """open file dialog to select existing shapefile and if accepted, update GUI accordingly"""
    fileName, _ = QFileDialog.getOpenFileName(mainWindow, "Select shapefile", "", "Shapefile (*.shp)")
    if fileName:  # empty string means the dialog was cancelled
        ui.OpenStormdrainShapefileLE.setText(fileName)
def selectInspectionAreaShapefile():
    """Prompt for an existing inspection-area shapefile and show its path in the GUI."""
    path, _ = QFileDialog.getOpenFileName(mainWindow, "Select shapefile", "", "Shapefile (*.shp)")
    if not path:
        # Dialog was cancelled; leave the line edit unchanged.
        return
    ui.OpenStormBasinShapefileLE.setText(path)
def makeAreaList():
    """Populate the storm basin combo box with the hard-coded basin names."""
    ui.PickStormBasinCB.clear()
    # Hard-coded list of drainage basin names shown in the combo box.
    # NOTE(review): presumably these must match the values in the basin
    # shapefile - confirm if that shapefile ever changes.
    areas = ['ALCOVY', 'APALACHEE', 'BEAVER RUIN', 'BIG HAYNES', 'BROMOLOW', 'BRUSHY FORK', 'CAMP CREEK', 'CEDAR CREEK',
             'CROOKED CREEK', 'HOPKINS CREEK', 'JACKS CREEK', 'JACKSON CREEK', 'LEVEL CREEK', 'LITTLE SUWANEE',
             'LOWER CHATTAHOOCHEE', 'LOWER YELLOW', 'MULBERRY', 'NO BUSINESS', 'NORTH FORK', 'PEW CREEK', 'POUND CREEK',
             'RICHLAND', 'SHETLEY', 'SHOAL', 'SUWANEE', 'SWEETWATER', 'TURKEY', 'UPPER CHATTAHOOCHEE 1', 'UPPER CHATTAHOOCHEE 2',
             'UPPER CHATTAHOOCHEE 3', 'UPPER YELLOW', 'WATSON CREEK']
    for basin in areas:
        ui.PickStormBasinCB.addItem(str(basin))
def createNewShapefile():
    """Create the inspection shapefile from the current form inputs."""
    try:
        ui.statusbar.clearMessage()
        ui.statusbar.showMessage('Creating shapefile... please wait!')
        # Gather the inputs from the form widgets.
        stormdrainFC = ui.OpenStormdrainShapefileLE.text()
        areaFC = ui.OpenStormBasinShapefileLE.text()
        path = ui.outputPathLE.text()
        name = ui.fileNameLE.text()
        area = ui.PickStormBasinCB.currentText()
        # Delegate the geoprocessing to the core module.
        core_function_stormdrain.createStormdrainFeatureLayer(stormdrainFC)
        core_function_stormdrain.createInspectionArea(areaFC)
        core_function_stormdrain.createShapefile(area, path, name)
        ui.statusbar.showMessage('New shapefile has been created. You may now close the form.')
    except Exception as e:
        # Surface any failure in a message box rather than crashing the GUI.
        QMessageBox.information(mainWindow, 'Operation failed', 'Creating new shapefile failed with ' + str(e.__class__) + ': ' + str(e), QMessageBox.Ok)
        ui.statusbar.clearMessage()
def selectFileLocation():
    """open file dialog to select existing directory where the shapefile will be saved"""
    pathName = QFileDialog.getExistingDirectory(mainWindow, "Select Output Folder")
    if pathName:  # empty string means the dialog was cancelled
        ui.outputPathLE.setText(pathName)
#==========================================
# create app and main window + dialog GUI
# =========================================
# Reuse an existing Qt application instance if one is already running
# (e.g. when launched inside another host process); otherwise create one.
app = QCoreApplication.instance()
if app is None:
    app = QApplication(sys.argv)
app.aboutToQuit.connect(app.deleteLater)

# set up main window
mainWindow = QMainWindow()
ui = gui_Project_stormdrain.Ui_MainWindow()
ui.setupUi(mainWindow)

#==========================================
# connect signals
#==========================================
makeAreaList()
ui.OpenStormdrainShapefileTB.clicked.connect(selectStormdrainShapefile)
ui.OpenStormBasinShapefileTB.clicked.connect(selectInspectionAreaShapefile)
ui.outputPathTB.clicked.connect(selectFileLocation)
ui.shapefileCreateNewPB.clicked.connect(createNewShapefile)

#============================================
# test availability and if run as script tool
#============================================
arcpyAvailable = core_function_stormdrain.importArcpyIfAvailable()
if not arcpyAvailable:
    ui.statusbar.showMessage('arcpy not available. Adding to shapefiles and layers has been disabled.')
else:
    import arcpy
    if core_function_stormdrain.runningAsScriptTool():
        runningAsScriptTool = True
        # When run as an ArcMap script tool, create the shapefile immediately.
        createNewShapefile()
    else:
        ui.statusbar.showMessage(ui.statusbar.currentMessage() + 'Not running as a script tool in ArcMap. Shapefile will still be created.')

#=======================================
# run app
#=======================================
mainWindow.show()
sys.exit(app.exec_())
/DCA-0.3.4.tar.gz/DCA-0.3.4/dca/__main__.py |
import os, sys, argparse
def parse_args(argv=None):
    """Build the DCA command-line parser and parse the given arguments.

    :param argv: optional list of argument strings. ``None`` (the default)
        keeps the standard argparse behaviour of reading ``sys.argv[1:]``;
        passing an explicit list makes this function testable without
        touching the process arguments. Backward compatible: existing
        ``parse_args()`` callers are unaffected.
    :return: the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='Autoencoder')

    parser.add_argument('input', type=str, help='Input is raw count data in TSV/CSV '
                                                'or H5AD (anndata) format. '
                                                'Row/col names are mandatory. Note that TSV/CSV files must be in '
                                                'gene x cell layout where rows are genes and cols are cells (scRNA-seq '
                                                'convention).'
                                                'Use the -t/--transpose option if your count matrix in cell x gene layout. '
                                                'H5AD files must be in cell x gene format (stats and scanpy convention).')
    parser.add_argument('outputdir', type=str, help='The path of the output directory')

    # IO and norm options
    parser.add_argument('--normtype', type=str, default='zheng',
                        help='Type of size factor estimation. Possible values: deseq, zheng.'
                             ' (default: zheng)')
    parser.add_argument('-t', '--transpose', dest='transpose',
                        action='store_true', help='Transpose input matrix (default: False)')
    parser.add_argument('--testsplit', dest='testsplit',
                        action='store_true', help="Use one fold as a test set (default: False)")

    # training options
    # NOTE: fixed a typo in the original help string ("zinb-conddisp( zinb-fork").
    parser.add_argument('--type', type=str, default='nb-conddisp',
                        help="Type of autoencoder. Possible values: normal, poisson, nb, "
                             "nb-shared, nb-conddisp (default), nb-fork, zinb, "
                             "zinb-shared, zinb-conddisp, zinb-fork")
    parser.add_argument('--threads', type=int, default=None,
                        help='Number of threads for training (default is all cores)')
    parser.add_argument('-b', '--batchsize', type=int, default=32,
                        help="Batch size (default:32)")
    parser.add_argument('--sizefactors', dest='sizefactors',
                        action='store_true', help="Normalize means by library size (default: True)")
    parser.add_argument('--nosizefactors', dest='sizefactors',
                        action='store_false', help="Do not normalize means by library size")
    parser.add_argument('--norminput', dest='norminput',
                        action='store_true', help="Zero-mean normalize input (default: True)")
    parser.add_argument('--nonorminput', dest='norminput',
                        action='store_false', help="Do not zero-mean normalize inputs")
    parser.add_argument('--loginput', dest='loginput',
                        action='store_true', help="Log-transform input (default: True)")
    parser.add_argument('--nologinput', dest='loginput',
                        action='store_false', help="Do not log-transform inputs")
    parser.add_argument('-d', '--dropoutrate', type=str, default='0.0',
                        help="Dropout rate (default: 0)")
    parser.add_argument('--batchnorm', dest='batchnorm', action='store_true',
                        help="Batchnorm (default: True)")
    parser.add_argument('--nobatchnorm', dest='batchnorm', action='store_false',
                        help="Do not use batchnorm")
    parser.add_argument('--l2', type=float, default=0.0,
                        help="L2 regularization coefficient (default: 0.0)")
    parser.add_argument('--l1', type=float, default=0.0,
                        help="L1 regularization coefficient (default: 0.0)")
    parser.add_argument('--l2enc', type=float, default=0.0,
                        help="Encoder-specific L2 regularization coefficient (default: 0.0)")
    parser.add_argument('--l1enc', type=float, default=0.0,
                        help="Encoder-specific L1 regularization coefficient (default: 0.0)")
    parser.add_argument('--ridge', type=float, default=0.0,
                        help="L2 regularization coefficient for dropout probabilities (default: 0.0)")
    parser.add_argument('--gradclip', type=float, default=5.0,
                        help="Clip grad values (default: 5.0)")
    parser.add_argument('--activation', type=str, default='relu',
                        help="Activation function of hidden units (default: relu)")
    parser.add_argument('--optimizer', type=str, default='RMSprop',
                        help="Optimization method (default: RMSprop)")
    parser.add_argument('--init', type=str, default='glorot_uniform',
                        help="Initialization method for weights (default: glorot_uniform)")
    parser.add_argument('-e', '--epochs', type=int, default=300,
                        help="Max number of epochs to continue training in case of no "
                             "improvement on validation loss (default: 300)")
    parser.add_argument('--earlystop', type=int, default=15,
                        help="Number of epochs to stop training if no improvement in loss "
                             "occurs (default: 15)")
    parser.add_argument('--reducelr', type=int, default=10,
                        help="Number of epochs to reduce learning rate if no improvement "
                             "in loss occurs (default: 10)")
    parser.add_argument('-s', '--hiddensize', type=str, default='64,32,64',
                        help="Size of hidden layers (default: 64,32,64)")
    # NOTE: removed the stray trailing comma after the call below which made
    # the original statement a pointless one-element tuple expression.
    parser.add_argument('--inputdropout', type=float, default=0.0,
                        help="Input layer dropout probability")
    parser.add_argument('-r', '--learningrate', type=float, default=None,
                        help="Learning rate (default: 0.001)")
    parser.add_argument('--saveweights', dest='saveweights',
                        action='store_true', help="Save weights (default: False)")
    parser.add_argument('--no-saveweights', dest='saveweights',
                        action='store_false', help="Do not save weights")
    parser.add_argument('--hyper', dest='hyper',
                        action='store_true', help="Optimizer hyperparameters (default: False)")
    parser.add_argument('--hypern', dest='hypern', type=int, default=1000,
                        help="Number of samples drawn from hyperparameter distributions during optimization. "
                             "(default: 1000)")
    parser.add_argument('--hyperepoch', dest='hyperepoch', type=int, default=100,
                        help="Number of epochs used in each hyperpar optimization iteration. "
                             "(default: 100)")
    parser.add_argument('--debug', dest='debug',
                        action='store_true', help="Enable debugging. Checks whether every term in "
                                                  "loss functions is finite. (default: False)")
    parser.add_argument('--tensorboard', dest='tensorboard',
                        action='store_true', help="Use tensorboard for saving weight distributions and "
                                                  "visualization. (default: False)")
    parser.add_argument('--checkcounts', dest='checkcounts', action='store_true',
                        help="Check if the expression matrix has raw (unnormalized) counts (default: True)")
    parser.add_argument('--nocheckcounts', dest='checkcounts', action='store_false',
                        help="Do not check if the expression matrix has raw (unnormalized) counts")
    parser.add_argument('--denoisesubset', dest='denoisesubset', type=str,
                        help='Perform denoising only for the subset of genes '
                             'in the given file. Gene names should be line '
                             'separated.')

    # Defaults for the paired --flag/--noflag boolean options above.
    parser.set_defaults(transpose=False,
                        testsplit=False,
                        saveweights=False,
                        sizefactors=True,
                        batchnorm=True,
                        checkcounts=True,
                        norminput=True,
                        hyper=False,
                        debug=False,
                        tensorboard=False,
                        loginput=True)

    return parser.parse_args(argv)
def main():
    """Entry point: parse CLI arguments and launch DCA training.

    Raises ImportError with an installation hint when TensorFlow is missing.
    """
    args = parse_args()

    try:
        import tensorflow as tf  # noqa: F401 -- availability check only
    except ImportError:
        raise ImportError('DCA requires TensorFlow v2+. Please follow instructions'
                          ' at https://www.tensorflow.org/install/ to install'
                          ' it.')

    # import tf and the rest after parse_args() to make argparse help faster
    from . import train
    # Removed stray trailing '| PypiClean' text that made the original final
    # line raise NameError at runtime.
    train.train_with_args(args)
/NeuroTorch-0.0.1b2.tar.gz/NeuroTorch-0.0.1b2/src/neurotorch/metrics/losses.py | import torch
import torch.nn as nn
from ..transforms.base import to_tensor
class RMSELoss(torch.nn.Module):
    """
    Root-mean-squared-error loss.

    :math:`\\text{RMSE}(x, y) = \\sqrt{\\frac{1}{n}\\sum_{i=1}^n (x_i - y_i)^2}`

    :Attributes:
        - **criterion** (nn.MSELoss): The underlying MSE loss.
    """
    def __init__(self):
        """Create the module and its internal MSE criterion."""
        super().__init__()
        self.criterion = nn.MSELoss()

    def forward(self, x, y):
        """
        Compute the RMSE between ``x`` and ``y``.

        :param x: The first input.
        :param y: The second input.
        :return: The RMSE loss (a small epsilon is added under the square
            root for numerical stability of the gradient at zero error).
        """
        mse = self.criterion(x, y)
        return torch.sqrt(mse + 1e-8)
class PVarianceLoss(torch.nn.Module):
    """
    Class used to compute the P-Variance loss.

    :math:`\\text{P-Variance}(x, y) = 1 - \\frac{\\text{MSE}(x, y)}{\\text{Var}(y)}`

    A value of 1 means a perfect fit; values can be negative when the MSE
    exceeds the target variance.

    :Attributes:
        - :attr:`criterion` (nn.MSELoss): The MSE loss.
        - :attr:`negative` (bool): Whether to return the negative P-Variance loss.
        - :attr:`reduction` (str): The reduction method to use. If 'mean', the output will be averaged. If 'feature',
            the output will be the shape of the last dimension of the input. If 'none', the output will be the same
            shape as the input.
    """
    def __init__(self, negative: bool = False, reduction: str = 'mean', **kwargs):
        """
        Constructor for the PVarianceLoss class.

        :param negative: Whether to return the negative P-Variance loss (useful
            when a minimizer should maximize the P-Variance).
        :type negative: bool
        :param reduction: The reduction method to use. One of 'mean',
            'feature' or 'none'. Defaults to 'mean'.
        :type reduction: str
        :keyword arguments: epsilon: The epsilon value used to prevent division
            by zero. Defaults to 1e-5.
        """
        super(PVarianceLoss, self).__init__()
        assert reduction in ['mean', 'feature', 'none'], 'Reduction must be one of "mean", "feature", or "none".'
        self.reduction = reduction
        # For 'feature'/'none' we need the elementwise MSE; reduction is
        # applied manually in forward().
        mse_reduction = 'mean' if reduction == 'mean' else 'none'
        self.criterion = nn.MSELoss(
            reduction=mse_reduction
        )
        self.negative = negative
        self.epsilon = kwargs.get("epsilon", 1e-5)

    def forward(self, x, y):
        """
        Calculate the P-Variance loss.

        :param x: The first input (prediction).
        :param y: The second input (target).
        :return: The P-Variance loss, reduced according to ``self.reduction``.
        """
        x, y = to_tensor(x), to_tensor(y)
        if self.reduction == 'feature':
            # Flatten every leading dimension so statistics are per-feature.
            x_reshape, y_reshape = x.reshape(-1, x.shape[-1]), y.reshape(-1, y.shape[-1])
        else:
            x_reshape, y_reshape = x, y
        mse_loss = self.criterion(x_reshape, y_reshape)
        if self.reduction == 'feature':
            mse_loss = mse_loss.mean(dim=0)
            var = y_reshape.var(dim=0)
        else:
            var = y_reshape.var()
        loss = 1 - mse_loss / (var + self.epsilon)
        if self.negative:
            loss = -loss
        return loss

    def mean_std_over_batch(self, x, y):
        """
        Calculate the mean and standard deviation of the P-Variance loss over the batch.

        :param x: The first input (prediction).
        :param y: The second input (target).
        :return: Tuple of (mean, std) of the per-sample P-Variance losses.
        """
        x, y = to_tensor(x), to_tensor(y)
        x_reshape, y_reshape = x.reshape(x.shape[0], -1), y.reshape(y.shape[0], -1)
        # Per-sample MSE and target variance (one value per batch element).
        mse_loss = torch.mean((x_reshape - y_reshape)**2, dim=-1)
        var = y_reshape.var(dim=-1)
        loss = 1 - mse_loss / (var + self.epsilon)
        if self.negative:
            loss = -loss
        # Removed stray trailing '| PypiClean' text that made the original
        # final line raise NameError at runtime.
        return loss.mean(), loss.std()
/AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/byol.py | import torch
import copy
import torch.nn as nn
from AK_SSL.models.modules.heads import BYOLPredictionHead, BYOLProjectionHead
class BYOL(nn.Module):
    """
    BYOL: Bootstrap your own latent: A new approach to self-supervised Learning
    Link: https://arxiv.org/abs/2006.07733
    Implementation: https://github.com/deepmind/deepmind-research/tree/master/byol

    An online encoder (backbone + projection head + prediction head) is trained
    against a target encoder that is an exponential moving average of the
    online encoder and receives no gradients.
    """

    def __init__(
        self,
        backbone: nn.Module,
        feature_size: int,
        projection_dim: int = 256,
        hidden_dim: int = 4096,
        moving_average_decay: float = 0.99,
        **kwargs
    ):
        """
        :param backbone: feature-extractor network producing ``feature_size``-dim features.
        :param feature_size: output dimensionality of the backbone.
        :param projection_dim: dimensionality of the projection/prediction outputs.
        :param hidden_dim: hidden width of the projection/prediction MLPs.
        :param moving_average_decay: EMA decay for the target-encoder update.
        """
        super().__init__()

        self.backbone = backbone
        self.feature_size = feature_size
        self.projection_dim = projection_dim
        self.hidden_dim = hidden_dim
        self.moving_average_decay = moving_average_decay

        self.projection_head = BYOLProjectionHead(
            feature_size, hidden_dim, projection_dim
        )
        self.prediction_head = BYOLPredictionHead(
            projection_dim, hidden_dim, projection_dim
        )

        self.online_encoder = self.encoder = nn.Sequential(
            self.backbone, self.projection_head
        )

        self.target_encoder = copy.deepcopy(
            self.online_encoder
        )  # target must be a deepcopy of online, since we will use the backbone trained by online

        self._init_target_encoder()

    def _init_target_encoder(self):
        """Copy online weights into the target encoder and freeze it."""
        for param_o, param_t in zip(
            self.online_encoder.parameters(), self.target_encoder.parameters()
        ):
            param_t.data.copy_(param_o.data)
            param_t.requires_grad = False

    @torch.no_grad()
    def _momentum_update_target_encoder(self):
        """EMA update: target <- decay * target + (1 - decay) * online."""
        for param_o, param_t in zip(
            self.online_encoder.parameters(), self.target_encoder.parameters()
        ):
            param_t.data = self.moving_average_decay * param_t.data + (1.0 - self.moving_average_decay) * param_o.data

    def forward(self, x0: torch.Tensor, x1: torch.Tensor):
        """
        Run both augmented views through the online and target branches.

        :param x0: first augmented view (batch of images).
        :param x1: second augmented view.
        :return: ``((p0_online, z0_target), (p1_online, z1_target))`` pairs for
            the symmetric BYOL loss. The target encoder is EMA-updated as a
            side effect of each call.
        """
        z0_o, z1_o = self.online_encoder(x0), self.online_encoder(x1)
        p0_o, p1_o = self.prediction_head(z0_o), self.prediction_head(z1_o)
        with torch.no_grad():
            self._momentum_update_target_encoder()
            z0_t, z1_t = self.target_encoder(x0), self.target_encoder(x1)
        # Removed stray trailing '| PypiClean' text from the original line.
        return (p0_o, z0_t), (p1_o, z1_t)
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/forms/views/listEditView.es6.c9a2205bcb73.js | (function() {
const entryTemplate = _.template(dedent`
<li class="djblets-c-list-edit-widget__entry"
data-list-index="<%- index %>">
<%= renderedDefaultRow %>
<a href="#" class="djblets-c-list-edit-widget__remove-item"
role="button" title="<%- removeText %>">
<span class="fa fa-times"></span>
</a>
</li>
`);
/**
* A view for editing a list of elements.
*
* This is the JavaScript view for
* :py:class:`djblets.forms.widgets.ListEditWidget`.
*/
Djblets.Forms.ListEditView = Backbone.View.extend({
events: {
'click .djblets-c-list-edit-widget__add-item': '_addItem',
'click .djblets-c-list-edit-widget__remove-item': '_removeItem',
},
/**
* Initialize the view.
*
* Version Changed:
* 3.0:
* * Removed the `inputAttrs` option.
* * Removed the `sep` option.
* * Added the `fieldName` option.
* * Added the `renderedDefaultRow` option.
*
* Args:
* options (object):
* The view options.
*
* Option Args:
* removeText (string):
* The localized text for removing an item.
*
* fieldName (string):
* The form field name corresponding to this ListEditWidget.
*
* renderedDefaultRow (string):
* The HTML for a default item in the list. This is used to
* render a default item when adding a new item to the list.
*/
initialize(options) {
this._removeText = options.removeText;
this._fieldName = options.fieldName;
this._renderedDefaultRow = options.renderedDefaultRow;
this._numItems = 0;
},
/**
* Render the view.
*
* Since most of the view is rendered by Django, this just sets up some
* event listeners.
*
* Returns:
* Djblets.Forms.ListEditView:
* This view.
*/
render() {
this.$el.data('djblets-list-edit-view', this);
this._$list = this.$el.children(
'.djblets-c-list-edit-widget__entries');
this._numItems = this._$list.find('.djblets-c-list-edit-widget__entry').length;
$(`input[name="${this._fieldName}_num_rows"]`).val(this._numItems);
this._$addBtn = this.$el.children(
'.djblets-c-list-edit-widget__add-item');
return this;
},
/**
* Create and format the HTML for a new entry in the list.
*
* Args:
* index (int):
* The index of the new entry in the list of entries.
*
* Returns:
* jQuery:
* The HTML for the new entry in the list.
*/
_createDefaultEntry(index) {
const $entry = $(entryTemplate({
index: index,
renderedDefaultRow: this._renderedDefaultRow,
removeText: this._removeText,
}));
this._updateEntryInputName($entry, index);
$(`input[name="${this._fieldName}_num_rows"]`).val(index+1);
$entry.find('.djblets-c-list-edit-widget__add-item')
.on('click', e => this._addItem(e));
return $entry;
},
/**
* Update the name(s) of the entry's .djblets-c-list-edit-widget__input(s)
* so that they contain the appropriate index for the entry.
*
* Args:
* $entry (jQuery):
* The entry to update.
*
* index (int):
* The index of the entry in the list of entries.
*/
_updateEntryInputName($entry, index) {
const $inputs = $entry.find('.djblets-c-list-edit-widget__input');
// The entry may have more than one "input".
if ($inputs.length > 1) {
$inputs.each((idx, el) =>
$(el).attr('name',
`${this._fieldName}_value[${index}]_${idx}`));
} else {
$inputs.attr('name', `${this._fieldName}_value[${index}]`);
}
},
/**
* Add an item to the list.
*
* Args:
* e (Event):
* The click event that triggered this event handler.
*/
_addItem(e) {
e.preventDefault();
e.stopPropagation();
this._$list.append(this._createDefaultEntry(this._numItems));
this._numItems += 1;
},
/**
* Remove an item.
*
* When there is only a single item in the list, we clear it instead of
* removing it so there is always at least one item in the list.
*
* Args:
* e (Event):
* The click event that triggered this event handler.
*/
_removeItem(e) {
e.preventDefault();
e.stopPropagation();
const $target = $(e.target);
const $entry = $target.closest('.djblets-c-list-edit-widget__entry');
if (this._numItems > 1) {
$entry.remove();
this._numItems -= 1;
this._$list.find('.djblets-c-list-edit-widget__entry')
.each((idx, el) => {
const $el = $(el);
$el.attr('data-list-index', idx);
this._updateEntryInputName($el, idx);
});
$(`input[name="${this._fieldName}_num_rows"]`)
.val(this._numItems);
} else {
const $defaultEntry = this._createDefaultEntry(0);
$entry.replaceWith($defaultEntry);
}
},
});
})(); | PypiClean |
/LinkChecker-10.2.1-py3-none-any.whl/linkcheck/logger/gxml.py | from .xmllog import _XMLLogger
from .graph import _GraphLogger
class GraphXMLLogger(_XMLLogger, _GraphLogger):
    """XML output mirroring the GML structure. Easy to parse with any XML
    tool."""

    LoggerName = 'gxml'

    LoggerArgs = {
        "filename": "linkchecker-out.gxml",
    }

    def __init__(self, **kwargs):
        """Initialize graph node list and internal id counter."""
        args = self.get_args(kwargs)
        super().__init__(**args)
        self.nodes = {}
        self.nodeid = 0

    def start_output(self):
        """Write start of checking info as xml comment and open the
        GraphXML/graph container tags."""
        super().start_output()
        self.xml_start_output()
        self.xml_starttag('GraphXML')
        self.xml_starttag('graph', attrs={"isDirected": "true"})
        self.flush()

    def log_url(self, url_data):
        """Write one node and all possible edges.

        Which per-node elements appear is controlled by the configured
        output parts (realurl, dltime, dlsize, checktime, extern).
        """
        node = self.get_node(url_data)
        if node:
            self.xml_starttag('node', attrs={"name": "%d" % node["id"]})
            self.xml_tag("label", node["label"])
            if self.has_part("realurl"):
                self.xml_tag("url", node["url"])
            self.xml_starttag("data")
            if node["dltime"] >= 0 and self.has_part("dltime"):
                self.xml_tag("dltime", "%f" % node["dltime"])
            if node["size"] >= 0 and self.has_part("dlsize"):
                self.xml_tag("size", "%d" % node["size"])
            if node["checktime"] and self.has_part("checktime"):
                self.xml_tag("checktime", "%f" % node["checktime"])
            if self.has_part("extern"):
                self.xml_tag("extern", "%d" % node["extern"])
            self.xml_endtag("data")
            self.xml_endtag("node")

    def write_edge(self, node):
        """Write one edge from the node's parent URL to the node."""
        attrs = {
            "source": "%d" % self.nodes[node["parent_url"]]["id"],
            "target": "%d" % node["id"],
        }
        self.xml_starttag("edge", attrs=attrs)
        self.xml_tag("label", node["label"])
        self.xml_starttag("data")
        if self.has_part("result"):
            self.xml_tag("valid", "%d" % node["valid"])
        self.xml_endtag("data")
        self.xml_endtag("edge")

    def end_output(self, **kwargs):
        """Finish graph output, and print end of checking info as xml
        comment."""
        self.xml_endtag("graph")
        self.xml_endtag("GraphXML")
        self.xml_end_output()
        # Removed stray trailing '| PypiClean' text that made the original
        # final line raise NameError at runtime.
        self.close_fileoutput()
/JT_Techfield-0.0.11-py3-none-any.whl/JT/Layers.py | from collections import defaultdict
from copy import copy
import numpy as np
from scipy.signal import correlate
import JT.Initializers
import JT.Gradients
class Layer:
    """Abstract base class for network layers.

    Subclasses override these hooks; every base implementation is a no-op
    that returns None.
    """

    def InitWeights(self):
        """Allocate any trainable parameters. No-op in the base class."""

    def Forward(self, z_in):
        """Run the training-time forward pass. No-op in the base class."""

    def Backward(self, core_in, learn_rate):
        """Propagate the error core backward. No-op in the base class."""

    def Predict(self, z_in):
        """Run the inference-time forward pass. No-op in the base class."""
class Hidden_Layer(Layer):
    """Fully connected hidden layer with optional batch normalisation.

    Computes ``activation(z_in @ weights + bias)`` and forwards the result to
    ``next_layer``. ``normalize_output`` is one of:

    * ``None`` -- no normalisation;
    * ``'before'`` -- batch-norm on the pre-activation using moving statistics
      plus learnable ``gamma``/``beta``;
    * ``'after'`` -- per-sample standardisation of the activation output.
    """

    def __init__(self, activation, size, normalize_output=None, next_layer=None,
                 prev_layer=None, batch_norm_parameter=0.1, initializer=None,
                 gradient=None):
        """Build the layer.

        :param activation: callable activation; must expose ``.D`` (its
            derivative) for use in :meth:`Backward`.
        :param size: number of units in this layer.
        :param normalize_output: None, 'before' or 'after' (see class doc).
        :param next_layer: downstream layer (implements Forward/Predict).
        :param prev_layer: upstream layer; its ``size`` fixes the weight shape.
        :param batch_norm_parameter: moving-average coefficient for the
            batch-norm statistics.
        :param initializer: weight initializer; a fresh
            ``JT.Initializers.Initializer()`` is created when None. (The
            original used a mutable default argument, sharing one instance
            across all layers.)
        :param gradient: gradient-rule template cloned per parameter; a fresh
            ``JT.Gradients.Gradient()`` is created when None.
        """
        self.activation = activation
        self.next_layer = next_layer
        self.prev_layer = prev_layer
        self.size = size
        self.normalize_output = normalize_output
        self.mu_h = 0   # moving mean of the pre-activation ('before' mode)
        self.sig_h = 1  # moving std of the pre-activation ('before' mode)
        self.alpha = batch_norm_parameter
        self.gradient = JT.Gradients.Gradient() if gradient is None else gradient
        self.initializer = JT.Initializers.Initializer() if initializer is None else initializer
        # BUG FIX: InitWeights must run *after* self.initializer and
        # self.gradient exist. The original called it first, which raised
        # AttributeError whenever prev_layer was supplied at construction.
        self.InitWeights()

    def Forward(self, z_in):
        """Training-time forward pass; caches intermediates for Backward."""
        self.z_in = z_in
        self.h = self.z_in @ self.weights + self.bias
        if self.normalize_output == 'before':
            # Batch-norm with exponentially-smoothed batch statistics.
            mu_batch = np.mean(self.h, axis=0, keepdims=True)
            sig_batch = np.std(self.h, axis=0, keepdims=True)
            self.mu_h = self.alpha * mu_batch + (1 - self.alpha) * self.mu_h
            self.sig_h = self.alpha * sig_batch + (1 - self.alpha) * self.sig_h
            self.denom = np.sqrt(self.sig_h ** 2 + 1e-7)
            self.h_bar_0 = (self.h - self.mu_h) / self.denom
            self.h_bar_1 = self.gamma * self.h_bar_0 + self.beta
            self.z_out = self.activation(self.h_bar_1)
        else:
            self.z_out = self.activation(self.h)
        if self.normalize_output == 'after':
            # Per-sample standardisation of the activation output.
            self.mu_z = np.mean(self.z_out, axis=1, keepdims=True)
            self.sig_z = np.std(self.z_out, axis=1, keepdims=True)
            self.denom = np.sqrt(self.sig_z ** 2)
            self.z_out = (self.z_out - self.mu_z) / self.denom
        return self.next_layer.Forward(self.z_out)

    def Predict(self, z_in):
        """Inference pass using look-ahead parameters (current value minus
        the pending gradient ``change``), mirroring the training-time
        normalisation path."""
        h = z_in @ (self.weights - self.gradients['weights'].change) \
            + (self.bias - self.gradients['bias'].change)
        if self.normalize_output == 'before':
            h_bar_0 = (h - self.mu_h) / self.denom
            h_bar_1 = (self.gamma - self.gradients['gamma'].change) * h_bar_0 \
                + (self.beta - self.gradients['beta'].change)
            z_out = self.activation(h_bar_1)
        else:
            z_out = self.activation(h)
        if self.normalize_output == 'after':
            # NOTE(review): reuses mu_z/denom from the last Forward call even
            # though those are per-sample statistics — confirm intended.
            z_out = (z_out - self.mu_z) / self.denom
        return self.next_layer.Predict(z_out)

    def Backward(self, core_in, learn_rate):
        """Backprop: update gamma/beta (when normalising), then weights and
        bias, and pass the error core to the previous layer."""
        core = core_in * self.activation.D(self.h)
        if self.normalize_output is not None:
            # NOTE(review): h_bar_0/gamma only exist in 'before' mode; calling
            # this with normalize_output == 'after' would fail — confirm.
            gamma_grad = np.sum(core * self.h_bar_0, axis=0, keepdims=True)
            beta_grad = np.sum(core, axis=0, keepdims=True)
            core *= (self.gamma / self.denom)
            self.gamma -= self.gradients['gamma'].Evaluate(gamma_grad, learn_rate)
            self.beta -= self.gradients['beta'].Evaluate(beta_grad, learn_rate)
        core_out = core @ self.weights.T
        weights_grad = self.z_in.T @ core
        bias_grad = np.sum(core, axis=0, keepdims=True)
        self.weights -= self.gradients['weights'].Evaluate(weights_grad, learn_rate)
        self.bias -= self.gradients['bias'].Evaluate(bias_grad, learn_rate)
        self.prev_layer.Backward(core_out, learn_rate)

    def InitWeights(self):
        """Allocate parameters once prev_layer is known; no-op otherwise."""
        if self.prev_layer is not None:
            self.weights = self.initializer.Initialize(self.prev_layer.size, self.size)
            self.bias = np.random.randn(1, self.size)
            self.gamma = np.random.randn(1, self.size)
            self.beta = np.random.randn(1, self.size)
            # One gradient-rule object per parameter name, cloned lazily from
            # the template. NOTE(review): copy() is shallow — confirm that
            # Gradient holds no shared mutable state.
            self.gradients = defaultdict(lambda: copy(self.gradient))
class Output_Layer(Layer):
    """Final dense layer: applies weights + activation and returns predictions."""

    def __init__(self, size, activation, prev_layer=None, initializer=None):
        """
        :param size: number of output units.
        :param activation: callable producing p_hat from the pre-activation.
        :param prev_layer: upstream layer; its ``size`` fixes the weight shape.
        :param initializer: weight initializer; a fresh
            ``JT.Initializers.Initializer()`` is created when None. (The
            original used a mutable default argument, sharing one instance
            across all layers.)
        """
        self.size = size
        self.prev_layer = prev_layer
        self.activation = activation
        self.initializer = JT.Initializers.Initializer() if initializer is None else initializer

    def Forward(self, z_in):
        """Training-time forward pass; caches z_in/h for Backward."""
        self.z_in = z_in
        self.h = self.z_in @ self.weights + self.bias
        self.p_hat = self.activation(self.h)
        return self.p_hat

    def Predict(self, z_in):
        """Stateless inference pass."""
        h = z_in @ self.weights + self.bias
        p_hat = self.activation(h)
        return p_hat

    def Backward(self, p_hat, y, learn_rate):
        """Plain SGD update driven by the output residual (p_hat - y)."""
        core = (p_hat - y)
        core_out = core @ self.weights.T
        self.weights -= learn_rate * self.z_in.T @ core
        self.bias -= learn_rate * np.sum(core, axis=0, keepdims=True)
        self.prev_layer.Backward(core_out, learn_rate)

    def InitWeights(self):
        """Allocate weights/bias once prev_layer is known; no-op otherwise."""
        if self.prev_layer is not None:
            # Was ``self.initializer.Init`` — renamed to ``Initialize`` for
            # consistency with Hidden_Layer.InitWeights, which calls
            # ``Initialize`` on the same initializer type.
            self.weights = self.initializer.Initialize(self.prev_layer.size, self.size)
            self.bias = np.random.randn(1, self.size)
class Input_Layer(Layer):
    """Entry point of the network: validates the input width, then forwards it."""

    def __init__(self, size, next_layer=None):
        self.size = size
        self.next_layer = next_layer

    def Forward(self, x):
        self._check_width(x)
        return self.next_layer.Forward(x)

    def Backward(self, *args):
        """Nothing lies upstream of the input layer; the backward pass ends here."""

    def Predict(self, x):
        self._check_width(x)
        return self.next_layer.Predict(x)

    def _check_width(self, x):
        # Preserves the original (unusual) choice of NameError so existing
        # callers that catch it keep working.
        if x.shape[1] != self.size:
            raise NameError('Input not correct shape')
class Normalize_Layer(Layer):
    """Standardises each sample (row) to zero mean / unit variance.

    Original author's TODO retained: ADD GAMMA AND BETA (learnable scale and
    shift) are not yet implemented.
    """

    def __init__(self, eta=1e-8, next_layer=None, prev_layer=None):
        """
        :param eta: numerical-stability constant added to the variance.
        """
        self.eta = eta
        self.next_layer = next_layer
        self.prev_layer = prev_layer

    def Forward(self, z_in):
        """Training-time pass; caches the statistics for Backward."""
        self.z_in = z_in
        self.mu = np.mean(self.z_in, axis=1, keepdims=True)
        self.sig = np.std(self.z_in, axis=1, keepdims=True)
        self.denominator = np.sqrt(self.sig ** 2 + self.eta)
        self.z_out = (self.z_in - self.mu) / self.denominator
        return self.next_layer.Forward(self.z_out)

    def Backward(self, core_in, learn_rate):
        """Scale the error core by the cached normaliser and pass it back."""
        core_out = core_in / self.denominator
        self.prev_layer.Backward(core_out, learn_rate)

    def Predict(self, z_in):
        """Inference pass.

        BUG FIXES vs the original: (1) the statistics are computed from the
        incoming batch — the cached training statistics are per-sample
        (axis=1) and do not apply to new data of a different batch size;
        (2) the call is delegated to ``next_layer.Predict`` instead of
        ``next_layer.Forward`` so downstream layers (e.g. Dropout) run in
        inference mode.
        """
        mu = np.mean(z_in, axis=1, keepdims=True)
        sig = np.std(z_in, axis=1, keepdims=True)
        z_out = (z_in - mu) / np.sqrt(sig ** 2 + self.eta)
        return self.next_layer.Predict(z_out)

    def InitWeights(self):
        """No trainable parameters; just adopt the upstream layer's size."""
        self.size = self.prev_layer.size
class Noise_Injector(Layer):
    """Adds Gaussian noise, scaled to a running estimate of the input
    statistics, during training; passes data through untouched at inference."""

    def __init__(self, noise_param=0.1, moving_avg_param=0.1, next_layer=None, prev_layer=None):
        self.noise_param = noise_param
        self.moving_avg_param = moving_avg_param
        self.next_layer = next_layer
        self.prev_layer = prev_layer
        self.mu = 0
        self.sigma = 1

    def Forward(self, z_in):
        decay = self.moving_avg_param
        batch_mu = np.mean(z_in, axis=0, keepdims=True)
        batch_sigma = np.std(z_in, axis=0, keepdims=True)
        # Exponential moving averages of the per-feature input statistics.
        self.mu = decay * batch_mu + (1 - decay) * self.mu
        self.sigma = decay * batch_sigma + (1 - decay) * self.sigma
        jitter = self.noise_param * (np.random.randn(*z_in.shape) * self.sigma + self.mu)
        return self.next_layer.Forward(z_in + jitter)

    def Backward(self, core_in, learn_rate):
        # Additive noise is constant w.r.t. the input, so the gradient
        # passes through unchanged.
        self.prev_layer.Backward(core_in, learn_rate)

    def Predict(self, z_in):
        # No noise at inference time.
        return self.next_layer.Predict(z_in)

    def InitWeights(self):
        self.size = self.prev_layer.size
class Dropout_Layer(Layer):
    """Classic (non-inverted) dropout: random masking while training,
    scaling by the keep probability at inference."""

    def __init__(self, dropout_rate=0.1, next_layer=None, prev_layer=None):
        self.dropout_rate = dropout_rate
        self.next_layer = next_layer
        self.prev_layer = prev_layer

    def Forward(self, z_in):
        # Each unit survives with probability (1 - dropout_rate).
        keep_mask = np.random.random(z_in.shape) > self.dropout_rate
        return self.next_layer.Forward(z_in * keep_mask)

    def Backward(self, core_in, learn_rate):
        self.prev_layer.Backward(core_in, learn_rate)

    def Predict(self, z_in):
        # Scale by the expected keep probability instead of sampling a mask.
        return self.next_layer.Predict(z_in * (1 - self.dropout_rate))

    def InitWeights(self):
        self.size = self.prev_layer.size
class Convolution_Layer(Layer):
    """2-D convolution layer holding ``kernel_num`` square kernels of side
    ``kernel_size`` (incomplete: Backward is not implemented yet)."""

    def __init__(self, kernel_size, kernel_num, next_layer = None, prev_layer = None):
        # Side length of each (square) kernel and number of output channels.
        self.kernel_size = kernel_size
        self.kernel_num = kernel_num
        self.next_layer = next_layer
        self.prev_layer = prev_layer

    def InitWeights(self):
        # Kernel depth matches the previous layer's channel count.
        # NOTE(review): assumes prev_layer exposes ``kernel_num`` (only this
        # class does) — confirm intended stacking of conv layers only.
        self.depth = self.prev_layer.kernel_num
        self.kernels = np.random.rand(self.kernel_num, self.kernel_size, self.kernel_size, self.depth)

    def Forward(self, z_in):
        # Cross-correlate the input with every kernel and stack the resulting
        # channel maps along the last axis.
        # NOTE(review): unlike the other layers this does NOT delegate to
        # self.next_layer.Forward — the stacked maps are returned directly.
        # Also, when self.depth == 1 the slice below becomes [0:-0], i.e.
        # empty — confirm this layer is only used with depth > 1.
        out = []
        for kernel in self.kernels:
            out.append(correlate(z_in, kernel, mode = 'same')[:, :, (self.depth // 2):-(self.depth // 2)])
        return np.dstack(out)

    def Backward(self, core_in, learn_rate):
        # TODO: Write this backward method (gradients w.r.t. kernels and
        # input are not propagated; training through this layer is a no-op).
        pass
/FileBackedArray-0.0.4.tar.gz/FileBackedArray-0.0.4/CONTRIBUTING.md | ```{todo} THIS IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS!
The document assumes you are using a source repository service that promotes a
contribution model similar to [GitHub's fork and pull request workflow].
While this is true for the majority of services (like GitHub, GitLab,
BitBucket), it might not be the case for private repositories (e.g., when
using Gerrit).
Also notice that the code examples might refer to GitHub URLs or the text
might use GitHub specific terminology (e.g., *Pull Request* instead of *Merge
Request*).
Please make sure to check the document having these assumptions in mind
and update things accordingly.
```
```{todo} Provide the correct links/replacements at the bottom of the document.
```
```{todo} You might want to have a look on [PyScaffold's contributor's guide],
especially if your project is open source. The text should be very similar to
this template, but there are a few extra contents that you might decide to
also include, like mentioning labels of your issue tracker or automated
releases.
```
# Contributing
Welcome to `FileBackedArray` contributor's guide.
This document focuses on getting any potential contributor familiarized with
the development processes, but [other kinds of contributions] are also appreciated.
If you are new to using [git] or have never collaborated in a project previously,
please have a look at [contribution-guide.org]. Other resources are also
listed in the excellent [guide created by FreeCodeCamp] [^contrib1].
Please notice, all users and contributors are expected to be **open,
considerate, reasonable, and respectful**. When in doubt,
[Python Software Foundation's Code of Conduct] is a good reference in terms of
behavior guidelines.
## Issue Reports
If you experience bugs or general issues with `FileBackedArray`, please have a look
on the [issue tracker].
If you don't see anything useful there, please feel free to fire an issue report.
:::{tip}
Please don't forget to include the closed issues in your search.
Sometimes a solution was already reported, and the problem is considered
**solved**.
:::
New issue reports should include information about your programming environment
(e.g., operating system, Python version) and steps to reproduce the problem.
Please try also to simplify the reproduction steps to a very minimal example
that still illustrates the problem you are facing. By removing other factors,
you help us to identify the root cause of the issue.
## Documentation Improvements
You can help improve `FileBackedArray` docs by making them more readable and coherent, or
by adding missing information and correcting mistakes.
`FileBackedArray` documentation uses [Sphinx] as its main documentation compiler.
This means that the docs are kept in the same repository as the project code, and
that any documentation update is done in the same way as a code contribution.
```{todo} Don't forget to mention which markup language you are using.
e.g., [reStructuredText] or [CommonMark] with [MyST] extensions.
```
```{todo} If your project is hosted on GitHub, you can also mention the following tip:
:::{tip}
Please notice that the [GitHub web interface] provides a quick way of
propose changes in `FileBackedArray`'s files. While this mechanism can
be tricky for normal code contributions, it works perfectly fine for
contributing to the docs, and can be quite handy.
If you are interested in trying this method out, please navigate to
the `docs` folder in the source [repository], find which file you
would like to propose changes and click in the little pencil icon at the
top, to open [GitHub's code editor]. Once you finish editing the file,
please write a message in the form at the bottom of the page describing
which changes have you made and what are the motivations behind them and
submit your proposal.
:::
```
When working on documentation changes in your local machine, you can
compile them using [tox] :
```
tox -e docs
```
and use Python's built-in web server for a preview in your web browser
(`http://localhost:8000`):
```
python3 -m http.server --directory 'docs/_build/html'
```
## Code Contributions
```{todo} Please include a reference or explanation about the internals of the project.
An architecture description, design principles or at least a summary of the
main concepts will make it easy for potential contributors to get started
quickly.
```
### Submit an issue
Before you work on any non-trivial code contribution it's best to first create
a report in the [issue tracker] to start a discussion on the subject.
This often provides additional considerations and avoids unnecessary work.
### Create an environment
Before you start coding, we recommend creating an isolated [virtual environment]
to avoid any problems with your installed Python packages.
This can easily be done via either [virtualenv]:
```
virtualenv <PATH TO VENV>
source <PATH TO VENV>/bin/activate
```
or [Miniconda]:
```
conda create -n FileBackedArray python=3 six virtualenv pytest pytest-cov
conda activate FileBackedArray
```
### Clone the repository
1. Create an user account on GitHub if you do not already have one.
2. Fork the project [repository]: click on the *Fork* button near the top of the
page. This creates a copy of the code under your account on GitHub.
3. Clone this copy to your local disk:
```
git clone git@github.com:YourLogin/FileBackedArray.git
cd FileBackedArray
```
4. You should run:
```
pip install -U pip setuptools -e .
```
to be able to import the package under development in the Python REPL.
```{todo} if you are not using pre-commit, please remove the following item:
```
5. Install [pre-commit]:
```
pip install pre-commit
pre-commit install
```
`FileBackedArray` comes with a lot of hooks configured to automatically help the
developer to check the code being written.
### Implement your changes
1. Create a branch to hold your changes:
```
git checkout -b my-feature
```
and start making changes. Never work on the main branch!
2. Start your work on this branch. Don't forget to add [docstrings] to new
functions, modules and classes, especially if they are part of public APIs.
3. Add yourself to the list of contributors in `AUTHORS.rst`.
4. When you’re done editing, do:
```
git add <MODIFIED FILES>
git commit
```
to record your changes in [git].
```{todo} if you are not using pre-commit, please remove the following item:
```
Please make sure to see the validation messages from [pre-commit] and fix
any eventual issues.
This should automatically use [flake8]/[black] to check/fix the code style
in a way that is compatible with the project.
:::{important}
Don't forget to add unit tests and documentation in case your
contribution adds an additional feature and is not just a bugfix.
Moreover, writing a [descriptive commit message] is highly recommended.
In case of doubt, you can check the commit history with:
```
git log --graph --decorate --pretty=oneline --abbrev-commit --all
```
to look for recurring communication patterns.
:::
5. Please check that your changes don't break any unit tests with:
```
tox
```
(after having installed [tox] with `pip install tox` or `pipx`).
You can also use [tox] to run several other pre-configured tasks in the
repository. Try `tox -av` to see a list of the available checks.
### Submit your contribution
1. If everything works fine, push your local branch to the remote server with:
```
git push -u origin my-feature
```
2. Go to the web page of your fork and click "Create pull request"
to send your changes for review.
```{todo} if you are using GitHub, you can uncomment the following paragraph
Find more detailed information in [creating a PR]. You might also want to open
the PR as a draft first and mark it as ready for review after the feedbacks
from the continuous integration (CI) system or any required fixes.
```
### Troubleshooting
The following tips can be used when facing problems to build or test the
package:
1. Make sure to fetch all the tags from the upstream [repository].
The command `git describe --abbrev=0 --tags` should return the version you
are expecting. If you are trying to run CI scripts in a fork repository,
make sure to push all the tags.
You can also try to remove all the egg files or the complete egg folder, i.e.,
`.eggs`, as well as the `*.egg-info` folders in the `src` folder or
potentially in the root of your project.
2. Sometimes [tox] misses out when new dependencies are added, especially to
`setup.cfg` and `docs/requirements.txt`. If you find any problems with
missing dependencies when running a command with [tox], try to recreate the
`tox` environment using the `-r` flag. For example, instead of:
```
tox -e docs
```
Try running:
```
tox -r -e docs
```
3. Make sure to have a reliable [tox] installation that uses the correct
Python version (e.g., 3.7+). When in doubt you can run:
```
tox --version
# OR
which tox
```
If you have trouble and are seeing weird errors upon running [tox], you can
also try to create a dedicated [virtual environment] with a [tox] binary
freshly installed. For example:
```
virtualenv .venv
source .venv/bin/activate
.venv/bin/pip install tox
.venv/bin/tox -e all
```
4. [Pytest can drop you] in an interactive session in the case an error occurs.
In order to do that you need to pass a `--pdb` option (for example by
running `tox -- -k <NAME OF THE FALLING TEST> --pdb`).
You can also setup breakpoints manually instead of using the `--pdb` option.
## Maintainer tasks
### Releases
```{todo} This section assumes you are using PyPI to publicly release your package.
If instead you are using a different/private package index, please update
the instructions accordingly.
```
If you are part of the group of maintainers and have correct user permissions
on [PyPI], the following steps can be used to release a new version for
`FileBackedArray`:
1. Make sure all unit tests are successful.
2. Tag the current commit on the main branch with a release tag, e.g., `v1.2.3`.
3. Push the new tag to the upstream [repository],
e.g., `git push upstream v1.2.3`
4. Clean up the `dist` and `build` folders with `tox -e clean`
(or `rm -rf dist build`)
to avoid confusion with old builds and Sphinx docs.
5. Run `tox -e build` and check that the files in `dist` have
the correct version (no `.dirty` or [git] hash) according to the [git] tag.
Also check the sizes of the distributions, if they are too big (e.g., >
500KB), unwanted clutter may have been accidentally included.
6. Run `tox -e publish -- --repository pypi` and check that everything was
uploaded to [PyPI] correctly.
[^contrib1]: Even though, these resources focus on open source projects and
communities, the general ideas behind collaborating with other developers
to collectively create software are general and can be applied to all sorts
of environments, including private companies and proprietary code bases.
[black]: https://pypi.org/project/black/
[commonmark]: https://commonmark.org/
[contribution-guide.org]: http://www.contribution-guide.org/
[creating a pr]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request
[descriptive commit message]: https://chris.beams.io/posts/git-commit
[docstrings]: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
[first-contributions tutorial]: https://github.com/firstcontributions/first-contributions
[flake8]: https://flake8.pycqa.org/en/stable/
[git]: https://git-scm.com
[github web interface]: https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/editing-files-in-your-repository
[github's code editor]: https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/editing-files-in-your-repository
[github's fork and pull request workflow]: https://guides.github.com/activities/forking/
[guide created by freecodecamp]: https://github.com/freecodecamp/how-to-contribute-to-open-source
[miniconda]: https://docs.conda.io/en/latest/miniconda.html
[myst]: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html
[other kinds of contributions]: https://opensource.guide/how-to-contribute
[pre-commit]: https://pre-commit.com/
[pypi]: https://pypi.org/
[pyscaffold's contributor's guide]: https://pyscaffold.org/en/stable/contributing.html
[pytest can drop you]: https://docs.pytest.org/en/stable/usage.html#dropping-to-pdb-python-debugger-at-the-start-of-a-test
[python software foundation's code of conduct]: https://www.python.org/psf/conduct/
[restructuredtext]: https://www.sphinx-doc.org/en/master/usage/restructuredtext/
[sphinx]: https://www.sphinx-doc.org/en/master/
[tox]: https://tox.readthedocs.io/en/stable/
[virtual environment]: https://realpython.com/python-virtual-environments-a-primer/
[virtualenv]: https://virtualenv.pypa.io/en/stable/
```{todo} Please review and change the following definitions:
```
[repository]: https://github.com/<USERNAME>/FileBackedArray
[issue tracker]: https://github.com/<USERNAME>/FileBackedArray/issues
| PypiClean |
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/resources/coverages/registration/base.py |
import re
from django.db.models import ForeignKey
from django.contrib.gis.geos import Polygon
from django.contrib.gis.gdal import SpatialReference, CoordTransform
from django.utils.six import string_types
from eoxserver.backends.access import vsi_open
from eoxserver.backends.util import resolve_storage
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.metadata.coverage_formats import (
get_reader_by_test
)
from eoxserver.resources.coverages.registration.exceptions import (
RegistrationError
)
class RegistrationReport(object):
    """Plain record describing the outcome of a single registration run.

    Carries the created coverage, whether an existing coverage was
    replaced, the metadata readers that contributed values, and the full
    dict of metadata collected along the way.
    """
    def __init__(self, coverage, replaced, metadata_parsers,
                 retrieved_metadata):
        # dict of all metadata values gathered for the coverage
        self.retrieved_metadata = retrieved_metadata
        # readers (and their values) used while collecting metadata
        self.metadata_parsers = metadata_parsers
        # True when a pre-existing coverage with the same id was replaced
        self.replaced = replaced
        # the newly created coverage model instance
        self.coverage = coverage
class BaseRegistrator(object):
    """ Abstract base component to be used by specialized registrators.

    Subclasses must implement :meth:`_read_metadata_from_data`, which
    extracts still-missing metadata directly from the data files.
    """
    abstract = True
    # Metadata keys that must be available (from overrides, metadata files
    # or the data files themselves) before a coverage can be created.
    metadata_keys = frozenset((
        "identifier",
        # "footprint", "begin_time", "end_time",
        "size", "origin", "grid"
    ))
    def register(self, data_locations, metadata_locations,
                 coverage_type_name=None, footprint_from_extent=False,
                 overrides=None, identifier_template=None,
                 highest_resolution=False, replace=False, cache=None,
                 use_subdatasets=False, simplify_footprint_tolerance=None,
                 statistics=None):
        """ Main registration method

        :param data_locations: list of data item locations; each location
                               is a sequence whose last element is the
                               path and whose leading elements identify
                               the storage.
        :param metadata_locations: metadata item locations with the same
                                   structure as ``data_locations``.
        :param coverage_type_name: optional name of the ``CoverageType``
                                   to associate with the coverage.
        :param footprint_from_extent: when set, compute the footprint from
                                      the grid extent instead of reading
                                      it from metadata.
        :param overrides: metadata values taking precedence over values
                          read from metadata or data files.
        :param identifier_template: format string applied to the retrieved
                                    metadata to build the final identifier.
        :param replace: when set, delete and re-create an already existing
                        coverage with the same identifier.
        :param use_subdatasets: interpret ``type:path:locator`` style paths
                                as subdataset specifiers.
        :param simplify_footprint_tolerance: when given, simplify the
                                             footprint with this tolerance.
        :param statistics: optional per-data-item list of band statistics
                           dicts.
        :returns: A registration report
        :rtype: `RegistrationReport`
        :raises RegistrationError: on unknown coverage type, wrong number
                                   of data files or missing metadata.
        """
        replaced = False
        retrieved_metadata = overrides or {}
        # fetch the coverage type if a type name was specified
        coverage_type = None
        if coverage_type_name:
            try:
                coverage_type = models.CoverageType.objects.get(
                    name=coverage_type_name
                )
            except models.CoverageType.DoesNotExist:
                raise RegistrationError(
                    'No such coverage type %r' % coverage_type_name
                )
        # create MetaDataItems for each item that is metadata
        metadata_items = [
            models.MetaDataItem(
                location=location[-1],
                storage=resolve_storage(location[:-1])
            )
            for location in metadata_locations
        ]
        # prepare ArrayDataItems for each given location
        arraydata_items = []
        arraydata_item_statistics = []
        statistics = statistics or [None] * len(data_locations)
        if len(statistics) != len(data_locations):
            raise ValueError("Invalid number of statistics passed")
        for location, stats in zip(data_locations, statistics):
            # handle storages and/or subdataset specifiers
            path = location[-1]
            # parts = path.split(':')
            # split on un-escaped ':' only; '\:' is an escaped literal colon
            parts = [
                part.replace('\\:', ':')
                for part in re.split(r'(?<!\\):', path)
            ]
            subdataset_type = None
            subdataset_locator = None
            if use_subdatasets and len(parts) > 1:
                # path is of the form <subdataset-type>:<path>:<locator>
                path = parts[1]
                subdataset_type = parts[0]
                subdataset_locator = ":".join(parts[2:])
            else:
                path = path.replace('\\:', ':')
            arraydata_items.append(
                models.ArrayDataItem(
                    location=path,
                    storage=resolve_storage(location[:-1]),
                    subdataset_type=subdataset_type,
                    subdataset_locator=subdataset_locator,
                )
            )
            # collect BandStatistics objects (saved later, once the
            # arraydata items are persisted)
            band_stats = None
            if stats:
                band_stats = []
                for i, stat in enumerate(stats, start=1):
                    band_stats.append(
                        models.BandStatistics(
                            band_index=i,
                            mean=stat.get('mean'),
                            minimum=stat.get('minimum'),
                            maximum=stat.get('maximum'),
                            stddev=stat.get('stddev'),
                            valid_percent=stat.get('valid_percent'),
                            histogram=stat.get('histogram'),
                        )
                    )
            arraydata_item_statistics.append(band_stats)
        metadata_parsers = []
        # read metadata until we are satisfied or run out of metadata items
        for metadata_item in metadata_items:
            if not self.missing_metadata_keys(retrieved_metadata):
                break
            metadata_parsers.append(
                self._read_metadata(
                    metadata_item, retrieved_metadata, cache
                )
            )
        # check the coverage type for expected amount of fields
        if coverage_type:
            num_fields = coverage_type.field_types.count()
            # either one file holding all fields, or one file per field
            if len(arraydata_items) != 1 \
                    and len(arraydata_items) != num_fields:
                raise RegistrationError(
                    'Invalid number of data files specified. Expected 1 or %d '
                    'got %d.'
                    % (num_fields, len(arraydata_items))
                )
            # TODO: lookup actual band counts
            if len(arraydata_items) == 1:
                arraydata_items[0].band_count = num_fields
            else:
                for i, arraydata_item in enumerate(arraydata_items):
                    arraydata_item.field_index = i
                    arraydata_item.band_count = 1
        elif len(arraydata_items) != 1:
            raise RegistrationError(
                'Invalid number of data files specified.'
            )
        # TODO find actual bands
        # if there is still some metadata missing, read it from the data
        for arraydata_item in arraydata_items:
            if not self.missing_metadata_keys(retrieved_metadata) and \
                    not highest_resolution:
                break
            metadata_parsers.append(
                self._read_metadata_from_data(
                    arraydata_item, retrieved_metadata, cache,
                    highest_resolution
                )
            )
        if self.missing_metadata_keys(retrieved_metadata):
            raise RegistrationError(
                "Missing metadata keys %s."
                % ", ".join(self.missing_metadata_keys(retrieved_metadata))
            )
        identifier = retrieved_metadata["identifier"]
        if identifier_template:
            identifier = identifier_template.format(**retrieved_metadata)
            retrieved_metadata["identifier"] = identifier
        collections = []
        product = None
        if replace:
            try:
                # get a list of all collections the coverage was in.
                coverage = models.Coverage.objects.get(
                    identifier=identifier
                )
                product = coverage.parent_product
                collections = list(models.Collection.objects.filter(
                    coverages=coverage.pk
                ))
                coverage.delete()
                replaced = True
            except models.Coverage.DoesNotExist:
                pass
        # calculate the footprint from the extent
        if footprint_from_extent:
            footprint = self._footprint_from_grid(
                retrieved_metadata['grid'], retrieved_metadata['origin'],
                retrieved_metadata['size']
            )
            retrieved_metadata['footprint'] = footprint
        if simplify_footprint_tolerance is not None and \
                retrieved_metadata.get('footprint'):
            footprint = retrieved_metadata.get('footprint')
            retrieved_metadata['footprint'] = footprint.simplify(
                simplify_footprint_tolerance, preserve_topology=True
            )
        coverage = self._create_coverage(
            identifier=identifier,
            footprint=retrieved_metadata.get('footprint'),
            begin_time=retrieved_metadata.get('begin_time'),
            end_time=retrieved_metadata.get('end_time'),
            size=retrieved_metadata['size'],
            origin=retrieved_metadata['origin'],
            grid=retrieved_metadata['grid'],
            coverage_type_name=coverage_type_name,
            arraydata_items=arraydata_items,
            metadata_items=metadata_items,
            arraydata_item_statistics=arraydata_item_statistics,
        )
        # when we replaced the coverage, re-insert the newly created coverage
        # to the collections and/or product
        for collection in collections:
            models.collection_insert_eo_object(collection, coverage)
        if product:
            models.product_add_coverage(product, coverage)
        return RegistrationReport(
            coverage, replaced, metadata_parsers, retrieved_metadata
        )
    def _read_metadata(self, metadata_item, retrieved_metadata, cache):
        """ Read all available metadata of a ``data_item`` into the
        ``retrieved_metadata`` :class:`dict`.

        Existing keys are never overwritten (``setdefault``).  Returns the
        ``(reader, values)`` pair when a reader matched and produced
        values, ``None`` otherwise.
        """
        with vsi_open(metadata_item) as f:
            content = f.read()
        reader = get_reader_by_test(content)
        if reader:
            values = reader.read(content)
            # the reader may report the metadata format; store it on the item
            format_ = values.pop("format", None)
            if format_:
                metadata_item.format = format_
            for key, value in values.items():
                retrieved_metadata.setdefault(key, value)
            if values:
                return reader, values
        return None
    def _read_metadata_from_data(self, data_item, retrieved_metadata, cache,
                                 highest_resolution):
        "Interface method to be overridden in subclasses"
        raise NotImplementedError
    def _footprint_from_grid(self, grid, origin, size):
        "Calculate the footprint from the grid"
        # only grids whose first two axes are spatial can yield a footprint
        if grid['axis_types'][:2] != ['spatial', 'spatial']:
            raise RegistrationError("Cannot compute footprint from given grid")
        x1, y1 = origin[:2]
        # NOTE(review): this unpacking assumes exactly two axis offsets;
        # grids with additional axes would raise here -- confirm intended.
        dx, dy = grid['axis_offsets']
        sx, sy = size[:2]
        x2, y2 = (x1 + sx * dx, y1 + sy * dy)
        footprint = Polygon.from_bbox((
            min(x1, x2), min(y1, y2),
            max(x1, x2), max(y1, y2)
        ))
        # reproject the bounding box to WGS84 (EPSG:4326)
        footprint.transform(
            CoordTransform(
                SpatialReference(grid['coordinate_reference_system']),
                SpatialReference(4326)
            )
        )
        return footprint
    def _create_coverage(self, identifier, footprint, begin_time, end_time,
                         size, origin, grid, coverage_type_name,
                         arraydata_items, metadata_items,
                         arraydata_item_statistics):
        """ Create and save the ``Coverage`` plus all of its related
        metadata items, arraydata items and band statistics.

        ``size`` and ``origin`` are padded with ``None`` up to the four
        axes supported by the model.
        """
        coverage_type = None
        if coverage_type_name:
            try:
                coverage_type = models.CoverageType.objects.get(
                    name=coverage_type_name
                )
            except models.CoverageType.DoesNotExist:
                raise RegistrationError(
                    'Coverage type %r does not exist' % coverage_type_name
                )
        grid = self._get_grid(grid)
        if len(size) < 4:
            size = list(size) + [None] * (4 - len(size))
        elif len(size) > 4:
            raise RegistrationError('Highest dimension number is 4.')
        if len(origin) < 4:
            origin = list(origin) + [None] * (4 - len(origin))
        elif len(origin) > 4:
            raise RegistrationError('Highest dimension number is 4.')
        (axis_1_size, axis_2_size, axis_3_size, axis_4_size) = size
        (axis_1_origin, axis_2_origin, axis_3_origin, axis_4_origin) = origin
        coverage = models.Coverage(
            identifier=identifier, footprint=footprint,
            begin_time=begin_time, end_time=end_time,
            coverage_type=coverage_type,
            grid=grid,
            axis_1_origin=axis_1_origin,
            axis_2_origin=axis_2_origin,
            axis_3_origin=axis_3_origin,
            axis_4_origin=axis_4_origin,
            axis_1_size=axis_1_size,
            axis_2_size=axis_2_size,
            axis_3_size=axis_3_size,
            axis_4_size=axis_4_size,
        )
        coverage.full_clean()
        coverage.save()
        # attach all data items
        for metadata_item in metadata_items:
            metadata_item.eo_object = coverage
            metadata_item.full_clean()
            metadata_item.save()
        for arraydata_item, band_statistics in \
                zip(arraydata_items, arraydata_item_statistics):
            arraydata_item.coverage = coverage
            arraydata_item.full_clean()
            arraydata_item.save()
            # band statistics can only be saved once their item is persisted
            if band_statistics:
                for band_stats in band_statistics:
                    band_stats.arraydata_item = arraydata_item
                    band_stats.full_clean()
                    band_stats.save()
        return coverage
    def _create_metadata(self, coverage, metadata_values):
        """ Create a ``CoverageMetadata`` record from ``metadata_values``,
        dropping ``None`` values and converting the rest via :func:`convert`.
        """
        metadata_values = dict(
            (name, convert(name, value, models.CoverageMetadata))
            for name, value in metadata_values.items()
            if value is not None
        )
        models.CoverageMetadata.objects.create(
            coverage=coverage, **metadata_values
        )
    def missing_metadata_keys(self, retrieved_metadata):
        """ Return a :class:`frozenset` of metadata keys still missing.
        """
        return self.metadata_keys - frozenset(retrieved_metadata.keys())
    def _get_grid(self, definition):
        # thin hook so subclasses could alter grid resolution/creation
        return get_grid(definition)
def get_grid(definition):
    """ Get or create a grid according to our definition.

    ``definition`` may be a grid name (string), an existing ``Grid``
    instance, or a dict with ``axis_types``, ``axis_offsets``, optional
    ``axis_names`` and a ``coordinate_reference_system``.  A matching grid
    is reused when possible; otherwise a new one is created.  A falsy
    definition yields ``None``.
    """
    grid = None
    if isinstance(definition, string_types):
        # a name: the grid must already exist
        try:
            grid = models.Grid.objects.get(name=definition)
        except models.Grid.DoesNotExist:
            raise RegistrationError(
                'Grid %r does not exist' % definition
            )
    elif isinstance(definition, models.Grid):
        grid = definition
    elif definition:
        axis_names = definition.get('axis_names', [])
        axis_types = definition['axis_types']
        axis_offsets = definition['axis_offsets']
        # check lengths and destructure
        if len(axis_types) != len(axis_offsets):
            raise RegistrationError('Dimensionality mismatch')
        elif axis_names and len(axis_names) != len(axis_types):
            raise RegistrationError('Dimensionality mismatch')
        # pad types/offsets with None up to the 4 axes of the Grid model
        if len(axis_types) < 4:
            axis_types = list(axis_types) + [None] * (4 - len(axis_types))
        elif len(axis_types) > 4:
            raise RegistrationError('Highest dimension number is 4.')
        if len(axis_offsets) < 4:
            axis_offsets = (
                list(axis_offsets) + [None] * (4 - len(axis_offsets))
            )
        elif len(axis_offsets) > 4:
            raise RegistrationError('Highest dimension number is 4.')
        # translate axis type name to ID
        axis_type_names_to_id = {
            name: id_
            for id_, name in models.Grid.AXIS_TYPES
        }
        axis_types = [
            axis_type_names_to_id[axis_type] if axis_type else None
            for axis_type in axis_types
        ]
        # a zero offset would make the axis degenerate
        for name, offset in zip(axis_names, axis_offsets):
            if offset == 0:
                raise RegistrationError(
                    'Invalid offset for axis %s: %s.' % (name, offset)
                )
        # unwrap axis types, offsets, names
        (type_1, type_2, type_3, type_4) = axis_types
        (offset_1, offset_2, offset_3, offset_4) = axis_offsets
        # TODO: use names like 'time', or 'x'/'y', etc
        # NOTE(review): the `if i < len(axis_types)` condition below is
        # always true (i iterates range(len(axis_types))), so the fallback
        # names are simply '0'..'3' -- the `else None` branch is dead code.
        axis_names = axis_names or [
            '%d' % i if i < len(axis_types) else None
            for i in range(len(axis_types))
        ]
        (name_1, name_2, name_3, name_4) = (
            axis_names + [None] * (4 - len(axis_names))
        )
        # try to find a suitable grid: with the given axis types,
        # offsets and coordinate reference system
        grid = models.Grid.objects.filter(
            coordinate_reference_system=definition[
                'coordinate_reference_system'
            ],
            axis_1_type=type_1,
            axis_2_type=type_2,
            axis_3_type=type_3,
            axis_4_type=type_4,
            axis_1_offset=offset_1,
            axis_2_offset=offset_2,
            axis_3_offset=offset_3,
            axis_4_offset=offset_4,
        ).first()
        if grid is None:
            # create a new grid from the given definition
            grid = models.Grid.objects.create(
                coordinate_reference_system=definition[
                    'coordinate_reference_system'
                ],
                axis_1_name=name_1,
                axis_2_name=name_2,
                axis_3_name=name_3,
                axis_4_name=name_4,
                axis_1_type=type_1,
                axis_2_type=type_2,
                axis_3_type=type_3,
                axis_4_type=type_4,
                axis_1_offset=offset_1,
                axis_2_offset=offset_2,
                axis_3_offset=offset_3,
                axis_4_offset=offset_4,
                resolution=definition.get('resolution')
            )
    return grid
def is_common_value(field):
    """Return ``True`` when *field* is a ``ForeignKey`` whose related model
    exposes a ``value`` field (the "common value" lookup-table pattern).

    Any failure during inspection -- including a non-ForeignKey field or a
    related model without a ``value`` field -- is deliberately swallowed
    and reported as ``False``.
    """
    try:
        if not isinstance(field, ForeignKey):
            return False
        # raises if the related model has no 'value' field
        field.related_model._meta.get_field('value')
    except Exception:
        return False
    return True
def convert(name, value, model_class):
    """Coerce *value* for assignment to field *name* of *model_class*.

    Common-value foreign keys are resolved to (or created as) the row
    holding the value; choice fields map a human readable label back to
    its stored key; anything else passes through unchanged.
    """
    field = model_class._meta.get_field(name)
    if is_common_value(field):
        # reuse or create the lookup-table row holding this value
        return field.related_model.objects.get_or_create(value=value)[0]
    if field.choices:
        # invert (key, label) pairs to map the label back to its key
        label_to_key = {label: key for key, label in field.choices}
        return label_to_key[value]
    return value
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/locale/lang/pt.js | 'use strict';
exports.__esModule = true;
// Portuguese (pt) locale strings for Element UI components.
// Entries marked "to be translated" still carry the English source text.
exports.default = {
  el: {
    colorpicker: {
      confirm: 'Confirmar',
      clear: 'Limpar'
    },
    datepicker: {
      now: 'Agora',
      today: 'Hoje',
      cancel: 'Cancelar',
      clear: 'Limpar',
      confirm: 'Confirmar',
      selectDate: 'Selecione a data',
      selectTime: 'Selecione a hora',
      startDate: 'Data de inicio',
      startTime: 'Hora de inicio',
      endDate: 'Data de fim',
      endTime: 'Hora de fim',
      prevYear: 'Previous Year', // to be translated
      nextYear: 'Next Year', // to be translated
      prevMonth: 'Previous Month', // to be translated
      nextMonth: 'Next Month', // to be translated
      year: '',
      month1: 'Janeiro',
      month2: 'Fevereiro',
      month3: 'Março',
      month4: 'Abril',
      month5: 'Maio',
      month6: 'Junho',
      month7: 'Julho',
      month8: 'Agosto',
      month9: 'Setembro',
      month10: 'Outubro',
      month11: 'Novembro',
      month12: 'Dezembro',
      // week: 'semana',
      weeks: {
        sun: 'Dom',
        mon: 'Seg',
        tue: 'Ter',
        wed: 'Qua',
        thu: 'Qui',
        fri: 'Sex',
        sat: 'Sab'
      },
      months: {
        jan: 'Jan',
        feb: 'Fev',
        mar: 'Mar',
        apr: 'Abr',
        may: 'Mai',
        jun: 'Jun',
        jul: 'Jul',
        aug: 'Ago',
        sep: 'Set',
        oct: 'Out',
        nov: 'Nov',
        dec: 'Dez'
      }
    },
    select: {
      loading: 'A carregar',
      noMatch: 'Sem correspondência',
      noData: 'Sem dados',
      placeholder: 'Selecione'
    },
    cascader: {
      noMatch: 'Sem correspondência',
      loading: 'A carregar',
      placeholder: 'Selecione',
      noData: 'Sem dados'
    },
    pagination: {
      goto: 'Ir para',
      pagesize: '/pagina',
      total: 'Total {total}',
      pageClassifier: ''
    },
    messagebox: {
      title: 'Mensagem',
      confirm: 'Confirmar',
      cancel: 'Cancelar',
      error: 'Erro!'
    },
    upload: {
      deleteTip: 'press delete to remove', // to be translated
      delete: 'Apagar',
      preview: 'Previsualizar',
      continue: 'Continuar'
    },
    table: {
      emptyText: 'Sem dados',
      confirmFilter: 'Confirmar',
      resetFilter: 'Limpar',
      clearFilter: 'Todos',
      sumText: 'Sum' // to be translated
    },
    tree: {
      emptyText: 'Sem dados'
    },
    transfer: {
      noMatch: 'Sem correspondência',
      noData: 'Sem dados',
      titles: ['List 1', 'List 2'], // to be translated
      filterPlaceholder: 'Enter keyword', // to be translated
      noCheckedFormat: '{total} items', // to be translated
      hasCheckedFormat: '{checked}/{total} checked' // to be translated
    },
    image: {
      error: 'FAILED' // to be translated
    },
    pageHeader: {
      title: 'Back' // to be translated
    }
  }
};
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/addon/comment/comment.js |
// CodeMirror "comment" addon: provides the toggleComment command plus the
// lineComment, blockComment and uncomment editor extensions.  Comment
// strings come from the active mode (lineComment, blockCommentStart, ...)
// or can be overridden through the options object.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
  "use strict";
  // shared empty options object used when the caller passes none
  var noOptions = {};
  // matches the first character that is neither whitespace nor NBSP
  var nonWS = /[^\s\u00a0]/;
  var Pos = CodeMirror.Pos;
  // column of the first non-whitespace character of str (0 for blank lines)
  function firstNonWS(str) {
    var found = str.search(nonWS);
    return found == -1 ? 0 : found;
  }
  // Toggle comments on all selections: if the first selection can be
  // uncommented, uncomment everything; otherwise line-comment everything.
  CodeMirror.commands.toggleComment = function(cm) {
    var minLine = Infinity, ranges = cm.listSelections(), mode = null;
    // iterate selections bottom-up so earlier edits don't shift positions
    for (var i = ranges.length - 1; i >= 0; i--) {
      var from = ranges[i].from(), to = ranges[i].to();
      // skip ranges already covered by a previously processed selection
      if (from.line >= minLine) continue;
      if (to.line >= minLine) to = Pos(minLine, 0);
      minLine = from.line;
      if (mode == null) {
        if (cm.uncomment(from, to)) mode = "un";
        else { cm.lineComment(from, to); mode = "line"; }
      } else if (mode == "un") {
        cm.uncomment(from, to);
      } else {
        cm.lineComment(from, to);
      }
    }
  };
  // Prefix every line of the range with the mode's line-comment string
  // (falling back to block comments when the mode has no line comments).
  CodeMirror.defineExtension("lineComment", function(from, to, options) {
    if (!options) options = noOptions;
    var self = this, mode = self.getModeAt(from);
    var commentString = options.lineComment || mode.lineComment;
    if (!commentString) {
      if (options.blockCommentStart || mode.blockCommentStart) {
        options.fullLines = true;
        self.blockComment(from, to, options);
      }
      return;
    }
    var firstLine = self.getLine(from.line);
    if (firstLine == null) return;
    // exclude the last line when the selection ends at column 0
    var end = Math.min(to.ch != 0 || to.line == from.line ? to.line + 1 : to.line, self.lastLine() + 1);
    var pad = options.padding == null ? " " : options.padding;
    var blankLines = options.commentBlankLines || from.line == to.line;
    self.operation(function() {
      if (options.indent) {
        // insert the comment string at the first line's indentation depth
        var baseString = firstLine.slice(0, firstNonWS(firstLine));
        for (var i = from.line; i < end; ++i) {
          var line = self.getLine(i), cut = baseString.length;
          if (!blankLines && !nonWS.test(line)) continue;
          if (line.slice(0, cut) != baseString) cut = firstNonWS(line);
          self.replaceRange(baseString + commentString + pad, Pos(i, 0), Pos(i, cut));
        }
      } else {
        for (var i = from.line; i < end; ++i) {
          if (blankLines || nonWS.test(self.getLine(i)))
            self.replaceRange(commentString + pad, Pos(i, 0));
        }
      }
    });
  });
  // Wrap the range in block-comment markers (falling back to line comments
  // when the mode defines none and fullLines is not explicitly disabled).
  CodeMirror.defineExtension("blockComment", function(from, to, options) {
    if (!options) options = noOptions;
    var self = this, mode = self.getModeAt(from);
    var startString = options.blockCommentStart || mode.blockCommentStart;
    var endString = options.blockCommentEnd || mode.blockCommentEnd;
    if (!startString || !endString) {
      if ((options.lineComment || mode.lineComment) && options.fullLines != false)
        self.lineComment(from, to, options);
      return;
    }
    var end = Math.min(to.line, self.lastLine());
    // exclude the last line when the selection ends at column 0
    if (end != from.line && to.ch == 0 && nonWS.test(self.getLine(end))) --end;
    var pad = options.padding == null ? " " : options.padding;
    if (from.line > end) return;
    self.operation(function() {
      if (options.fullLines != false) {
        var lastLineHasText = nonWS.test(self.getLine(end));
        self.replaceRange(pad + endString, Pos(end));
        self.replaceRange(startString + pad, Pos(from.line, 0));
        // prefix interior lines with the block-comment lead (e.g. " * ")
        var lead = options.blockCommentLead || mode.blockCommentLead;
        if (lead != null) for (var i = from.line + 1; i <= end; ++i)
          if (i != end || lastLineHasText)
            self.replaceRange(lead + pad, Pos(i, 0));
      } else {
        self.replaceRange(endString, to);
        self.replaceRange(startString, from);
      }
    });
  });
  // Remove line or block comments covering the range; returns true when
  // something was actually uncommented.
  CodeMirror.defineExtension("uncomment", function(from, to, options) {
    if (!options) options = noOptions;
    var self = this, mode = self.getModeAt(from);
    var end = Math.min(to.ch != 0 || to.line == from.line ? to.line : to.line - 1, self.lastLine()), start = Math.min(from.line, end);
    // Try finding line comments
    var lineString = options.lineComment || mode.lineComment, lines = [];
    var pad = options.padding == null ? " " : options.padding, didSomething;
    lineComment: {
      if (!lineString) break lineComment;
      for (var i = start; i <= end; ++i) {
        var line = self.getLine(i);
        var found = line.indexOf(lineString);
        // ignore matches the mode doesn't tokenize as comments (e.g. in strings)
        if (found > -1 && !/comment/.test(self.getTokenTypeAt(Pos(i, found + 1)))) found = -1;
        if (found == -1 && (i != end || i == start) && nonWS.test(line)) break lineComment;
        if (found > -1 && nonWS.test(line.slice(0, found))) break lineComment;
        lines.push(line);
      }
      self.operation(function() {
        for (var i = start; i <= end; ++i) {
          var line = lines[i - start];
          var pos = line.indexOf(lineString), endPos = pos + lineString.length;
          if (pos < 0) continue;
          // also strip the padding that lineComment inserted
          if (line.slice(endPos, endPos + pad.length) == pad) endPos += pad.length;
          didSomething = true;
          self.replaceRange("", Pos(i, pos), Pos(i, endPos));
        }
      });
      if (didSomething) return true;
    }
    // Try block comments
    var startString = options.blockCommentStart || mode.blockCommentStart;
    var endString = options.blockCommentEnd || mode.blockCommentEnd;
    if (!startString || !endString) return false;
    var lead = options.blockCommentLead || mode.blockCommentLead;
    var startLine = self.getLine(start), endLine = end == start ? startLine : self.getLine(end);
    var open = startLine.indexOf(startString), close = endLine.lastIndexOf(endString);
    if (close == -1 && start != end) {
      endLine = self.getLine(--end);
      close = endLine.lastIndexOf(endString);
    }
    if (open == -1 || close == -1 ||
        !/comment/.test(self.getTokenTypeAt(Pos(start, open + 1))) ||
        !/comment/.test(self.getTokenTypeAt(Pos(end, close + 1))))
      return false;
    // Avoid killing block comments completely outside the selection.
    // Positions of the last startString before the start of the selection, and the first endString after it.
    var lastStart = startLine.lastIndexOf(startString, from.ch);
    var firstEnd = lastStart == -1 ? -1 : startLine.slice(0, from.ch).indexOf(endString, lastStart + startString.length);
    if (lastStart != -1 && firstEnd != -1 && firstEnd + endString.length != from.ch) return false;
    // Positions of the first endString after the end of the selection, and the last startString before it.
    firstEnd = endLine.indexOf(endString, to.ch);
    var almostLastStart = endLine.slice(to.ch).lastIndexOf(startString, firstEnd - to.ch);
    lastStart = (firstEnd == -1 || almostLastStart == -1) ? -1 : to.ch + almostLastStart;
    if (firstEnd != -1 && lastStart != -1 && lastStart != to.ch) return false;
    self.operation(function() {
      self.replaceRange("", Pos(end, close - (pad && endLine.slice(close - pad.length, close) == pad ? pad.length : 0)),
                        Pos(end, close + endString.length));
      var openEnd = open + startString.length;
      if (pad && startLine.slice(openEnd, openEnd + pad.length) == pad) openEnd += pad.length;
      self.replaceRange("", Pos(start, open), Pos(start, openEnd));
      // strip per-line leads (e.g. " * ") from interior lines
      if (lead) for (var i = start + 1; i <= end; ++i) {
        var line = self.getLine(i), found = line.indexOf(lead);
        if (found == -1 || nonWS.test(line.slice(0, found))) continue;
        var foundEnd = found + lead.length;
        if (pad && line.slice(foundEnd, foundEnd + pad.length) == pad) foundEnd += pad.length;
        self.replaceRange("", Pos(i, found), Pos(i, foundEnd));
      }
    });
    return true;
  });
});
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/coverage_detection.py | import copy
def coverage_comparison(cover, cover_sets, poss, first, strand, cover_pos):
    '''Track the running peak ("high") and trailing trough ("low") of a
    coverage profile.

    ``cover_sets`` and ``poss`` are updated in place: on the very first
    call both high and low are initialised to the current value/position;
    afterwards a new maximum resets both, while the minimum is only
    tracked downstream of the current peak (position-wise, respecting the
    strand direction).  Returns the updated ``first`` flag.
    '''
    if first:
        cover_sets["high"] = cover
        cover_sets["low"] = cover
        poss["high"] = cover_pos
        poss["low"] = cover_pos
        return False
    if cover > cover_sets["high"]:
        # new peak: restart the trough search from here
        cover_sets["high"] = cover
        cover_sets["low"] = cover
        poss["high"] = cover_pos
        poss["low"] = cover_pos
    if strand == "+":
        downstream = poss["low"] >= poss["high"]
        upstream = poss["low"] < poss["high"]
    elif strand == "-":
        downstream = poss["low"] <= poss["high"]
        upstream = poss["low"] > poss["high"]
    else:
        downstream = upstream = False
    if downstream:
        if cover < cover_sets["low"]:
            cover_sets["low"] = cover
            poss["low"] = cover_pos
    elif upstream:
        cover_sets["low"] = cover
        poss["low"] = cover_pos
    return first
def get_repmatch(replicates, cond):
    '''Resolve the replicate requirement for condition *cond*.

    Each entry of *replicates* looks like ``<condition>_<number>``; an
    entry containing ``all`` applies to every condition and wins
    immediately.  Otherwise the last entry whose condition prefix matches
    *cond* provides the number.  (As in the original implementation, a
    completely unmatched condition leaves the result unbound and raises
    ``UnboundLocalError``.)
    '''
    for entry in replicates:
        if "all" in entry:
            # a global requirement overrides any per-condition entries
            return int(entry.split("_")[-1])
    prefix = cond.split("_")[0]
    for entry in replicates:
        if entry.split("_")[0] == prefix:
            rep = int(entry.split("_")[-1])
    return rep
def define_cutoff(coverages, median, utr_type):
    '''Build the per-track coverage cutoff table.

    When the configured statistic for *utr_type* is ``"mean"`` the mean of
    each track is used; any other setting selects the median.
    '''
    stat = "mean" if coverages[utr_type] == "mean" else "median"
    return {track: values[stat] for track, values in median.items()}
def check_notex(cover, texs, cutoff, notex):
    '''Check whether *cover*'s average passes its library-specific cutoff.

    With a separate TEX- threshold (*notex*) and a non-empty TEX pairing
    table (*texs*), TEX+ tracks (first half of a ``"a@AND@b"`` key) are
    compared against *cutoff* and TEX- tracks against *notex*.  Otherwise
    the single *cutoff* applies.  Returns ``True`` on success, ``None``
    otherwise (mirroring the original implicit return).
    '''
    if (notex is None) or (len(texs) == 0):
        return True if cover["avg"] > cutoff else None
    for key in texs.keys():
        tracks = key.split("@AND@")
        if cover["track"] == tracks[0]:
            if cover["avg"] > cutoff:
                return True
        elif cover["track"] == tracks[1]:
            if cover["avg"] > notex:
                return True
    return None
def run_tex(cover, texs, check_texs, tex_notex, type_,
            detect_num, poss, target_datas):
    '''Fold one coverage record into the replicate bookkeeping.

    TEX+/TEX- records increment the counter of their library pair in
    ``texs`` and are remembered in ``check_texs``; once a pair reaches the
    ``tex_notex`` requirement (detected in 1 or both libraries), the
    record -- and, for two-library mode, the pair's first record -- is
    appended to ``target_datas``.  Fragmented-library records are accepted
    immediately.  For UTR-derived sRNA candidates ``poss`` is widened to
    span all accepted records.  Returns the updated detection count.
    '''
    if (cover["type"] == "tex") or (cover["type"] == "notex"):
        for key in texs.keys():
            if cover["track"] in key:
                texs[key] += 1
                check_texs[key].append(cover)
                # pair satisfied the TEX+/TEX- replicate requirement
                if texs[key] >= tex_notex:
                    if type_ == "sRNA_utr_derived":
                        if detect_num == 0:
                            poss["start"] = cover["final_start"]
                            poss["end"] = cover["final_end"]
                        else:
                            exchange_start_end(poss, cover)
                    detect_num += 1
                    if cover not in target_datas:
                        target_datas.append(cover)
                    # in two-library mode also keep the partner record
                    if tex_notex != 1:
                        if check_texs[key][0] not in target_datas:
                            target_datas.append(check_texs[key][0])
                            if type_ == "sRNA_utr_derived":
                                exchange_start_end(poss, check_texs[key][0])
    elif cover["type"] == "frag":
        # fragmented libraries have no TEX pairing: accept directly
        if type_ == "sRNA_utr_derived":
            if detect_num == 0:
                poss["start"] = cover["final_start"]
                poss["end"] = cover["final_end"]
            else:
                exchange_start_end(poss, cover)
        detect_num += 1
        target_datas.append(cover)
    return detect_num
def check_tex(template_texs, covers, target_datas, notex, type_, poss, median,
              coverages, utr_type, cutoff_coverage, tex_notex):
    '''Check the candidates for TEX+/- libs
    (should be detected in one or both of libs).

    Applies the cutoff appropriate for *type_* to every record in
    *covers* and forwards the survivors to :func:`run_tex`, which fills
    ``target_datas`` / ``poss`` in place.  The TEX pairing table is
    deep-copied from *template_texs* so counters start fresh for every
    call.  Returns the number of detections.
    '''
    detect_num = 0
    check_texs = {}
    # fresh per-call counters; the template is never mutated
    texs = copy.deepcopy(template_texs)
    for key, num in texs.items():
        check_texs[key] = []
    for cover in covers:
        run_check_tex = False
        if type_ == "sRNA_utr_derived":
            cutoffs = define_cutoff(coverages, median, utr_type)
            if cover["track"] in cutoffs.keys():
                if cover["avg"] > cutoffs[cover["track"]]:
                    run_check_tex = True
            else:
                # tracks without a computed cutoff pass unconditionally
                run_check_tex = True
        elif type_ == "sORF":
            if cover["avg"] > coverages[cover["track"]]:
                run_check_tex = True
        elif (type_ == "terminator"):
            run_check_tex = True
        elif (type_ == "normal"):
            run_check_tex = check_notex(cover, texs, cutoff_coverage,
                                        notex)
        else:
            if cover["avg"] > cutoff_coverage:
                run_check_tex = True
        if run_check_tex:
            detect_num = run_tex(cover, texs, check_texs, tex_notex,
                                 type_, detect_num, poss, target_datas)
    return detect_num
def exchange_start_end(poss, cover):
    """Widen the interval stored in ``poss`` in place.

    ``poss["start"]`` is lowered to ``cover["final_start"]`` if that is
    smaller, and ``poss["end"]`` is raised to ``cover["final_end"]`` if that
    is larger, so the interval always covers the union of both spans.
    """
    poss["start"] = min(poss["start"], cover["final_start"])
    poss["end"] = max(poss["end"], cover["final_end"])
def replicate_comparison(args_srna, srna_covers, strand, type_, median,
                         coverages, utr_type, notex, cutoff_coverage, texs):
    '''Check the number of replicates which fit the cutoff in order to remove
    the candidates which only can be detected in few replicates.

    Iterates over the coverage records grouped by condition in
    ``srna_covers``; a condition counts as detected when the number of
    replicates passing ``check_tex`` reaches the replicate threshold from
    ``args_srna.replicates``. Returns a summary dict with the best average
    coverage ("best"/"high"/"low"/"track"), the merged start/end (for
    UTR-derived sRNAs), per-condition detection counts ("conds") and all
    accepted records ("detail").
    '''
    srna_datas = {"best": 0, "high": 0, "low": 0, "start": -1,
                  "end": -1, "track": "", "detail": [], "conds": {}}
    tmp_poss = {"start": -1, "end": -1, "pos": -1,
                "all_start": [], "all_end": []}
    detect = False
    for cond, covers in srna_covers.items():
        # check_tex appends accepted records to srna_datas["detail"] and
        # updates tmp_poss in place; it returns the detection count.
        detect_num = check_tex(
            texs, covers, srna_datas["detail"], notex, type_, tmp_poss,
            median, coverages, utr_type, cutoff_coverage,
            args_srna.tex_notex)
        # Compare against the replicate threshold of the matching library set.
        if ("texnotex" in cond):
            tex_rep = get_repmatch(args_srna.replicates["tex"], cond)
            if detect_num >= tex_rep:
                detect = True
        elif ("frag" in cond):
            frag_rep = get_repmatch(args_srna.replicates["frag"], cond)
            if detect_num >= frag_rep:
                detect = True
        if detect:
            detect = False  # reset for the next condition
            if type_ == "sRNA_utr_derived":
                # Remember this condition's interval; merged after the loop.
                tmp_poss["all_start"].append(tmp_poss["start"])
                tmp_poss["all_end"].append(tmp_poss["end"])
            else:
                # Take the most extreme position w.r.t. the strand direction.
                if strand == "+":
                    sort_datas = sorted(srna_datas["detail"],
                                        key=lambda k: (k["pos"]))
                else:
                    sort_datas = sorted(srna_datas["detail"],
                                        key=lambda k: (k["pos"]), reverse=True)
                srna_datas["pos"] = sort_datas[-1]["pos"]
            # Track the record with the highest average coverage so far.
            sort_datas = sorted(srna_datas["detail"], key=lambda k: (k["avg"]))
            avg = sort_datas[-1]["avg"]
            srna_datas["conds"][cond] = str(detect_num)
            if (avg > srna_datas["best"]):
                srna_datas["high"] = sort_datas[-1]["high"]
                srna_datas["low"] = sort_datas[-1]["low"]
                srna_datas["best"] = avg
                srna_datas["track"] = sort_datas[-1]["track"]
    if type_ == "sRNA_utr_derived":
        # Merge per-condition intervals into one covering span, or -1/-1
        # when nothing was detected.
        if len(tmp_poss["all_start"]) != 0:
            srna_datas["start"] = min(tmp_poss["all_start"])
            srna_datas["end"] = max(tmp_poss["all_end"])
        else:
            srna_datas["start"] = -1
            srna_datas["end"] = -1
    return srna_datas
/Heterogeneous_Highway_Env-0.0.3-py3-none-any.whl/Heterogeneous_Highway_Env/envs/parking_env.py | from abc import abstractmethod
from gym import Env
from gym.envs.registration import register
import numpy as np
from highway_env.envs.common.abstract import AbstractEnv
from highway_env.envs.common.observation import MultiAgentObservation, observation_factory
from highway_env.road.lane import StraightLane, LineType
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.objects import Landmark
class GoalEnv(Env):
    """
    Interface for A goal-based environment.
    This interface is needed by agents such as Stable Baseline3's Hindsight Experience Replay (HER) agent.
    It was originally part of https://github.com/openai/gym, but was later moved
    to https://github.com/Farama-Foundation/gym-robotics. We cannot add gym-robotics to this project's dependencies,
    since it does not have an official PyPi package, PyPi does not allow direct dependencies to git repositories.
    So instead, we just reproduce the interface here.
    A goal-based environment. It functions just as any regular OpenAI Gym environment but it
    imposes a required structure on the observation_space. More concretely, the observation
    space is required to contain at least three elements, namely `observation`, `desired_goal`, and
    `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
    `achieved_goal` is the goal that it currently achieved instead. `observation` contains the
    actual observations of the environment as per usual.
    """

    # Subclasses must implement compute_reward; everything else is inherited
    # from gym.Env unchanged.
    @abstractmethod
    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: dict) -> float:
        """Compute the step reward. This externalizes the reward function and makes
        it dependent on a desired goal and the one that was achieved. If you wish to include
        additional rewards that are independent of the goal, you can include the necessary values
        to derive it in 'info' and compute it accordingly.
        Args:
            achieved_goal (object): the goal that was achieved during execution
            desired_goal (object): the desired goal that we asked the agent to attempt to achieve
            info (dict): an info dictionary with additional information
        Returns:
            float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
            goal. Note that the following should always hold true:
            ob, reward, done, info = env.step()
            assert reward == env.compute_reward(ob['achieved_goal'], ob['desired_goal'], info)
        """
        raise NotImplementedError
class ParkingEnv(AbstractEnv, GoalEnv):
    """
    A continuous control environment.
    It implements a reach-type task, where the agent observes their position and speed and must
    control their acceleration and steering so as to reach a given goal.
    Credits to Munir Jojo-Verge for the idea and initial implementation.
    """

    # For parking env with GrayscaleObservation, the env need
    # this PARKING_OBS to calculate the reward and the info.
    # Bug fixed by Mcfly(https://github.com/McflyWZX)
    PARKING_OBS = {"observation": {
            "type": "KinematicsGoal",
            "features": ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h'],
            "scales": [100, 100, 5, 5, 1, 1],
            "normalize": False
        }}

    def __init__(self, config: dict = None) -> None:
        """Create the environment; the goal-specific observation type is
        built lazily in define_spaces()."""
        super().__init__(config)
        self.observation_type_parking = None

    @classmethod
    def default_config(cls) -> dict:
        """Return the default configuration, extending AbstractEnv's with
        parking-specific observation, action and reward settings."""
        config = super().default_config()
        config.update({
            "observation": {
                "type": "KinematicsGoal",
                "features": ['x', 'y', 'vx', 'vy', 'cos_h', 'sin_h'],
                "scales": [100, 100, 5, 5, 1, 1],
                "normalize": False
            },
            "action": {
                "type": "ContinuousAction"
            },
            # Per-feature weights for the weighted p-norm in compute_reward().
            "reward_weights": [1, 0.3, 0, 0, 0.02, 0.02],
            "success_goal_reward": 0.12,
            "collision_reward": -5,
            "steering_range": np.deg2rad(45),
            "simulation_frequency": 15,
            "policy_frequency": 5,
            "duration": 100,
            "screen_width": 600,
            "screen_height": 300,
            "centering_position": [0.5, 0.5],
            "scaling": 7,
            "controlled_vehicles": 1
        })
        return config

    def define_spaces(self) -> None:
        """
        Set the types and spaces of observation and action from config.
        """
        # The dedicated KinematicsGoal observation is always built (even when
        # the configured observation differs, e.g. grayscale) because reward
        # and success computations rely on it.
        super().define_spaces()
        self.observation_type_parking = observation_factory(self, self.PARKING_OBS["observation"])

    def _info(self, obs, action) -> dict:
        """Add an "is_success" entry (per agent in the multi-agent case) to
        the standard info dict."""
        info = super(ParkingEnv, self)._info(obs, action)
        if isinstance(self.observation_type, MultiAgentObservation):
            success = tuple(self._is_success(agent_obs['achieved_goal'], agent_obs['desired_goal']) for agent_obs in obs)
        else:
            obs = self.observation_type_parking.observe()
            success = self._is_success(obs['achieved_goal'], obs['desired_goal'])
        info.update({"is_success": success})
        return info

    def _reset(self):
        """Rebuild the road layout and respawn vehicles and goal."""
        self._create_road()
        self._create_vehicles()

    def _create_road(self, spots: int = 15) -> None:
        """
        Create a road composed of straight adjacent lanes.
        :param spots: number of spots in the parking
        """
        net = RoadNetwork()
        width = 4.0
        lt = (LineType.CONTINUOUS, LineType.CONTINUOUS)
        x_offset = 0
        y_offset = 10
        length = 8
        for k in range(spots):
            x = (k - spots // 2) * (width + x_offset) - width / 2
            # Two rows of perpendicular spots, above and below the aisle.
            net.add_lane("a", "b", StraightLane([x, y_offset], [x, y_offset+length], width=width, line_types=lt))
            net.add_lane("b", "c", StraightLane([x, -y_offset], [x, -y_offset-length], width=width, line_types=lt))
        self.road = Road(network=net,
                         np_random=self.np_random,
                         record_history=self.config["show_trajectories"])

    def _create_vehicles(self) -> None:
        """Create some new random vehicles of a given type, and add them on the road."""
        self.controlled_vehicles = []
        for i in range(self.config["controlled_vehicles"]):
            # Spawn each controlled vehicle 20 m apart with a random heading.
            vehicle = self.action_type.vehicle_class(self.road, [i*20, 0], 2*np.pi*self.np_random.rand(), 0)
            self.road.vehicles.append(vehicle)
            self.controlled_vehicles.append(vehicle)
        # Goal: the midpoint of a randomly chosen parking spot.
        lane = self.np_random.choice(self.road.network.lanes_list())
        self.goal = Landmark(self.road, lane.position(lane.length/2, 0), heading=lane.heading)
        self.road.objects.append(self.goal)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: dict, p: float = 0.5) -> float:
        """
        Proximity to the goal is rewarded
        We use a weighted p-norm
        :param achieved_goal: the goal that was achieved
        :param desired_goal: the goal that was desired
        :param dict info: any supplementary information
        :param p: the Lp^p norm used in the reward. Use p<1 to have high kurtosis for rewards in [0, 1]
        :return: the corresponding reward
        """
        return -np.power(np.dot(np.abs(achieved_goal - desired_goal), np.array(self.config["reward_weights"])), p)

    def _reward(self, action: np.ndarray) -> float:
        """Sum of per-agent goal-proximity rewards (single-agent obs is
        wrapped in a 1-tuple so both cases share one code path)."""
        obs = self.observation_type_parking.observe()
        obs = obs if isinstance(obs, tuple) else (obs,)
        return sum(self.compute_reward(agent_obs['achieved_goal'], agent_obs['desired_goal'], {})
                   for agent_obs in obs)

    def _is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> bool:
        """A goal is reached when the (negative) reward exceeds the success
        threshold from the config."""
        return self.compute_reward(achieved_goal, desired_goal, {}) > -self.config["success_goal_reward"]

    def _is_terminal(self) -> bool:
        """The episode is over if the ego vehicle crashed or the goal is reached."""
        time = self.time >= self.config["duration"]
        crashed = any(vehicle.crashed for vehicle in self.controlled_vehicles)
        obs = self.observation_type_parking.observe()
        obs = obs if isinstance(obs, tuple) else (obs,)
        # All controlled agents must succeed for the episode to end in success.
        success = all(self._is_success(agent_obs['achieved_goal'], agent_obs['desired_goal']) for agent_obs in obs)
        return bool(time or crashed or success)
class ParkingEnvActionRepeat(ParkingEnv):
    """ParkingEnv variant with a lower policy frequency (1 Hz, i.e. each
    action is held longer) and a shorter episode duration."""
    def __init__(self):
        super().__init__({"policy_frequency": 1, "duration": 20})
# Register both environments with Gym so they can be instantiated through
# gym.make("parking-v0") / gym.make("parking-ActionRepeat-v0").
# NOTE(review): entry_point references the 'highway_env.envs' module path,
# not this package's own module — presumably relying on the upstream
# package layout; confirm this resolves in the installed distribution.
register(
    id='parking-v0',
    entry_point='highway_env.envs:ParkingEnv',
)

register(
    id='parking-ActionRepeat-v0',
    entry_point='highway_env.envs:ParkingEnvActionRepeat'
)
/Django_patch-2.2.19-py3-none-any.whl/django/core/handlers/exception.py | import logging
import sys
from functools import wraps
from django.conf import settings
from django.core import signals
from django.core.exceptions import (
PermissionDenied, RequestDataTooBig, SuspiciousOperation,
TooManyFieldsSent,
)
from django.http import Http404
from django.http.multipartparser import MultiPartParserError
from django.urls import get_resolver, get_urlconf
from django.utils.log import log_response
from django.views import debug
def convert_exception_to_response(get_response):
    """
    Wrap the given get_response callable in exception-to-response conversion.
    All exceptions will be converted. All known 4xx exceptions (Http404,
    PermissionDenied, MultiPartParserError, SuspiciousOperation) will be
    converted to the appropriate response, and all other exceptions will be
    converted to 500 responses.
    This decorator is automatically applied to all middleware to ensure that
    no middleware leaks an exception and that the next middleware in the stack
    can rely on getting a response instead of an exception.
    """
    @wraps(get_response)
    def inner(request):
        try:
            return get_response(request)
        except Exception as exc:
            # Any leaked exception becomes an HTTP response here.
            return response_for_exception(request, exc)
    return inner
def response_for_exception(request, exc):
    """Convert an exception raised while handling ``request`` into an
    HttpResponse.

    Known 4xx exceptions map to 404/403/400 responses; SystemExit is
    re-raised so sys.exit() works; everything else becomes a 500 via
    handle_uncaught_exception(). The returned response is force-rendered
    so callers always receive a concrete response object.
    """
    if isinstance(exc, Http404):
        if settings.DEBUG:
            response = debug.technical_404_response(request, exc)
        else:
            response = get_exception_response(request, get_resolver(get_urlconf()), 404, exc)

    elif isinstance(exc, PermissionDenied):
        response = get_exception_response(request, get_resolver(get_urlconf()), 403, exc)
        log_response(
            'Forbidden (Permission denied): %s', request.path,
            response=response,
            request=request,
            exc_info=sys.exc_info(),
        )

    elif isinstance(exc, MultiPartParserError):
        response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc)
        log_response(
            'Bad request (Unable to parse request body): %s', request.path,
            response=response,
            request=request,
            exc_info=sys.exc_info(),
        )

    elif isinstance(exc, SuspiciousOperation):
        if isinstance(exc, (RequestDataTooBig, TooManyFieldsSent)):
            # POST data can't be accessed again, otherwise the original
            # exception would be raised.
            request._mark_post_parse_error()

        # The request logger receives events for any problematic request
        # The security logger receives events for all SuspiciousOperations
        security_logger = logging.getLogger('django.security.%s' % exc.__class__.__name__)
        security_logger.error(
            str(exc),
            extra={'status_code': 400, 'request': request},
        )
        if settings.DEBUG:
            response = debug.technical_500_response(request, *sys.exc_info(), status_code=400)
        else:
            response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc)

    elif isinstance(exc, SystemExit):
        # Allow sys.exit() to actually exit. See tickets #1023 and #4701
        raise

    else:
        # Unhandled exception: notify listeners and produce a 500 response.
        signals.got_request_exception.send(sender=None, request=request)
        response = handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info())
        log_response(
            '%s: %s', response.reason_phrase, request.path,
            response=response,
            request=request,
            exc_info=sys.exc_info(),
        )

    # Force a TemplateResponse to be rendered.
    if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
        response = response.render()

    return response
def get_exception_response(request, resolver, status_code, exception):
    """Resolve and invoke the error handler registered for ``status_code``.

    If resolving or calling the handler itself raises, fall back to the
    uncaught-exception path (a 500 response) after notifying listeners.
    """
    try:
        handler, handler_kwargs = resolver.resolve_error_handler(status_code)
        return handler(request, **dict(handler_kwargs, exception=exception))
    except Exception:
        signals.got_request_exception.send(sender=None, request=request)
        return handle_uncaught_exception(request, resolver, sys.exc_info())
def handle_uncaught_exception(request, resolver, exc_info):
    """
    Processing for any otherwise uncaught exceptions (those that will
    generate HTTP 500 responses).

    With DEBUG_PROPAGATE_EXCEPTIONS the active exception is re-raised
    (bare ``raise``: callers invoke this from inside an except block);
    with DEBUG a technical 500 page is shown; otherwise the configured
    handler500 view is called.
    """
    if settings.DEBUG_PROPAGATE_EXCEPTIONS:
        raise

    if settings.DEBUG:
        return debug.technical_500_response(request, *exc_info)

    # Return an HttpResponse that displays a friendly error message.
    callback, param_dict = resolver.resolve_error_handler(500)
    return callback(request, **param_dict)
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/core/streams.py | import os
import os.path as osp
import sys
import tempfile
from io import open as io_open
from typing import Optional, Tuple
import requests
from mathics.settings import ROOT_DIR
# Directories searched (in order) when resolving relative Mathics file
# names: current dir, the user's home, then the bundled data/ and
# packages/ trees under ROOT_DIR.
HOME_DIR = osp.expanduser("~")
PATH_VAR = [".", HOME_DIR, osp.join(ROOT_DIR, "data"), osp.join(ROOT_DIR, "packages")]
def create_temporary_file(prefix="Mathics3-", suffix=None, delete=True):
    """Return the path of a fresh, uniquely named temporary file.

    The file is created with ``prefix`` and ``suffix`` in its name; when
    ``delete`` is True (the default) the file is removed again on close,
    so only the unique *name* is returned for the caller to reuse.

    :param prefix: filename prefix for the temporary file.
    :param suffix: filename suffix; "" is treated the same as None.
    :param delete: whether the created file is deleted when closed.
    :return: the absolute path of the temporary file.
    """
    if suffix == "":
        suffix = None

    # Bug fix: ``prefix`` was previously accepted but never forwarded, so
    # every temporary file got tempfile's default "tmp" prefix.
    fp = tempfile.NamedTemporaryFile(delete=delete, prefix=prefix, suffix=suffix)
    result = fp.name
    fp.close()
    return result
def urlsave_tmp(url, location=None, **kwargs):
    """Download ``url`` and store its body at ``location``.

    When ``location`` is None a temporary file is created whose suffix is
    taken from the last path component of the URL (everything after the
    first "."), so the downloaded file keeps a recognizable extension.

    :return: the path written to, or None on any download/write failure.
    """
    suffix = ""
    strip_url = url.split("/")
    if len(strip_url) > 3:
        # Extract an extension-like suffix from the URL's final component.
        strip_url = strip_url[-1]
        if strip_url != "":
            suffix = strip_url[len(strip_url.split(".")[0]) :]
    try:
        r = requests.get(url, allow_redirects=True)
        if location is None:
            location = create_temporary_file(prefix="Mathics3-url-", suffix=suffix)
        with open(location, "wb") as fp:
            fp.write(r.content)
            result = fp.name
        return result
    except Exception:
        # Best-effort download: any failure is reported as None.
        return None
def path_search(filename: str) -> Tuple[str, bool]:
    """
    Search for a Mathics `filename` possibly adding extensions ".mx", or ".m"
    or as a file under directory PATH_VAR or as an Internet address.

    Return a tuple ``(path, is_temporary_file)`` where ``path`` is the
    resolved file name (or None if nothing was found) and
    ``is_temporary_file`` is True when the result is a freshly downloaded
    temporary file (Internet addresses), False otherwise.
    """
    # For names of the form "name`", search for name.mx and name.m
    is_temporary_file = False
    if filename[-1] == "`":
        filename = filename[:-1].replace("`", osp.sep)
        for ext in [".mx", ".m"]:
            result, is_temporary_file = path_search(filename + ext)
            if result is not None:
                # Found via recursion; skip the generic lookup below.
                filename = None
                break
    if filename is not None:
        result = None
        # If filename is an Internet address, download the file
        # and store it in a tempory file
        lenfn = len(filename)
        if (
            (lenfn > 7 and filename[:7] == "http://")
            or (lenfn > 8 and filename[:8] == "https://")
            or (lenfn > 6 and filename[:6] == "ftp://")
        ):
            result = urlsave_tmp(filename)
            is_temporary_file = True
        else:
            # Try each search directory, ending with "" (the name as-is,
            # e.g. an absolute path).
            for p in PATH_VAR + [""]:
                path = osp.join(p, filename)
                if osp.exists(path):
                    result = path
                    break

            # If `result` resolves to a dir, search within for Kernel/init.m and init.m
            if result is not None and osp.isdir(result):
                for ext in [osp.join("Kernel", "init.m"), "init.m"]:
                    tmp = osp.join(result, ext)
                    if osp.isfile(tmp):
                        return tmp, is_temporary_file
    return result, is_temporary_file
class Stream:
    """
    Opens a stream

    This can be used in a context_manager like this:
    with Stream(pypath, "r") as f:
         ...
    However see StreamManager and MathicsOpen which wraps this.
    """

    def __init__(
        self,
        name: str,
        mode="r",
        encoding=None,
        io=None,
        channel_num=None,
        is_temporary_file: bool = False,
    ):
        """Record the stream's metadata; no file is opened until __enter__.

        ``channel_num`` defaults to the next free channel in the global
        ``stream_manager``.
        """
        if channel_num is None:
            channel_num = stream_manager.next
        if mode is None:
            mode = "r"
        self.name = name
        self.mode = mode
        self.encoding = encoding
        self.io = io
        self.n = channel_num
        self.is_temporary_file = is_temporary_file

        if mode not in ["r", "w", "a", "rb", "wb", "ab"]:
            raise ValueError("Can't handle mode {0}".format(mode))

    def __enter__(self):
        """Resolve the name, open the file, and register it with the
        global stream_manager. Returns the raw file object, not self."""
        # find path
        path, is_temporary_file = path_search(self.name)
        if path is None and self.mode in ["w", "a", "wb", "ab"]:
            # Write/append modes may create a new file at the given name.
            path = self.name
        if path is None:
            raise IOError

        # determine encoding
        if "b" not in self.mode:
            encoding = self.encoding
        else:
            # Binary modes must not carry a text encoding.
            encoding = None

        # open the stream
        fp = io_open(path, self.mode, encoding=encoding)
        stream_manager.add(
            name=path,
            mode=self.mode,
            encoding=encoding,
            io=fp,
            is_temporary_file=is_temporary_file,
        )
        return fp

    def __exit__(self, type, value, traceback):
        """Close the underlying file (if any) and unregister the channel."""
        if self.io is not None:
            self.io.close()
        # Leave around self.io so we can call closed() to query its status.
        stream_manager.delete(self.n)
class StreamsManager:
    """Singleton registry mapping channel numbers to open Stream objects.

    Channels 0-2 are reserved for stdin/stdout/stderr (registered at
    module load time below); ``next`` therefore never returns a number
    below 3.
    """
    __instance = None
    # Shared channel-number -> Stream mapping (class attribute: one table
    # for the singleton).
    STREAMS = {}

    @staticmethod
    def get_instance():
        """Static access method."""
        if StreamsManager.__instance is None:
            StreamsManager()
        return StreamsManager.__instance

    def __init__(self):
        """Virtually private constructor."""
        if StreamsManager.__instance is not None:
            raise Exception("this class is a singleton!")
        else:
            StreamsManager.__instance = self

    def add(
        self,
        name: str,
        mode: Optional[str] = None,
        encoding=None,
        io=None,
        num: Optional[int] = None,
        is_temporary_file: bool = False,
    ) -> Optional["Stream"]:
        """Register a new Stream under channel ``num`` (or the next free
        channel) and return it. Raises if the channel is already open."""
        if num is None:
            num = self.next
            # In theory in this branch we won't find num.
        # sanity check num
        found = self.lookup_stream(num)
        if found and found is not None:
            raise Exception(f"Stream {num} already open")
        stream = Stream(name, mode, encoding, io, num, is_temporary_file)
        self.STREAMS[num] = stream
        return stream

    def delete(self, n: int) -> bool:
        """Remove channel ``n``; return False when it was not registered."""
        stream = self.lookup_stream(n)
        if stream is None:
            return False
        self.delete_stream(stream)
        return True

    def delete_stream(self, stream: Stream):
        """
        Delete `stream` from the list of streams we
        keep track of.

        A stream backed by a temporary file also has that file removed
        from disk.
        """
        is_temporary_file = stream.is_temporary_file
        if is_temporary_file:
            os.unlink(stream.name)
        del self.STREAMS[stream.n]

    def lookup_stream(self, n: int) -> Optional[Stream]:
        """
        Find and return a stream given is stream number `n`.
        None is returned if no stream found.
        """
        return self.STREAMS.get(n, None)

    @property
    def next(self) -> int:
        """Smallest channel number above every registered one (min 3)."""
        numbers = [stream.n for stream in self.STREAMS.values()] + [2]
        return max(numbers) + 1
# Module-level singleton; the process's standard streams are pre-registered
# on the fixed channels 0 (stdin), 1 (stdout) and 2 (stderr).
stream_manager = StreamsManager()

stream_manager.add("stdin", mode="r", num=0, io=sys.stdin)
stream_manager.add("stdout", mode="w", num=1, io=sys.stdout)
stream_manager.add("stderr", mode="w", num=2, io=sys.stderr)
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/c1te.py | from logging import getLogger
import numpy as np
def usage():
    """Log this lattice plugin's module docstring at INFO level."""
    getLogger().info(__doc__)
# Plugin metadata consumed by the GenIce loader: literature reference for
# the C1 structure, the usage text (logged once at import time), and a
# one-line description.
desc={"ref": {"C1": "Page 12 of the Supplemenrary Material of P. Teeratchanan and A. Hermann, Computational phase diagrams of noble gas hydrates under pressure, J. Chem. Phys. 143, 154507 (2015); https://doi.org/10.1063/1.4933371"},
      "usage": usage(),
      "brief": "Hydrogen-ordered hydrogen hydrate C1 by Teeratchanan. (Positions of guests are supplied.)"
      }
def pick_atoms(atoms, names, repeat=(1, 1, 1)):
    """Yield (name, fractional_position) pairs for the selected atoms,
    replicated over a repeat[0] x repeat[1] x repeat[2] supercell.

    Positions are rescaled so the returned fractional coordinates refer
    to the enlarged cell.
    """
    scale = np.array(repeat)
    for name, frac in atoms:
        if name not in names:
            continue
        for i in range(repeat[0]):
            for j in range(repeat[1]):
                for k in range(repeat[2]):
                    # Shift into cell (i, j, k), then renormalize to the
                    # supercell's fractional coordinates.
                    yield name, (frac + np.array([i, j, k])) / scale
def argparser(arg):
    """Build the C1 hydrate lattice and publish it through the module-level
    globals that GenIce reads (pairs, waters, cell, cagepos, ...).

    ``arg`` is the plugin option string; it is currently unused — this
    lattice takes no options.
    """
    global pairs, fixed, waters, coord, density, cell, cagepos, cagetype
    logger = getLogger()
    # Ref. C1
    # Asymmetric-unit atoms (label, fractional x y z); Ne1 marks the guest
    # site used below to place cages.
    atoms="""
    O1 0.2228 0.1966 0.0454
    O2 0.5236 0.8974 0.1466
    H1 0.8184 0.5344 0.3198
    H2 0.2244 0.2188 0.2045
    H3 0.4384 0.8815 0.1496
    H4 0.5729 -0.0253 0.2297
    Ne1 0.0000 0.0000 0.7361
    """
    # Ref. C1
    # space group: R-3 No. 148
    # in a rhombus cell
    symops="""
      x,            y,            z
      -y,           x-y,          z
      -x+y,         -x,           z
      -x,           -y,           -z
      y,            -x+y,         -z
      x-y,          x,            -z
      x+2/3,        y+1/3,        z+1/3
      -y+2/3,       x-y+1/3,      z+1/3
      -x+y+2/3,     -x+1/3,       z+1/3
      -x+2/3,       -y+1/3,       -z+1/3
      y+2/3,        -x+y+1/3,     -z+1/3
      x-y+2/3,      x+1/3,        -z+1/3
      x+1/3,        y+2/3,        z+2/3
      -y+1/3,       x-y+2/3,      z+2/3
      -x+y+1/3,     -x+2/3,       z+2/3
      -x+1/3,       -y+2/3,       -z+2/3
      y+1/3,        -x+y+2/3,     -z+2/3
      x-y+1/3,      x+2/3,        -z+2/3
    """.replace(',', ' ')
    # Ref. C1
    # Hexagonal cell parameters (converted from Angstrom to nm).
    a=12.673 / 10.0 #nm
    c= 6.017 / 10.0 #nm
    C= 120
    from genice.cell import cellvectors
    cell = cellvectors(a,a,c,C=C)

    # helper routines to make from CIF-like data
    from genice import CIF
    atomd = CIF.atomdic(atoms)
    atoms = CIF.fullatoms(atomd, CIF.symmetry_operators(symops))

    # Guest (Ne) positions become the cage sites.
    cagetype = []
    cagepos = []
    for name, pos in pick_atoms(atoms, ("Ne1",)):
        cagetype.append(name)
        cagepos.append(pos)

    sops = CIF.symmetry_operators(symops)
    waters, fixed = CIF.waters_and_pairs(cell, atomd, sops)
    # set pairs in this way for hydrogen-ordered ices.
    pairs = fixed

    # Density in g/cm^3 from the water count and the cell volume.
    density = 18*len(waters)/6.022e23 / (np.linalg.det(cell)*1e-21)

    coord = "relative"
/ColabGitlabSetup-0.0.5.tar.gz/ColabGitlabSetup-0.0.5/colab_gitlab_setup/colab_gitlab_setup.py | import os
import subprocess
import re
from google.colab import drive, files
class ColabGitlabSetup:
    """Helper for using a GitLab repository from a Google Colab notebook:
    uploads/installs SSH keys, loads them into an ssh-agent, mounts Google
    Drive, and clones the repository into it.

    NOTE(review): shell commands are built with f-strings and run through
    os.system / subprocess — paths containing spaces or shell
    metacharacters would break or be misinterpreted; confirm inputs are
    trusted notebook parameters.
    """
    def __init__(self,auto=False, mount_folder="/content/drive", git_folder="gitlabs",ssh_host="gitlab.com",
                 ssh_tar_file="ssh.tar.gz",ssh_tar_folder="ssh-colab",ssh_install_folder="/root/.ssh",
                 repo_account="dtime-ai",repo_group="admin",repo_name="google-colab-integration"):
        """Store the configuration; with ``auto=True`` also run the full
        key-install / agent-load / drive-mount sequence immediately."""
        self.mount_folder = mount_folder
        self.git_folder = f"{self.mount_folder}/MyDrive/{git_folder}"
        self.repo_folder = f"{self.git_folder}/{repo_name}"
        self.ssh_host = ssh_host
        self.ssh_tar_file = ssh_tar_file
        self.ssh_tar_folder = ssh_tar_folder
        self.ssh_install_folder = ssh_install_folder
        self.repo_account = repo_account
        self.repo_group = repo_group
        self.repo_name = repo_name
        if auto:
            self.install_ssh_keys()
            self.load_private_key()
            self.mount_gdrive()

    def install_ssh_keys(self):
        """Recreate ~/.ssh, prompt for an uploaded key tarball, and unpack
        it into place (destructive: wipes any existing install folder)."""
        if os.path.isdir(self.ssh_install_folder):
            os.system(f"rm -rf {self.ssh_install_folder}")
        os.system(f"mkdir {self.ssh_install_folder}")
        os.chdir(self.ssh_install_folder)
        print(f"Please select the SSH key tarball file to upload...")
        from google.colab import files   # cough, cough!
        uploaded = files.upload()
        print("Installing ssh keys...")
        os.system(f"tar xvzf {self.ssh_tar_file}")
        os.system(f"cp {self.ssh_tar_folder}/* . && rm -rf {self.ssh_tar_folder} && rm -rf {self.ssh_tar_file}")
        os.system("chmod 700 .")
        print("...Installed")

    def load_private_key(self):
        """Derive the private key file name from the first *.pub file found
        and add it to a freshly started ssh-agent."""
        print("Identifying the name of the private ssh key file...")
        os.chdir(self.ssh_install_folder)
        files = [i for i in os.listdir(".") if i.endswith('.pub')]
        self.private_key_file = f"{self.ssh_install_folder}/{os.path.splitext(files[0])[0]}"
        print(f"...Identified as {self.private_key_file}")
        print(f"Loading {self.private_key_file} into an ssh-agent...")
        self.ssh_agent_setup()
        self.ssh_agent_addkey( self.private_key_file )
        self.ssh_agent_list()
        #self.ssh_agent_kill()
        print("...Loaded")

    def mount_gdrive(self):
        """Mount Google Drive (if not already mounted) and ensure the git
        clone folder exists inside it."""
        print(f"Mounting google drive to {self.mount_folder}...")
        if not os.path.isdir(self.mount_folder):
            drive.mount(self.mount_folder)
        print("...Mounted")
        print(f"Creating {self.git_folder} to clone repositories into...")
        if not os.path.isdir(self.git_folder):
            os.system(f"mkdir {self.git_folder}")
        print("... Git folder created")

    def clone(self):
        """Clone the configured repository over SSH (no-op if the target
        folder already exists)."""
        os.chdir(self.git_folder)
        if not os.path.isdir(self.repo_folder):
            os.system(f"git clone git@gitlab.com:{self.repo_account}/{self.repo_group}/{self.repo_name}.git")

    def ssh_agent_setup(self):
        """Start an ssh-agent (if none is active) and export its
        SSH_AUTH_SOCK / SSH_AGENT_PID into this process's environment."""
        if os.environ.get( 'SSH_AUTH_SOCK' ) is None:
            process = subprocess.run( [ 'ssh-agent', '-s' ], stdout = subprocess.PIPE, universal_newlines = True )
            # Parse the agent's shell-style output for the socket and pid.
            OUTPUT_PATTERN = re.compile( 'SSH_AUTH_SOCK=(?P<socket>[^;]+).*SSH_AGENT_PID=(?P<pid>\d+)', re.MULTILINE | re.DOTALL )
            match = OUTPUT_PATTERN.search( process.stdout )
            agentData = match.groupdict()
            os.environ[ 'SSH_AUTH_SOCK' ] = agentData[ 'socket' ]
            os.environ[ 'SSH_AGENT_PID' ] = agentData[ 'pid' ]

    def ssh_agent_addkey(self,keyFile):
        """Add ``keyFile`` to the running ssh-agent."""
        process = subprocess.run( [ 'ssh-add', keyFile ] )
        print(process)

    def ssh_agent_list(self):
        """Print the keys currently held by the ssh-agent."""
        process = subprocess.run( [ 'ssh-add', "-l" ] )
        print(process)

    def ssh_agent_kill(self):
        """Stop the ssh-agent and drop its environment variables."""
        process = subprocess.run( [ 'ssh-agent', '-k' ] )
        print(process)
        del os.environ[ 'SSH_AUTH_SOCK' ]
        del os.environ[ 'SSH_AGENT_PID' ]

    def git_config_globals(self,user_name="",user_email=""):
        """Set the global git user.name / user.email for commits."""
        os.system(f"git config --global user.name '{user_name}'")
        os.system(f"git config --global user.email '{user_email}'")
/Cactus-3.3.3.tar.gz/Cactus-3.3.3/cactus/skeleton/locale/README.md | Internationalization
====================
Using internationalization with Cactus
--------------------------------------
To enable internationalization for your project:
1. Add a `locale` key to (one of your) configuration file(s)
2. Mark strings for translation in your site (using `{% trans %}`)
3. Run `cactus messages:make`
4. Edit the .po file that was created with translations.
Multiple languages with Cactus
------------------------------
To make the best of translations, you'll need multiple configuration files: one per language you'd like to support.
This lets you transparently deploy multiple versions of your website to multiple buckets (one per language).
| PypiClean |
/Effulge-0.0.1.tar.gz/Effulge-0.0.1/src/effulge/scatter_tree_nodes.py | from enum import Enum
class NodeType(Enum):
    """
    Enumeration Class for Type of Node in a Scatter Tree
    """
    PRIMITIVE = 1      # leaf node holding a primitive value
    LIST_OR_TUPLE = 2  # ordered collection node
    DICTIONARY = 3     # mapping node whose members are KEY_VALUE nodes
    KEY_VALUE = 4      # one key/value pair inside a DICTIONARY node
class Marker(Enum):
    """
    Enumeration Class to Flag a Scatter Tree Node
        - GREEN, when a node is not matched yet
        - RED, when a node has been matched

    NOTE: Enum members are always truthy regardless of their value, so
    ``if node.get_marker():`` does NOT distinguish GREEN from RED —
    compare with ``is Marker.GREEN`` or use ``.value`` instead.
    """
    RED = False
    GREEN = True
class Node:
    """Base node of a scatter tree.

    Every node carries a NodeType (``type``) and a one-way match flag
    (``marker``) that starts GREEN and can be flipped to RED exactly once.

    Structure:
        +------------+
        |    type    |
        |------------|
        |   marker   |
        +------------+
    """
    def __init__(self, node_type):
        """Validate ``node_type`` and initialize the flag to GREEN."""
        if node_type not in NodeType:
            raise TypeError("Invalid value for 'node_type'")
        self.type = node_type
        self.marker = Marker.GREEN

    def get_type(self):
        """Return this node's NodeType."""
        return self.type

    def get_marker(self):
        """Return this node's Marker flag."""
        return self.marker

    def set_marker(self):
        """Flip the flag from GREEN to RED; raise if it was already RED."""
        if self.marker is not Marker.GREEN:
            raise ValueError("Flag is already marked. Cannot mark again.")
        self.marker = Marker.RED
class PrimitiveNode(Node):
    """
    Class to denote a Node representing Primitive Datatype.
    Structure:
        +------------+
        |    type    |
        |------------|
        |   marker   |
        |------------|
        |   value    |
        +------------+
    """
    def __init__(self, value):
        # ``value`` is the wrapped primitive; no validation is performed.
        super().__init__(NodeType.PRIMITIVE)
        self.value = value

    def get_value(self):
        """
        Getter method for attribute 'value'
        """
        return self.value
class CollectionNode(Node):
    """Node representing a collection (list/tuple or dictionary).

    ``members`` is filled incrementally via add_member() up to the declared
    ``size``; get_members() refuses to return a partially filled node.

    Structure:
        +------------+
        |    type    |
        |------------|
        |   marker   |
        |------------|
        |    size    |
        |------------|
        |   members  |
        +------------+
    """
    def __init__(self, collectionType, size):
        """Validate the collection kind and pre-declare the member count."""
        if collectionType not in (NodeType.LIST_OR_TUPLE, NodeType.DICTIONARY):
            raise TypeError("Invalid value for 'collectionType'")
        super().__init__(collectionType)
        self.size = size
        self.members = []

    def get_size(self):
        """Return the declared number of members."""
        return self.size

    def get_members(self):
        """Return the member list; raise if fewer than ``size`` were added."""
        if len(self.members) != self.size:
            raise ValueError("Attribute 'members' is incomplete")
        return self.members

    def add_member(self, node):
        """Append one child node; raise once ``size`` members exist."""
        if len(self.members) == self.size:
            raise OverflowError("cannot add members more than the initialized size")
        self.members.append(node)
        return True
class ListNode(CollectionNode):
    """
    Class to denote a Node representing a List or Tuple.
    Structure:
        +------------+
        |    type    |
        |------------|
        |   marker   |
        |------------|
        |    size    |
        |------------|
        |   members  |
        +------------+
    """
    def __init__(self, size):
        super().__init__(NodeType.LIST_OR_TUPLE, size)

    def filter_collection_members(self, member_type, member_length):
        """
        Method to filter selective unmatched 'members'
        which are collections with known length.

        Bug fix: the previous ``and m.get_marker()`` never excluded anything
        because Enum members are always truthy (even Marker.RED, whose
        *value* is False). Compare identity with Marker.GREEN so already
        matched (RED) nodes are skipped as intended.
        """
        if member_type not in (NodeType.LIST_OR_TUPLE, NodeType.DICTIONARY):
            raise TypeError("Invalid value for 'member_type'")
        match = [ m for m in self.get_members()
                      if m.get_type() is member_type
                      and m.get_size() == member_length
                      and m.get_marker() is Marker.GREEN
                ]
        return match

    def filter_primitive_members(self, member_value):
        """
        Method to filter selective unmatched 'members'
        which are primitive with known value.
        """
        match = [ m for m in self.get_members()
                      if m.get_type() is NodeType.PRIMITIVE
                      and m.get_value() == member_value
                      and m.get_marker() is Marker.GREEN
                ]
        return match
class DictNode(CollectionNode):
    """
    Class to denote a Node representing a Dictionary.
    Structure:
        +------------+
        |    type    |
        |------------|
        |   marker   |
        |------------|
        |    size    |
        |------------|
        |   members  |
        +------------+
    """
    def __init__(self, size):
        super().__init__(NodeType.DICTIONARY, size)

    def filter_similar_members(self, sample_member):
        """
        Method to filter selective 'members'
        which are similar to a given key value node.

        Two KEY_VALUE nodes are "similar" when their key nodes and value
        nodes have the same NodeType and, per side, the same value
        (primitive) or the same declared size (collection).

        NOTE(review): unlike ListNode's filters, this does not exclude
        RED-marked members — confirm whether already-matched pairs should
        be filtered out here too.
        """
        #
        # Per-side filter: a collection side matches on size, a primitive
        # side matches on value.
        key_type = sample_member.get_key_reference().get_type()
        if key_type is NodeType.PRIMITIVE:
            is_key_collection = False
            key_filter = sample_member.get_key_reference().get_value()
        else:
            is_key_collection = True
            key_filter = sample_member.get_key_reference().get_size()
        #
        value_type = sample_member.get_value_reference().get_type()
        if value_type is NodeType.PRIMITIVE:
            is_value_collection = False
            value_filter = sample_member.get_value_reference().get_value()
        else:
            is_value_collection = True
            value_filter = sample_member.get_value_reference().get_size()
        #
        # filter matching for K node type and V node type
        both_types_match = [ m for m in self.get_members()
                                 if m.get_key_reference().get_type() == key_type
                                 and m.get_value_reference().get_type() == value_type
                           ]
        # filter matching for K node size or K node value
        only_key_match = [ m for m in both_types_match
                               if ( is_key_collection
                                    and m.get_key_reference().get_size() == key_filter
                                  )
                               or ( (not is_key_collection)
                                    and m.get_key_reference().get_value() == key_filter
                                  )
                         ]
        # filter matching for V node size or V node value
        match = [ m for m in only_key_match
                      if ( is_value_collection
                           and m.get_value_reference().get_size() == value_filter
                         )
                      or ( (not is_value_collection)
                           and m.get_value_reference().get_value() == value_filter
                         )
                ]
        return match
class KeyValNode(Node):
    """
    Class to denote a Node representing a Key Value pair.
    Structure:
        +-------------------+
        |       type        |
        |-------------------|
        |      marker       |
        |-------------------|
        |   key_reference   |
        |-------------------|
        |  value_reference  |
        +-------------------+
    """
    def __init__(self):
        # Both references start unset; callers attach child nodes via the
        # setters below.
        super().__init__(NodeType.KEY_VALUE)
        self.key_reference = None
        self.value_reference = None

    def get_key_reference(self):
        """
        Getter method for attribute 'key_reference'
        """
        return self.key_reference

    def get_value_reference(self):
        """
        Getter method for attribute 'value_reference'
        """
        return self.value_reference

    def set_key_reference(self, node):
        """
        Setter method for attribute 'key_reference'
        """
        self.key_reference = node
        return True

    def set_value_reference(self, node):
        """
        Setter method for attribute 'value_reference'
        """
        self.value_reference = node
        return True
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/account_type_property.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
class AccountTypeProperty(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    A single-valued ("simple") model wrapping one string attribute,
    ``value``, restricted to the account-type enum declared below.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Enum of the account-type display strings accepted for 'value'.
    allowed_values = {
        ('value',): {
            'DEFAULT_ACCOUNT': "Default account",
            'CASH_ACCOUNT': "Cash account",
            'ASSET_ACCOUNT': "Asset account",
            'EXPENSE_ACCOUNT': "Expense account",
            'REVENUE_ACCOUNT': "Revenue account",
            'INITIAL_BALANCE_ACCOUNT': "Initial balance account",
            'BENEFICIARY_ACCOUNT': "Beneficiary account",
            'IMPORT_ACCOUNT': "Import account",
            'RECONCILIATION_ACCOUNT': "Reconciliation account",
            'LOAN': "Loan",
            'DEBT': "Debt",
            'MORTGAGE': "Mortgage",
        },
    }
    # No length/range/regex constraints apply to 'value'.
    validations = {
    }
    # This schema does not accept additional properties.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),
        }
    @cached_property
    def discriminator():
        # Simple enum model: no polymorphic discriminator.
        return None
    # No JSON-name <-> python-name remapping is needed for this model.
    attribute_map = {}
    read_only_vars = set()
    _composed_schemas = None
    # Internal bookkeeping attributes assigned in __init__; listed here so
    # the model machinery does not treat them as schema data.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """AccountTypeProperty - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str):, must be one of ["Default account", "Cash account", "Asset account", "Expense account", "Revenue account", "Initial balance account", "Beneficiary account", "Import account", "Reconciliation account", "Loan", "Debt", "Mortgage", ]  # noqa: E501

        Keyword Args:
            value (str):, must be one of ["Default account", "Cash account", "Asset account", "Expense account", "Revenue account", "Initial balance account", "Beneficiary account", "Import account", "Reconciliation account", "Loan", "Debt", "Mortgage", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # 'value' may arrive positionally or as a keyword, but is mandatory.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Anything still left in *args* after consuming 'value' is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # NOTE(review): attribute assignment is intercepted by the ModelSimple
        # machinery — presumably this validates against allowed_values;
        # confirm in model_utils.
        self.value = value
        # Leftover keyword arguments are likewise rejected.
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """AccountTypeProperty - a model defined in OpenAPI

        Deserialization entry point; mirrors __init__ but builds the instance
        without invoking __init__ itself.

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str):, must be one of ["Default account", "Cash account", "Asset account", "Expense account", "Revenue account", "Initial balance account", "Beneficiary account", "Import account", "Reconciliation account", "Loan", "Debt", "Mortgage", ]  # noqa: E501

        Keyword Args:
            value (str):, must be one of ["Default account", "Cash account", "Asset account", "Expense account", "Revenue account", "Initial balance account", "Beneficiary account", "Import account", "Reconciliation account", "Loan", "Debt", "Mortgage", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # Create a bare instance without running __init__ (deserialization path).
        self = super(OpenApiModel, cls).__new__(cls)
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
/Fivana%20CloudTrails%20SDK-3.2.0.tar.gz/Fivana CloudTrails SDK-3.2.0/fivana/cloudtrailsdk/decorators/api_gateway_tracker.py | import logging
from functools import wraps
from fivana.cloudtrailsdk.utils.functions import send_custom_logger, send_exception_logger
import json
# Root logger shared by the decorators defined in this module.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def build_apigateway_response(response):
    """Wrap a handler result dict in the AWS API Gateway proxy-integration format.

    :param response: dict describing the handler outcome; must contain a
        ``code`` entry, which is promoted to the HTTP status code.
    :return: dict with ``statusCode`` set to ``response['code']`` and
        ``body`` set to the JSON serialization of the whole ``response``.
    """
    return {
        'statusCode': response['code'],
        'body': json.dumps(response)
    }
def cloudtrails_apigatweay(app_name="undefined", app_version="undefined", tracker_environment=None):
    """
    Decorator for handlers implementing lambdas that process AWS ApiGateway events.

    NOTE(review): "apigatweay" is a typo in the public name, kept for
    backward compatibility with existing callers.

    :param app_name: application name reported to the CloudTrails logger
    :param app_version: application version reported to the CloudTrails logger
    :param tracker_environment: environment label forwarded to send_custom_logger
    :return: the actual decorator
    """
    def cloudtrails_apigatweay_tracker_decorator(func):
        """
        Inner function of the decorator.
        :param func: the wrapped Lambda handler (event, context) -> response dict
        :return: wrapped handler
        """
        @wraps(func)
        def apigatweay_wrapper(event, context):
            """
            Wrapper that extracts request data from the ApiGateway proxy Event
            parameter and emits a WebApiCall tracking event after the handler runs.
            :param event: ApiGateway proxy-integration event dict
            :param context: Lambda context object
            :return: the handler's response (or a generic 500 response on error)
            """
            # Try to obtain the result of the original function.
            result = {}
            try:
                result = func(event, context)
            except Exception as e:
                # If the original function raised, log an exception event and
                # return a generic 500 response (message intentionally in Spanish).
                send_exception_logger(app_name, app_version)
                logger.error(e)
                error_response = {'message': 'Ha ocurrido un error no controlado', 'code': 500}
                result = build_apigateway_response(error_response)
            # Extract the request/response data from the ApiGateway Proxy
            # Integration event for the tracking record.
            function_name = func.__name__
            properties = {
                "EventType": "WebApiCall",
                'FunctionName': function_name,
                "RequestMethod": event['httpMethod'],
                "RequestScheme": "https",
                "RequestHost": event['headers'].get('Host', ''),
                "RequestPath": event.get('path', ''),
                "RequestQueryString": json.dumps(event.get('queryStringParameters', {})),
                "ClientIP": event['headers'].get('X-Forwarded-For', ''),
                "UserAgent": event['headers'].get('User-Agent', ''),
                "RequestPayload": json.dumps(event.get('body', {})),
                "ResponsePayload": json.dumps(result.get('body', {})),
                "ResponseHttpStatus": result.get('statusCode', '0'),
                "Headers": json.dumps(event.get('headers', {}))
            }
            # Attach custom dimensions if the handler provided any; they are
            # stripped from the response before it is returned to ApiGateway.
            if result.get('cloudtrails_dimensions', None) is not None:
                properties['Dimensions'] = result.get('cloudtrails_dimensions', None)
                result.pop('cloudtrails_dimensions', None)
            # Attach custom properties if the handler provided any (also stripped).
            if result.get('cloudtrails_properties', None) is not None:
                properties.update(result.get('cloudtrails_properties', None))
                result.pop('cloudtrails_properties', None)
            send_custom_logger(app_name=app_name, app_version=app_version, tracker_environment=tracker_environment, **properties)
            # Return the final result.
            return result
        return apigatweay_wrapper
    return cloudtrails_apigatweay_tracker_decorator
/Fittness-1.2.tar.gz/Fittness-1.2/README.md | # data533Lab4
**Build Stamp**
[](https://travis-ci.com/RaineShen/data533Lab4)
**PyPi Link**
https://pypi.org/project/Fittness/
**Screenshot of code coverage for Intake Package**

**Screenshot of code coverage for Burn Package**

### **Subpackage - calories_intake**
This subpackage contains two modules: *__nutrients__* and *__visualization__*.
#### **nutrients** module:
- a super class **Nutrients** has self, name and amount attributes, and a display method. **Protein**, **Fat**, and **Carbohydrate** are 3 subclasses of **Nutrients**, which inherit attributes and methods from **Nutrients** and also have the attribute calPerGram (calories per gram of the nutrient) as well as their own methods to calculate and display total calories.
- **entry()** function obtains information from the user for daily protein, fat, and carbohydrate intakes in gram, as well as user's weight(kg), height(cm), age, sex, and activity level.
- **calCalories(protI, fatI, carbI)** function takes the user inputs for protein, fat, and carbohydrate intakes and creates an object of each nutrients class to print the total calories from all three nutrients intake.
- **bodyNeeds(w, h, s, age, proAmt, fatAmt, carbAmt, fac)** function takes the user's weight(kg), height(cm), sex, age, and activity level (chosen from the given options) as well as daily protein, fat, and carbohydrate intake to print summaries and suggestions for each of the three nutrient intakes, and for the total calorie intake based on activity level and dietary intake.
#### **visualization** module:
- **entry()** function obtains the amount of each of the three nutrients' daily intake over a certain time period as a list. This function asks for 3 list inputs; each of them needs to be at least 7 elements long and all 3 lists need to have the same length. Otherwise, errors will be thrown. It also calculates the calorie intake based on the 3 nutrients.
- **nutriTrack(lst1, lst2, lst3, lst4)** function takes all 3 lists of protein, fat, and carbohydrate intake, as well as the sequential number of days to show 3 linked plots. Each of the plot tracks the amount change of the nutrient over time in that particular period.
- **calorieTrack(lst1, lst2, num)** function takes the list of daily calories intake calulated by the **entry()** function in a particular period, as well as the sequential number of days to show the graph of daily total calories intake over time. The calorie intake for a specific day can be obtained when hover on each point on the graph.
The subpackage **calories_burned** has two modules, *records* and *monitoring*. *Records* is the superclass of *Monitoring* and it has 5 functions.
***Records***
1. In the initializing stage, it takes name, gender, age, height, and weight as arguments.
2. **Records.display()** takes self as argument and prints out details such as the name, gender, age, height, and weight set in the initializing stage.
3. **Records.BMI()** calculates the body mass index from the given personal information and reports the weight status in the range from underweight to obese.
4. **Records.BMR()** calculates the Basal Metabolic Rate. BMR is a measurement of the number of calories needed to perform a person's most basic functions such as breathing. Each gender has a different formula to calculate BMR:
   - ***Female_bmr = 655 + (9.6 * self.weight) + (1.8 * self.height) - (4.7 * self.age)***
   - ***Male_bmr = 66 + (13.7 * self.weight) + (5 * self.height) - (6.8 * self.age)***
5. **Records.totalcal()** calculates the total burned calories based on BMR, exercise intensity, and exercise time. Exercise intensity and time are taken from the user's input; for simplicity, intensity has only 3 levels — 3 (Light), 4 (Moderate), 7 (Vigorous) — and time is measured in minutes. Requires BMR to be calculated first.
**Prerequisite**
Have pygal installed
pip install pygal
***Monitoring***
Monitoring is the subclass of Records. It can be used to monitor weight and calorie changes on a weekly or monthly basis.
1. In the initializing stage, it takes all the arguments from Records plus an additional new argument, calory.
2. **Monitoring.new_weight(newweight)** appends the daily weight to weight_list
3. **Monitoring.new_calory(newcalory)** appends the daily burned calories to calory_list
4. **Monitoring.weight_change_plot()** creates a line chart showing the daily weight and its changes over time
5. **Monitoring.calory_change_plot()** creates a line chart showing the daily burned calories and their changes over time
6. **Monitoring.weight_calory_plot()** creates a bar chart that contains both the daily weight and burned calories in the same graph
| PypiClean |
/MAVR-0.93.tar.gz/MAVR-0.93/scripts/alignment/blast/blast_all_vs_all.py | __author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Tools.BLAST import DustMasker, MakeBLASTDb, BLASTn, BLASTp
# ---- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
                    help="Input file with sequences")
parser.add_argument("-m", "--mask", action="store_true", dest="mask",
                    help="Mask sequences with dustmasker")
parser.add_argument("-n", "--name", action="store", dest="name", default="tmp_database",
                    help="Database name")
parser.add_argument("-y", "--database_type", action="store", dest="database_type", default="nucleotide",
                    help="Database type. Allowed types: protein and nucleotide. Default: nucleotide")
parser.add_argument("-t", "--threads", action="store", dest="threads", default=1, type=int,
                    help="Number of threads to use")
parser.add_argument("-b", "--other_blast_options", action="store", dest="other_options",
                    help="Other blast options")
parser.add_argument("-e", "--evalue", action="store", dest="evalue",
                    help="E-value cutoff")
parser.add_argument("-o", "--output", action="store", dest="output", required=True,
                    help="Output file")  # BUGFIX: help text was a copy-paste of the -e option
parser.add_argument("-f", "--output_format", action="store", dest="output_format", default=6, type=int,
                    help="Output format:"
                         "0 = pairwise,"
                         "1 = query-anchored showing identities,"
                         "2 = query-anchored no identities,"
                         "3 = flat query-anchored, show identities,"
                         "4 = flat query-anchored, no identities,"
                         "5 = XML Blast output,"
                         "6 = tabular,"
                         "7 = tabular with comment lines,"
                         "8 = Text ASN.1,"
                         "9 = Binary ASN.1,"
                         "10 = Comma-separated values,"
                         "11 = BLAST archive format (ASN.1)")
args = parser.parse_args()

# ---- optional low-complexity masking ---------------------------------------
if args.mask:
    mask_file = args.input + ".asnb"
    # BUGFIX: the argparse dest is "input"; the original referenced the
    # non-existent attribute args.input_file and raised AttributeError here.
    DustMasker.mask(args.input, mask_file)

# ---- build database and run all-vs-all search ------------------------------
if args.database_type == "nucleotide":
    MakeBLASTDb.make_nucleotide_db(args.input, args.name, mask_file if args.mask else None,
                                   output_file=args.name)
    BLASTn.parallel_blastn(args.input, args.name, outfile=args.output,
                           blast_options=args.other_options, split_dir="splited_fasta",
                           splited_output_dir="splited_output_dir",
                           evalue=args.evalue, output_format=args.output_format,
                           threads=args.threads,
                           combine_output_to_single_file=True)
elif args.database_type == "protein":
    MakeBLASTDb.make_protein_db(args.input, args.name, mask_file if args.mask else None,
                                output_file=args.name)
    BLASTp.parallel_blastp(args.input, args.name, outfile=args.output,
                           blast_options=args.other_options, split_dir="splited_fasta",
                           splited_output_dir="splited_output_dir",
                           evalue=args.evalue, output_format=args.output_format,
                           threads=args.threads,
                           combine_output_to_single_file=True)
/qe/qe.py |
#-------import------------
import subprocess
import numpy as np
import copy
from xml.etree.ElementTree import parse
#from pathlib import Path
from ase.io.cube import read_cube_data
from ase.atoms import Atoms
from ase.io import read
from ase.units import Bohr
#------uniform wavefunction----------
def uniform(atoms, data=None, origin=None):
    """Normalize a volumetric grid so its integral over the cell equals 1.

    Parameters
    ----------
    atoms : ase.Atoms
        Structure whose cell vectors define the voxel geometry.
    data : array-like, optional
        Grid values of shape (nx, ny, nz); complex data is replaced by its
        absolute value.  Defaults to a 2x2x2 grid of ones.
    origin : array-like, optional
        Accepted for signature symmetry with write_cube(); the value does
        not influence the result (it was dead code in the original).

    Returns
    -------
    numpy.ndarray
        ``data`` divided by its integral, so that
        sum(data) * voxel_volume == 1 (volume in Bohr^3).
    """
    if data is None:
        data = np.ones((2, 2, 2))
    data = np.asarray(data)
    if data.dtype == complex:
        data = np.abs(data)
    # Voxel basis vectors in Bohr: each cell vector divided by the number
    # of grid points along that direction.
    dx = np.zeros((3, 3))
    for i in range(3):
        dx[i] = atoms.cell[i] / data.shape[i] / Bohr
    # Integral of the grid = voxel volume (det of the voxel basis) * sum.
    integral = np.linalg.det(dx) * np.sum(data)
    return data / integral
#-------write cube file function----------
def write_cube(fileobj, atoms, data=None, origin=None, comment=None):
    """Write *data* on the grid of *atoms* to *fileobj* in Gaussian cube format.

    Parameters
    ----------
    fileobj : file-like
        Open, writable text stream.
    atoms : ase.Atoms
        Structure whose cell, positions and atomic numbers go in the header.
    data : array-like, optional
        Volumetric data of shape (nx, ny, nz); complex values are replaced
        by their absolute value.  Defaults to a 2x2x2 grid of ones.
    origin : array-like, optional
        Grid origin in Angstrom (converted to Bohr); defaults to (0, 0, 0).
    comment : str, optional
        First header line; defaults to a timestamped ASE comment.
    """
    if data is None:
        data = np.ones((2, 2, 2))
    data = np.asarray(data)
    if data.dtype == complex:
        data = np.abs(data)
    if comment is None:
        # BUGFIX: 'time' was never imported at module level, so the default
        # comment path raised NameError; import locally where it is needed.
        import time
        comment = 'Cube file from ASE, written on ' + time.strftime('%c')
    else:
        comment = comment.strip()
    fileobj.write(comment)
    fileobj.write('\nOUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z\n')
    if origin is None:
        origin = np.zeros(3)
    else:
        origin = np.asarray(origin) / Bohr
    fileobj.write('{0:5}{1:12.6f}{2:12.6f}{3:12.6f}\n'
                  .format(len(atoms), *origin))
    # One voxel-axis line per cell vector: point count then step in Bohr.
    for i in range(3):
        n = data.shape[i]
        d = atoms.cell[i] / n / Bohr
        fileobj.write('{0:5}{1:12.6f}{2:12.6f}{3:12.6f}\n'.format(n, *d))
    positions = atoms.positions / Bohr
    numbers = atoms.numbers
    for Z, (x, y, z) in zip(numbers, positions):
        fileobj.write('{0:5}{1:12.6f}{2:12.6f}{3:12.6f}{4:12.6f}\n'
                      .format(Z, 0.0, x, y, z))
    # Stream values six per line with the z index varying fastest, as the
    # cube format requires.
    k = len(data[0, 0, :])
    data = np.reshape(data, (-1, k))
    for i in range(0, len(data[:, 0])):
        for j in range(0, k):
            fileobj.write(f" {data[i,j]:e}")
            if (j + 1) % 6 == 0:
                fileobj.write("\n")
        if k % 6 != 0:
            fileobj.write("\n")
    fileobj.write("\n")
#-------get QE parameters--------------
def get_paraments(prefix, outdir):
    """Read calculation parameters from the QE XML file <outdir>/<prefix>.xml.

    Returns a dict with keys:
        'NKPTS'    - number of k-points per spin channel
        'NBANDS'   - number of bands (spin-up band count for lsda runs)
        'Ef'       - [fermi, fermi] Fermi energy in eV
        'ISPIN'    - 1 (non-polarized) or 2 (lsda)
        'EIGENVAL' - flat array of eigenvalues in eV (all spin-up, then all
                     spin-down values)
        'WEIGHT'   - array of k-point weights
    """
    handle = open(outdir + '/' + prefix + '.xml')
    root = parse(handle).getroot()
    bs = './output/band_structure/'
    # spin polarization flag
    nspin = 1 if root.findall(bs + 'lsda')[0].text == 'false' else 2
    # Fermi energy, Hartree -> eV
    fermi = float(root.findall(bs + 'fermi_energy')[0].text) * 27.2114
    # k-point count and band count (per spin channel for lsda)
    nk = int(root.findall(bs + 'nks')[0].text)
    if nspin == 1:
        nb = int(root.findall(bs + 'nbnd')[0].text)
    else:
        nb = int(root.findall(bs + 'nbnd_up')[0].text)
    # k-point weights
    weights = np.array([float(kp.attrib['weight'])
                        for kp in root.findall(bs + 'ks_energies/k_point')])
    # Eigenvalues: each <eigenvalues> node holds the up values followed by
    # the down values; split per node and regroup spin-major.
    up, down = [], []
    for node in root.findall(bs + 'ks_energies/eigenvalues'):
        values = [float(v) for v in node.text.split()]
        half = len(values) // nspin
        up.extend(values[:half])
        down.extend(values[half:])
    handle.close()
    eigen = np.append(up, down) * 27.2114  # Hartree -> eV
    return {'NKPTS': nk,
            'NBANDS': nb,
            'Ef': [fermi, fermi],
            'ISPIN': nspin,
            'EIGENVAL': eigen,
            'WEIGHT': weights}
#-------generate wavefunction.cube-------
def run_pp_wfn(para,k_index,band_index,ispin,spin,prefix,outdir,pp_laucher):
    # Generate the |psi|^2 cube file for one (k-point, band, spin) state by
    # driving Quantum ESPRESSO's pp.x, then rename pp.x's output to the
    # WFN_SQUARED_B####_K####[_UP|_DW].vasp.cube naming convention.
    # band_index from 1 to band_number
    # k_index from 1 to kpoint_number
    # NOTE(review): the 'ispin' parameter is overwritten from para['ISPIN']
    # on the next line, so the caller-supplied value is ignored.
    ispin=para['ISPIN']
    kpoint=para['NKPTS']
    # For lsda runs the spin-down k-points follow all spin-up ones in the QE
    # output, hence the offset by (spin-1)*kpoint.
    real_k_index=k_index+(spin-1)*kpoint
    if ispin==1:
        tmp=subprocess.getstatusoutput(f"ls WFN_SQUARED_B{band_index:04d}_K{k_index:04d}.vasp.cube")
    else:
        tmp=subprocess.getstatusoutput(f"ls WFN_SQUARED_B{band_index:04d}_K{k_index:04d}_UP.vasp.cube WFN_SQUARED_B{band_index:04d}_K{k_index:04d}_DW.vasp.cube")
    # Skip the (expensive) pp.x run when the cube file(s) already exist.
    if "No such file or directory" not in tmp[1]:
        return 0
    else:
        # Write the pp.x input; plot_num = 7 selects |psi|^2 of one state.
        pp_ini=open('wfn.inp','w')
        pp_ini.write(f'''&INPUTPP
 prefix = '{prefix:s}',
 outdir = '{outdir:s}',
 filplot = 'wfn.pp',
 plot_num = 7,
 kpoint = {real_k_index:d},
 kband = {band_index:d},
 lsign = .false.,
 /
&PLOT
 fileout = 'wfn.cube',
 iflag = 3,
 nfile = 1,
 weight(1) = 1.0,
 output_format = 6,
 /
''')
        pp_ini.close()
        tmp=subprocess.getstatusoutput(pp_laucher+" -i wfn.inp > wfn.out")
        if tmp[0] != 0 :
            # Abort the whole run on pp.x failure; details are in wfn.out.
            print("\n\t**** !!!! Running pp.x error! check the wfn.out !!!! ****")
            exit()
        else:
            if ispin==1:
                tmp=subprocess.getstatusoutput(f"mv wfn.cube WFN_SQUARED_B{band_index:04d}_K{k_index:04d}.vasp.cube")
            else:
                # Recover which spin channel real_k_index belongs to.
                s=(real_k_index-1)//kpoint+1
                if s == 1:
                    tmp=subprocess.getstatusoutput(f"mv wfn.cube WFN_SQUARED_B{band_index:04d}_K{k_index:04d}_UP.vasp.cube")
                else:
                    tmp=subprocess.getstatusoutput(f"mv wfn.cube WFN_SQUARED_B{band_index:04d}_K{k_index:04d}_DW.vasp.cube")
#------get eigenvalue-----------
def get_eigenvalue(para, k_index, band_index, spin):
    """Return one eigenvalue (eV) from the flat 'EIGENVAL' array.

    Indices are 1-based: *k_index* in [1, NKPTS], *band_index* in
    [1, NBANDS], *spin* in {1, 2}.  The flat array is ordered spin-major,
    then by k-point, then by band.
    """
    bands_per_kpoint = para['NBANDS']
    bands_per_spin = para['NKPTS'] * bands_per_kpoint
    offset = ((spin - 1) * bands_per_spin
              + (k_index - 1) * bands_per_kpoint
              + (band_index - 1))
    return para['EIGENVAL'][offset]
#-------calculate LFS----------
def calc_lfs(para,prefix,outdir,kbT,dfdd_threshold,intermediate_file_options,pp_laucher):
    # Accumulate the local Fermi-softness grid per spin channel:
    #   LFS_s(r) = sum_{k,n} w_k * |psi_kn(r)|^2 * (-dFDD)(E_kn - Ef_s)
    # where -dFDD is the negative derivative of the Fermi-Dirac distribution.
    # return fs (np.array[i,j,k]) , atoms (ase.Atoms)
    kpoint_number=para['NKPTS']
    band_number=para['NBANDS']
    ispin=para['ISPIN']
    ef=para['Ef']
    kweight=para['WEIGHT']
    if ispin==2:
        tagspin=['_UP','_DW']
    else:
        tagspin=['']
    fs=[[],[]]
    for s in range(ispin):
        i=0
        spin=s+1
        print(f'\n\tStart calculating intergral of spin={spin:d}:\n\tKpoint\tBand\tE-Ef/eV\t\t-dFDD\t\t\tweight')
        for k in range(kpoint_number):
            for b in range(band_number):
                k_index=k+1
                band_index=b+1
                e_ef=get_eigenvalue(para,k_index,band_index,spin)-ef[s]
                # -dFDD at e_ef: (1/kbT) * exp(x) / (exp(x)+1)^2, x = e_ef/kbT.
                dfdd=(1.0/kbT)*np.exp(e_ef/kbT)/(np.exp(e_ef/kbT)+1)/(np.exp(e_ef/kbT)+1)
                if dfdd >= dfdd_threshold:
                    # Only states with a non-negligible weight are worth a pp.x run.
                    run_pp_wfn(para,k_index,band_index,ispin,spin,prefix,outdir,pp_laucher)
                    i=i+1
                else:
                    continue
                data, atoms = read_cube_data(f'WFN_SQUARED_B{band_index:04d}_K{k_index:04d}'+tagspin[s]+'.vasp.cube')
                # Normalize |psi|^2 to unit integral, then weight by -dFDD.
                data=uniform(atoms,data)*dfdd
                if intermediate_file_options==False:
                    # Discard the per-state cube file once it has been consumed.
                    tmp=subprocess.getstatusoutput(f"rm WFN_SQUARED_B{band_index:04d}_K{k_index:04d}*.vasp.cube")
                print(f"\t{k_index:d}\t{band_index:d}\t{e_ef:.6f}\t{dfdd:.8e}\t\t{kweight[k]:.6f}")
                # k-point-weighted accumulation; the first contributing state
                # initializes fs[s], later ones are added.
                if i==1:
                    fs[s]=data*kweight[k]
                else:
                    fs[s]=fs[s]+data*kweight[k]
    return fs,atoms
#-------write LFScube-----------
def write_lfs(para, fs, atoms, tag=''):
    """Write the local Fermi-softness grid(s) to cube file(s).

    One file per spin channel: ``LFS<tag>.cube`` for non-polarized runs,
    ``LFS_UP<tag>.cube`` / ``LFS_DW<tag>.cube`` otherwise.

    Parameters
    ----------
    para : dict
        Calculation parameters; only 'ISPIN' is used here.
    fs : sequence
        Per-spin volumetric data; fs[s] is the grid for spin channel s.
    atoms : ase.Atoms
        Structure written into the cube header.
    tag : str, optional
        Suffix appended to the file name (e.g. '_CB', '_VB').
    """
    spin_labels = ['_UP', '_DW'] if para['ISPIN'] == 2 else ['']
    for s, label in enumerate(spin_labels):
        # BUGFIX: use a context manager so the file is flushed and closed;
        # the original left the handle open.
        with open("LFS" + label + tag + ".cube", 'w') as fs_file:
            write_cube(fs_file, atoms, fs[s], [0.0, 0.0, 0.0],
                       "Fermi_Softness" + label + tag)
#-------write FSCAR----------
def write_fscar(para, bader_dir, tag=''):
    """Condense the LFS cube file(s) with Bader integration.

    Runs the external ``bader`` program on each LFS cube file (one per spin
    channel) and renames the resulting ACF.dat to the matching FSCAR file.
    """
    spin_labels = ['_UP', '_DW'] if para['ISPIN'] == 2 else ['']
    for label in spin_labels:
        subprocess.getstatusoutput(bader_dir + ' LFS' + label + tag + '.cube')
        subprocess.getstatusoutput('mv ACF.dat FSCAR' + label + tag)
#-------FS modudle-----------
def run_fs(prefix,outdir,kbT,dfdd_threshold,band_gap,intermediate_file_options,bader_dir,pp_laucher):
    # Top-level driver: read run parameters from the QE XML, accumulate the
    # local Fermi-softness grid(s), write LFS cube file(s) and the
    # Bader-condensed FSCAR file(s).  For gapped systems the conduction and
    # valence bands are processed separately, using E_CBM / E_VBM in place
    # of the Fermi level.
    #----------Initialization---------
    print('''
####################################################
#
#            Fermi-Softness Calculation v1.2
#
#            Author: Qiaosong Lin
#                    Wuhan University, China
#
#   Notice:
#   1) You have already finished Non-SCF calculation
#   2) Make sure vaspkit and bader are in your $PATH
#   3) Make sure INCAR OUTCAR WAVECAR POSCAR vasprun.xml exist
#   4) Make sure ASE was installed correctly
#
#   Website:
#   https://github.com/Linqiaosong/Fermi-Softness-for-VASP
#
#####################################################
    ''')
    para=get_paraments(prefix,outdir)
    kpoint_number=para['NKPTS']
    band_number=para['NBANDS']
    ispin=para['ISPIN']
    ef=para['Ef']
    kweight=para['WEIGHT']
    if ispin != 1 and ispin != 2:
        print('\n\t**** !!!! ISPIN error !!!! ****')
        exit()
    print(f'''
Parameters:
    Electron temperature = {kbT:.6f} eV
    dFDD threshold = {dfdd_threshold:f}
    Fermi Energy = {ef[0]:.6f} eV
    ISPIN = {ispin:d}
    Kpoint Numbers = {kpoint_number:d}
    Band Numbers = {band_number:d}
    CBM Energy = {band_gap['CBM']} eV
    VBM Energy = {band_gap['VBM']} eV
    Save Intermediate Files = {intermediate_file_options}
    Bader PATH = {bader_dir:s}
    pp.x Laucher = {pp_laucher:s}
Initialization is complete, start calculating:
    ''')
    print('\tKpoint\tWeight')
    for i in range(len(kweight)):
        print(f'\t{i+1}\t{kweight[i]}')
    if intermediate_file_options==True:
        # Restore previously saved cube files so pp.x runs can be skipped.
        tmp=subprocess.getstatusoutput(f"ls ./WFNSQR/WFN_SQUARED_*.vasp.cube")
        if "No such file or directory" not in tmp[1]:
            subprocess.getstatusoutput(f"mv ./WFNSQR/WFN_SQUARED_*.vasp.cube .")
    #----------End:Initialization--------------
    if band_gap['CBM'] == [0.0] and band_gap['VBM'] == [0.0]:
        # no gap, calculate FS
        fs,atoms=calc_lfs(para,prefix,outdir,kbT,dfdd_threshold,intermediate_file_options,pp_laucher)
        write_lfs(para,fs,atoms)
        write_fscar(para,bader_dir)
    else:
        #----------calculate CB--------
        para_cbm=copy.deepcopy(para)
        # remove band under E_CBM (pushed far above Ef so their dFDD vanishes)
        for i in range(len(para_cbm['EIGENVAL'])):
            if para_cbm['EIGENVAL'][i] < min(band_gap['CBM']):
                para_cbm['EIGENVAL'][i] = 99.0
        # change Ef to E_CBM
        para_cbm['Ef']=band_gap['CBM']
        # calculate FS
        fs,atoms=calc_lfs(para_cbm,prefix,outdir,kbT,dfdd_threshold,intermediate_file_options,pp_laucher)
        write_lfs(para_cbm,fs,atoms,'_CB')
        write_fscar(para_cbm,bader_dir,'_CB')
        #---------calculate VB-----------
        para_vbm=copy.deepcopy(para)
        # remove band above E_VBM (pushed far below Ef so their dFDD vanishes)
        for i in range(len(para_vbm['EIGENVAL'])):
            if para_vbm['EIGENVAL'][i] > max(band_gap['VBM']):
                para_vbm['EIGENVAL'][i] = -99.0
        # change Ef to E_VBM
        para_vbm['Ef']=band_gap['VBM']
        # calculate FS
        fs,atoms=calc_lfs(para_vbm,prefix,outdir,kbT,dfdd_threshold,intermediate_file_options,pp_laucher)
        write_lfs(para_vbm,fs,atoms,'_VB')
        write_fscar(para_vbm,bader_dir,'_VB')
    #-----------save intermediate files-----------
    if intermediate_file_options==True:
        subprocess.getstatusoutput('mkdir WFNSQR')
        subprocess.getstatusoutput('mv WFN_SQUARED* WFNSQR')
    #-----------remove temp----------
    subprocess.getstatusoutput('rm wfn.inp wfn.out wfn.pp AVF.dat BCF.dat')
    #-----------print success--------
    print('\nThe calculation ends normally.')
#----------main-----------------
if __name__ == "__main__":
    #-------parameters----------
    # prefix/outdir must match the preceding pw.x scf/nscf run.
    prefix='pwscf'
    outdir='./tmp'
    kbT=0.4    # Electron temperature (eV): recommended 0.4 by B. Huang
    dfdd_threshold=0.001   # Derivation of Fermi-Dirac distribution threshold: recommended 0.001 by B. Huang
    intermediate_file_options=False    # Save intermediate files? False or True (default: False)
    bader_dir='bader'    # Path of bader, if bader is in your $PATH, you don't need to change it
    pp_laucher='mpirun -np 4 pp.x'    # Laucher of pp.x, e.g.: 'pp.x' or 'mpirun -np 4 pp.x'
    band_gap={'VBM':[0.0],    # If band gap exists (You might need to confirm the occupation of VBM and CBM):
              'CBM':[0.0]}    # non-spin polarization: set as 'VBM':[E_VBM],'CBM':[E_CBM] (Do not minus E_fermi)
                              # spin polarization: set as 'VBM':[E_VBM_UP,E_VBM_DW],'CBM':[E_CBM_UP,E_CBM_DW]
                              # Otherwise: set as 'VBM':[0.0],'CBM':[0.0]
    #----------------------------
    run_fs(prefix,outdir,kbT,dfdd_threshold,band_gap,intermediate_file_options,bader_dir,pp_laucher)
/GraphLearner_mits92-0.0.1-py3-none-any.whl/GraphLearner/Grid.py | import math
import networkx
from matplotlib import pyplot as plt
from itertools import count
class Grid:
    """An m-by-n grid graph wrapper around networkx for teaching/experimenting.

    Nodes are (x, y) coordinate tuples pre-populated with ``population``,
    ``area``, boundary flags and (for boundary nodes) ``boundary_perim``;
    edges carry ``shared_perim``.  See ``__str__`` for a usage tutorial.
    """
    def __init__(
        self,
        dimensions=None,       # (m, n) size of the lattice; required in practice
        with_diagonals=False,  # also connect diagonal neighbours (shared_perim=0)
        assignment=None,       # optional node -> color mapping
        graph=None             # NOTE(review): accepted but never used — verify intent
    ):
        if dimensions:
            self.dimensions = dimensions
            self.graph = create_grid_graph(dimensions, with_diagonals)
            if not assignment:
                # split the lattice at its midpoint along each axis
                thresholds = tuple(math.floor(n / 2) for n in self.dimensions)
                assignment = {
                    node: color_quadrants(node, thresholds) for node in self.graph.nodes
                }
                # NOTE(review): `assignment` is computed here but never stored on
                # the instance or applied to the graph — confirm whether it
                # should be saved (e.g. as a node attribute).
        else:
            raise Exception("Not a good way to create a Partition")
    def draw_grid(self, col_attr, x=12, y=12, cmap=plt.cm.coolwarm):
        """Draw the grid with matplotlib, coloring nodes by attribute ``col_attr``.

        Args:
            col_attr: name of a node attribute present on every node.
            x, y: figure size in inches.
            cmap: matplotlib colormap used for the node colors.
        """
        plt.figure(figsize=(x, y))
        # work on a copy so drawing-related edge weights don't pollute self.graph
        gtemp = networkx.Graph(self.graph)
        for edge in gtemp.edges: gtemp.edges[edge]['weight'] = 1
        # map each distinct attribute value to a small integer color index
        cds = set(networkx.get_node_attributes(gtemp, col_attr).values())
        mapping = dict(zip(sorted(cds), count()))
        nodes = gtemp.nodes()
        colors = [mapping[gtemp.nodes[n][col_attr]] for n in nodes]
        pos = networkx.spring_layout(gtemp, iterations=2000)
        ec = networkx.draw_networkx_edges(gtemp, pos, alpha=0.2)
        # NOTE(review): `with_labels` is a draw_networkx() kwarg, not a
        # draw_networkx_nodes() one — newer networkx versions reject it; verify.
        nc = networkx.draw_networkx_nodes(gtemp, pos, nodelist=nodes, node_color=colors,
                                          with_labels=True, node_size=350, cmap=cmap)
        plt.axis('off')
        plt.show()
    def __str__(self):
        # Long-form usage tutorial shown when the object is printed.
        return "Welcome! Your graph has {n} by {m} nodes.\n\n" \
               "Here are some useful functions/usages that assume you called your grid object G. \nEverything shown here translates to networkx usability. \n\n" \
               "\t * G.graph.nodes(data=True) will give you all of your nodes and the data that exists on each of them.\n" \
               "\t \t * Removing the (data=True) will simply print the node names, which are repsented by their coordinates if this were on a lattice.\n" \
               "\t \t * All nodes come pre-populated with population=1, area=1, their perimeter, and if they are a boundary node or not.\n\n" \
               "\t * G.graph.edges(data=True) will give you all of the edges and their associated data.\n" \
               "\t \t * Removing the (data=True will simply print the edge names.\n" \
               "\t \t * All edges come pre-populated their shared perimeter lengths.\n\n" \
               "\t * Here is how to loop over nodes while creating a new attribute on them. The attribute we will create will be called CD.\n" \
               "\t \t - for node in G.graph.nodes:\n" \
               "\t \t \t G.graph.nodes(data=True)[node]['CD'] = 1\n\n" \
               "\t * Here is how to loop over edges while assigning them the networkx built in keyword 'weight'.\n" \
               "\t \t - for edge in G.graph.edges:\n" \
               "\t \t \t G.graph.edges[edge]['weight'] = 10\n\n" \
               "\t * To visualize the graph, which at least helps me in understanding what's happening, I have built a method that makes this easy.\n" \
               "\t \t * G.draw_grid(col_attribute)\n" \
               "\t \t \t * This has a number of attributes attributes: \n" \
               "\t \t \t \t - col_attr (String, required, one of the attributes that exists on all nodes you would like to color by)\n" \
               "\t \t \t \t - x (int, default: 12, controls length of x-axis for matplotlib plot)\n" \
               "\t \t \t \t - y (int, default: 12, controls length of y-axis for matplotlib plot)\n" \
               "\t \t \t \t - cmap (plt.cm, default: plt.cm.coolwarm, specifies color scheme. A list of available options can be found here: https://matplotlib.org/tutorials/colors/colormaps.html)\n" \
               "\t \t * Here is an example using all default values passing in your attribute name.\n" \
               "\t \t \t * G.draw_grid('boundary_node')\n" \
               "\t \t * Here is an example setting all attributes. You need to import matplotlibs pyplot to use the cmap attribute.\n" \
               "\t \t \t * G.draw_grid('boundary_node', x=10, y=10, cmap=plt.cm.viridis)".format(n=self.dimensions[0], m=self.dimensions[1])
def create_grid_graph(dimensions, with_diagonals):
    """Build an m-by-n lattice graph with the standard node/edge attributes.

    Every lattice edge gets ``shared_perim=1``; optional diagonal edges get
    ``shared_perim=0``.  Every node gets ``population=1``, ``area=1`` and the
    boundary attributes set by :func:`tag_boundary_nodes`.
    """
    if len(dimensions) != 2:
        raise ValueError("Dr. Mitchel... why are you trying to draw lines?")
    m, n = dimensions
    graph = networkx.generators.lattice.grid_2d_graph(m, n)
    # each axis-aligned neighbour pair shares one unit of perimeter
    networkx.set_edge_attributes(graph, 1, "shared_perim")
    if with_diagonals:
        # descending (NW->SE) diagonals followed by ascending (SW->NE) ones
        diagonal_edges = [
            ((i, j), (i + 1, j + 1)) for i in range(m - 1) for j in range(n - 1)
        ] + [
            ((i, j + 1), (i + 1, j)) for i in range(m - 1) for j in range(n - 1)
        ]
        graph.add_edges_from(diagonal_edges)
        # diagonal neighbours touch only at a corner, so no shared perimeter
        for diagonal in diagonal_edges:
            graph.edges[diagonal]["shared_perim"] = 0
    networkx.set_node_attributes(graph, 1, "population")
    networkx.set_node_attributes(graph, 1, "area")
    tag_boundary_nodes(graph, dimensions)
    return graph
def tag_boundary_nodes(graph, dimensions):
    """Flag every node on the outer edge of the m-by-n lattice.

    Sets ``boundary_node`` (bool) on all nodes, and ``boundary_perim`` only on
    boundary nodes.
    """
    m, n = dimensions
    for node in graph.nodes:
        x, y = node
        on_edge = x in (0, m - 1) or y in (0, n - 1)
        attrs = graph.nodes[node]
        attrs["boundary_node"] = on_edge
        if on_edge:
            attrs["boundary_perim"] = get_boundary_perim(node, dimensions)
def get_boundary_perim(node, dimensions):
    """Return how many unit sides of *node* lie on the outer boundary.

    Corners contribute 2, other edge nodes 1, interior nodes 0.
    """
    m, n = dimensions
    x, y = node
    on_vertical_edge = x in (0, m - 1)
    on_horizontal_edge = y in (0, n - 1)
    if on_vertical_edge and on_horizontal_edge:
        return 2  # corner
    if on_vertical_edge or on_horizontal_edge:
        return 1  # edge, not corner
    return 0  # interior
def color_quadrants(node, thresholds):
    """Return the quadrant color (0-3) of *node* split at *thresholds*.

    The x half contributes 0 or 1 and the y half contributes 0 or 2, so the
    sum uniquely identifies one of the four quadrants.
    """
    x, y = node
    x_color = 0 if x < thresholds[0] else 1
    y_color = 0 if y < thresholds[1] else 2
    # Fix: the return expression previously carried a stray "| PypiClean"
    # extraction artifact (an undefined name), which made the call fail.
    return x_color + y_color
/Mopidy-Spotmop-2.10.1.tar.gz/Mopidy-Spotmop-2.10.1/mopidy_spotmop/static/app/discover/controller.js | 'use strict';
angular.module('spotmop.discover', [])
/**
 * Routing
 **/
.config(function($stateProvider){
	$stateProvider
		// parent state: the discover landing page
		.state('discover', {
			url: "/discover",
			templateUrl: "app/discover/template.html"
		})
		// personalized recommendations (featured users, top tracks, now playing)
		.state('discover.recommendations', {
			url: "/recommendations",
			templateUrl: "app/discover/recommendations.template.html",
			controller: 'DiscoverRecommendationsController'
		})
		// recommendations seeded from one or more comma-separated URIs
		.state('discover.similar', {
			url: "/similar/:uri",
			templateUrl: "app/discover/similar.template.html",
			controller: 'DiscoverSimilarController'
		});
})
/**
* Recommendations
**/
.controller('DiscoverRecommendationsController', function DiscoverRecommendationsController( $scope, $rootScope, $filter, SpotifyService, SettingsService, NotifyService ){
$scope.favorites = [];
$scope.current = [];
$scope.sections = [];
// Get recommended users
// Currently this is a hardcoded list of user ids as there isn't a clean API that provides
// a list of 'professional' Spotify users
var userURIs = [
'spotify:user:spotify',
'spotify:user:bbc_playlister',
'spotify:user:filtr',
'spotify:user:arminvanbuurenofficial',
'spotify:user:dominorecords',
'spotify:user:spinninrecordsofficial'
];
var users = [];
var requestsCompleted = 0;
for( var i = 0; i < userURIs.length; i++ ){
// process extra playlist data and add to our $scope
var callback = function(i){
return function( response ){
requestsCompleted++;
// make sure our response was not an error
if( typeof(response.error) === 'undefined' ) users.push( response );
// we've just completed our last request
if( requestsCompleted == userURIs.length - 1 ){
var section = {
title: 'Featured users',
artists: '',
items: users
}
$scope.sections.push( section );
}
};
}(i);
SpotifyService.getUser( userURIs[i] ).then( callback );
}
// Get my old favorites
SpotifyService.getMyFavorites('artists', 50, false, 'long_term').then( function(response){
$scope.favorites.items = $filter('shuffle')(response.items);
});
// Get my short-term top tracks
SpotifyService.getMyFavorites('tracks', 50, false, 'short_term').then( function(response){
// shuffle our tracks for interest, and limit to 5
var favoriteTracks = response.items;
favoriteTracks = $filter('shuffle')(response.items);
favoriteTracks = $filter('limitTo')(response.items, 5);
angular.forEach( favoriteTracks, function(track){
SpotifyService.getRecommendations(false, false, false, false, track.id).then( function(recommendations){
var items = [];
angular.forEach( recommendations.tracks, function( track ){
var item = track.album;
item.artists = track.artists;
items.push( item );
});
var section = {
title: 'Because you listened to ',
artists: track.artists,
items: items
}
$scope.sections.push( section );
});
});
});
/**
* Recommendations based on the currently playing track
* We need to listen for the complete loading of the currentTlTrack for this to work smoothly
**/
if( typeof( $scope.state().currentTlTrack.track ) !== 'undefined' ){
getCurrentlyPlayingRecommendations( $scope.state().currentTlTrack );
}
$rootScope.$on('spotmop:currenttrack:loaded', function(event, tlTrack){
getCurrentlyPlayingRecommendations( tlTrack );
});
// actually go get the recommendations
function getCurrentlyPlayingRecommendations( tlTrack ){
var artists = [];
var artistIds = '';
angular.forEach( tlTrack.track.artists, function(artist){
// create our template-friendly array of artists
artists.push(
{
'name': artist.name,
'name_encoded': encodeURIComponent(artist.name),
'uri': artist.uri
}
);
// build our list of seed artists (because our current track might contain multiple artists)
if( artistIds != '' ) artistIds += ',';
artistIds += SpotifyService.getFromUri('artistid',artist.uri);
});
// store our seed artists for the template
$scope.current.artists = artists;
// now get recommendations based on these artists
SpotifyService.getRecommendations(false, false, artistIds).then( function(response){
var albums = [];
angular.forEach( response.tracks, function( track ){
var album = track.album;
album.artists = track.artists;
albums.push( album );
});
$scope.current.items = albums;
});
}
})
/**
* Discover material, similar to a seed URI
**/
.controller('DiscoverSimilarController', function DiscoverSimilarController( $scope, $rootScope, $filter, $stateParams, SpotifyService, SettingsService, NotifyService ){
var seed_tracks = [];
var seed_albums = [];
var seed_artists = [];
$scope.seedObjects = [];
var uris = $stateParams.uri.split(',');
for( var i = 0; i < uris.length; i++ ){
switch( SpotifyService.uriType( uris[i] ) ){
case 'track':
seed_tracks.push( SpotifyService.getFromUri('trackid', uris[i]) );
break;
case 'album':
seed_albums.push( SpotifyService.getFromUri('albumid', uris[i]) )
break;
case 'artist':
seed_artists.push( SpotifyService.getFromUri('artistid', uris[i]) )
break;
}
}
// merge our arrays of ids back into a comma-separated string, or a null variable if empty
( seed_tracks.length > 0 ? seed_tracks = seed_tracks.join(',') : seed_tracks = null );
( seed_albums.length > 0 ? seed_albums = seed_albums.join(',') : seed_albums = null );
( seed_artists.length > 0 ? seed_artists = seed_artists.join(',') : seed_artists = null );
// go get our seed objects
if( seed_tracks != null ){
SpotifyService.getTracks( seed_tracks ).then( function(response){
$scope.seedObjects = $scope.seedObjects.concat( response.tracks );
});
}
if( seed_albums != null ){
SpotifyService.getAlbums( seed_albums ).then( function(response){
$scope.seedObjects = $scope.seedObjects.concat( response.albums );
});
}
if( seed_artists != null ){
SpotifyService.getArtists( seed_artists ).then( function(response){
$scope.seedObjects = $scope.seedObjects.concat( response.artists );
});
}
// get from spotify ( limit, offset, seed_artists, seed_albums, seed_tracks )
SpotifyService.getRecommendations( 50, 0, seed_artists, seed_albums, seed_tracks).then( function(response){
$scope.tracks = response.tracks;
});
}); | PypiClean |
/EXOSIMS-3.1.6.tar.gz/EXOSIMS-3.1.6/CONTRIBUTING.md | # Contributing to EXOSIMS
The EXOSIMS community welcomes any and all contributions of code, documentation, and tests. Please see below for guidelines on how to contribute, and please be sure to follow our [code of conduct](https://github.com/dsavransky/EXOSIMS/blob/master/CODE_OF_CONDUCT.md)
in all your interactions with the project.
## Getting Started
A great place to start is our [Issue Tracker](https://github.com/dsavransky/EXOSIMS/issues). Be sure to also read all of the documentation (https://exosims.readthedocs.io).
## Working with the Code
Start by forking the EXOSIMS repository (https://docs.github.com/en/get-started/quickstart/fork-a-repo) and cloning the fork to your local system. You may choose to work in a new branch in your fork, but all eventual pull requests back to the main repository must be to the master branch. While working, be sure to keep your fork up to date with the main repository (https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork).
You are encouraged to use automated linting tools while you are editing or writing new code. No new code will be accepted that does not pass a flake8 (https://flake8.pycqa.org/) test. An overview of available tools can be found here: https://realpython.com/python-code-quality/
New contributions to EXOSIMS are encouraged (but not required) to use type hinting (https://docs.python.org/3/library/typing.html) and to run code through a static analysis tool such as mypy (http://mypy-lang.org/).
## Coding Conventions
The following conventions are strictly enforced for all new contributions:
* All methods and classes must have google-formatted docstrings (https://google.github.io/styleguide/pyguide.html#383-functions-and-methods). All arguments and returns must be listed with their type in the docstring. All arguments to the ``__init__`` must be listed in the class docstring, along with all class attributes.
* PEP 8 must be followed. In particular, only use 4 spaces per indentation level (no tabs allowed). The only thing we don't care about are naming conventions. If you like camelCase, you do you.
* Overloaded Prototype methods may **not** change the syntax declaration (the exact same arguments/returns are required).
* Every new module implementation must inherit the prototype or an existing implementation of that module type.
* All new code must be blackened (https://black.readthedocs.io)
You can install black from pypi (`pip install black`) which will create a `black` executable on your system. Calling this executable on any python source file will reformat the file in place (i.e. `black /path/to/myfile.py`). EXOSIMS's test suite runs a black check on both the EXOSIMS directory and tests directory for every pull request. For more information on black and on text editor/IDE integrations, see the black docs.
## Linting
Your code should be run through static analysis. EXOSIMS uses flake8 (https://flake8.pycqa.org/) preferentially, but any equivalent tool may also be used. The project flake8 settings are listed in the file .flake8 in the top level of the github repository. In particular, note that we use a line width of 88 characters (black's default) and also universally ignore errors 741 (https://www.flake8rules.com/rules/E741.html), 731 (https://www.flake8rules.com/rules/E731.html) and 203 (https://www.flake8rules.com/rules/E203.html). You may ignore other errors in your own code via inline # noqa comments (see: https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html) but be sure to justify why you are doing this, and be sure to list the specific errors being ignored in the comment.
flake8 can be installed from pypi (`pip install flake8`) which will create a `flake8` executable on your system. Calling this executable (with no arguments) from the root repository directory will automatically check all code. If using a different tool, be sure you are capturing the same rules as in the .flake8 file.
## Pull Requests
Code contributions must be made via pull requests to the master branch of the EXOSIMS repository. Pulls that cannot be automatically merged or that fail any tests will be rejected. Pull requests should be as small as possible, targeting a single issue/new feature. While preparing your pull request, follow this checklist:
- [ ] Sync your fork and ensure that your pull can be merged automatically by merging master onto the branch you wish to pull from.
- [ ] Ensure that all of your new additions have properly formatted docstrings (you can build the docs on your local machine and check that the resulting html is properly formatted - see: https://exosims.readthedocs.io/en/latest/docs.html)
- [ ] Ensure that all of the commits going in to your pull have informative messages
- [ ] Blacken and lint your code.
- [ ] In a clean virtual environment, and with your local cache dirs emptied (or set to empty directories) install your working copy of EXOSIMS in developer mode and run all unit tests (including any new ones you've added).
- [ ] In the same environment, run ``e2eTests``
- [ ] Create a new pull request and fill out the template. Fully describe all changes/additions
### Thank You!
| PypiClean |
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/CacheDir.py | import atexit
import json
import os
import stat
import sys
import uuid
import SCons.Action
import SCons.Errors
import SCons.Warnings
import SCons
cache_enabled = True
cache_debug = False
cache_force = False
cache_show = False
cache_readonly = False
cache_tmp_uuid = uuid.uuid4().hex
def CacheRetrieveFunc(target, source, env):
    """Try to fetch the first target from the derived-file cache.

    Returns 0 on a cache hit (the file was copied/symlinked into place) and
    1 on a miss, matching the SCons action-function convention.
    """
    node = target[0]
    fs = node.fs
    cache = env.get_CacheDir()
    cache.requests += 1
    cachedir, cachefile = cache.cachepath(node)
    if not fs.exists(cachefile):
        cache.CacheDebug('CacheRetrieve(%s): %s not in cache\n', node, cachefile)
        return 1
    cache.hits += 1
    cache.CacheDebug('CacheRetrieve(%s): retrieving from %s\n', node, cachefile)
    if SCons.Action.execute_actions:
        if fs.islink(cachefile):
            # cached symlinks are reproduced as symlinks, not dereferenced
            fs.symlink(fs.readlink(cachefile), node.get_internal_path())
        else:
            cache.copy_from_cache(env, cachefile, node.get_internal_path())
        # refresh the cache entry's mtime so external pruning sees the hit;
        # best-effort only (the entry may be on a read-only filesystem)
        try:
            os.utime(cachefile, None)
        except OSError:
            pass
        st = fs.stat(cachefile)
        # make the retrieved file writable regardless of the cached mode
        fs.chmod(node.get_internal_path(), stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    return 0
def CacheRetrieveString(target, source, env):
    """Return the status line for a cache hit, or None on a miss."""
    node = target[0]
    cachedir, cachefile = env.get_CacheDir().cachepath(node)
    if node.fs.exists(cachefile):
        return "Retrieved `%s' from cache" % node.get_internal_path()
    return None
# Action objects wrapping the retrieval function: one prints the
# "Retrieved ... from cache" status line, the other is silent.
CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, CacheRetrieveString)
CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
    """Push the first target into the derived-file cache.

    No-op when the cache is read-only or the node is marked nocache.  The
    file is written to a uniquely named temp file first and then renamed into
    place, so concurrent builds never observe a half-written cache entry.
    """
    if cache_readonly:
        return
    t = target[0]
    if t.nocache:
        return
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if fs.exists(cachefile):
        # Don't bother copying it if it's already there. Note that
        # usually this "shouldn't happen" because if the file already
        # existed in cache, we'd have retrieved the file from there,
        # not built it. This can happen, though, in a race, if some
        # other person running the same build pushes their copy to
        # the cache after we decide we need to build it but before our
        # build completes.
        cd.CacheDebug('CachePush(%s): %s already exists in cache\n', t, cachefile)
        return
    cd.CacheDebug('CachePush(%s): pushing to %s\n', t, cachefile)
    # per-process unique temp name; published atomically via rename below
    tempfile = "%s.tmp%s"%(cachefile,cache_tmp_uuid)
    errfmt = "Unable to copy %s to cache. Cache file is %s"
    try:
        fs.makedirs(cachedir, exist_ok=True)
    except OSError:
        msg = errfmt % (str(target), cachefile)
        raise SCons.Errors.SConsEnvironmentError(msg)
    try:
        if fs.islink(t.get_internal_path()):
            # cache symlinks as symlinks rather than copying their referent
            fs.symlink(fs.readlink(t.get_internal_path()), tempfile)
        else:
            cd.copy_to_cache(env, t.get_internal_path(), tempfile)
        fs.rename(tempfile, cachefile)
    except EnvironmentError:
        # It's possible someone else tried writing the file at the
        # same time we did, or else that there was some problem like
        # the CacheDir being on a separate file system that's full.
        # In any case, inability to push a file to cache doesn't affect
        # the correctness of the build, so just print a warning.
        msg = errfmt % (str(target), cachefile)
        SCons.Warnings.warn(SCons.Warnings.CacheWriteErrorWarning, msg)
# Action that pushes a freshly built target into the cache (no status line).
CachePush = SCons.Action.Action(CachePushFunc, None)
class CacheDir:
    """Manages a derived-file cache directory and its hit/miss statistics."""
    def __init__(self, path):
        """
        Initialize a CacheDir object.
        The cache configuration is stored in the object. It
        is read from the config file in the supplied path if
        one exists, if not the config file is created and
        the default config is written, as well as saved in the object.
        """
        self.requests = 0
        self.hits = 0
        self.path = path
        self.current_cache_debug = None
        self.debugFP = None
        self.config = dict()
        # a None path means "caching disabled"; skip config handling entirely
        if path is None:
            return
        self._readconfig(path)
    def _readconfig(self, path):
        """
        Read the cache config.
        If directory or config file do not exist, create. Take advantage
        of Py3 capability in os.makedirs() and in file open(): just try
        the operation and handle failure appropriately.
        Omit the check for old cache format, assume that's old enough
        there will be none of those left to worry about.
        :param path: path to the cache directory
        """
        config_file = os.path.join(path, 'config')
        try:
            os.makedirs(path, exist_ok=True)
        except FileExistsError:
            pass
        except OSError:
            msg = "Failed to create cache directory " + path
            raise SCons.Errors.SConsEnvironmentError(msg)
        try:
            # 'x' mode: only succeeds if we are the first to create the config,
            # so concurrent builds don't clobber each other's config file
            with open(config_file, 'x') as config:
                self.config['prefix_len'] = 2
                try:
                    json.dump(self.config, config)
                except Exception:
                    msg = "Failed to write cache configuration for " + path
                    raise SCons.Errors.SConsEnvironmentError(msg)
        except FileExistsError:
            # config already exists (possibly just created by another process):
            # read it instead of writing
            try:
                with open(config_file) as config:
                    self.config = json.load(config)
            except ValueError:
                msg = "Failed to read cache configuration for " + path
                raise SCons.Errors.SConsEnvironmentError(msg)
    def CacheDebug(self, fmt, target, cachefile):
        """Write a cache debug line (plus running stats) to the debug stream."""
        # (re)open the debug stream if the --cache-debug target changed
        if cache_debug != self.current_cache_debug:
            if cache_debug == '-':
                # '-' means "write debug output to stdout"
                self.debugFP = sys.stdout
            elif cache_debug:
                def debug_cleanup(debugFP):
                    debugFP.close()
                self.debugFP = open(cache_debug, 'w')
                atexit.register(debug_cleanup, self.debugFP)
            else:
                self.debugFP = None
            self.current_cache_debug = cache_debug
        if self.debugFP:
            self.debugFP.write(fmt % (target, os.path.split(cachefile)[1]))
            self.debugFP.write("requests: %d, hits: %d, misses: %d, hit rate: %.2f%%\n" %
                               (self.requests, self.hits, self.misses, self.hit_ratio))
    @classmethod
    def copy_from_cache(cls, env, src, dst):
        """Copy a cache entry into the build tree (contents, or contents+mtime)."""
        if env.cache_timestamp_newer:
            # plain copy: the retrieved file gets a fresh timestamp
            return env.fs.copy(src, dst)
        else:
            # copy2 preserves the cached metadata (including mtime)
            return env.fs.copy2(src, dst)
    @classmethod
    def copy_to_cache(cls, env, src, dst):
        """Copy a built file into the cache, making the cached copy writable."""
        try:
            result = env.fs.copy2(src, dst)
            fs = env.File(src).fs
            st = fs.stat(src)
            # ensure the cache entry stays writable so it can be updated later
            fs.chmod(dst, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
            return result
        except AttributeError as ex:
            raise EnvironmentError from ex
    @property
    def hit_ratio(self):
        # percentage of requests served from cache; 100 when nothing was asked
        return (100.0 * self.hits / self.requests if self.requests > 0 else 100)
    @property
    def misses(self):
        return self.requests - self.hits
    def is_enabled(self):
        # enabled globally (--cache-disable not given) and a path is configured
        return cache_enabled and self.path is not None
    def is_readonly(self):
        return cache_readonly
    def get_cachedir_csig(self, node):
        """Return the content signature of *node*'s cache entry, if it exists."""
        cachedir, cachefile = self.cachepath(node)
        if cachefile and os.path.exists(cachefile):
            return SCons.Util.hash_file_signature(cachefile, SCons.Node.FS.File.hash_chunksize)
    def cachepath(self, node):
        """Return the (directory, file) pair where *node* is cached.

        Entries are sharded into subdirectories by the first ``prefix_len``
        (uppercased) characters of the node's cache signature.  Returns
        (None, None) when caching is disabled.
        """
        if not self.is_enabled():
            return None, None
        sig = node.get_cachedir_bsig()
        subdir = sig[:self.config['prefix_len']].upper()
        dir = os.path.join(self.path, subdir)
        return dir, os.path.join(dir, sig)
    def retrieve(self, node):
        """
        This method is called from multiple threads in a parallel build,
        so only do thread safe stuff here. Do thread unsafe stuff in
        built().
        Note that there's a special trick here with the execute flag
        (one that's not normally done for other actions). Basically
        if the user requested a no_exec (-n) build, then
        SCons.Action.execute_actions is set to 0 and when any action
        is called, it does its showing but then just returns zero
        instead of actually calling the action execution operation.
        The problem for caching is that if the file does NOT exist in
        cache then the CacheRetrieveString won't return anything to
        show for the task, but the Action.__call__ won't call
        CacheRetrieveFunc; instead it just returns zero, which makes
        the code below think that the file *was* successfully
        retrieved from the cache, therefore it doesn't do any
        subsequent building. However, the CacheRetrieveString didn't
        print anything because it didn't actually exist in the cache,
        and no more build actions will be performed, so the user just
        sees nothing. The fix is to tell Action.__call__ to always
        execute the CacheRetrieveFunc and then have the latter
        explicitly check SCons.Action.execute_actions itself.
        """
        if not self.is_enabled():
            return False
        env = node.get_build_env()
        if cache_show:
            # --cache-show: show the build command, but retrieve silently
            if CacheRetrieveSilent(node, [], env, execute=1) == 0:
                node.build(presub=0, execute=0)
                return True
        else:
            if CacheRetrieve(node, [], env, execute=1) == 0:
                return True
        return False
    def push(self, node):
        """Push *node* into the cache unless the cache is read-only/disabled."""
        if self.is_readonly() or not self.is_enabled():
            return
        return CachePush(node, [], node.get_build_env())
    def push_if_forced(self, node):
        # --cache-force: re-push even targets that were retrieved from cache
        if cache_force:
            return self.push(node)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/domains/javascript.py | from typing import Any, Dict, Iterator, List, Tuple
from typing import cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_id, make_refnode
logger = logging.getLogger(__name__)
class JSObject(ObjectDescription):
    """
    Description of a JavaScript object.

    Base class for all JS domain object directives; subclasses customize
    :attr:`has_arguments`, :attr:`display_prefix` and :attr:`allow_nesting`.
    """
    #: If set to ``True`` this object is callable and a `desc_parameterlist` is
    #: added
    has_arguments = False
    #: what is displayed right before the documentation entry
    display_prefix = None # type: str
    #: If ``allow_nesting`` is ``True``, the object prefixes will be accumulated
    #: based on directive nesting
    allow_nesting = False
    def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
        """Breaks down construct signatures
        Parses out prefix and argument list from construct definition. The
        namespace and class will be determined by the nesting of domain
        directives.
        """
        sig = sig.strip()
        # callables look like "name(arglist)"; split off the raw arglist
        if '(' in sig and sig[-1:] == ')':
            member, arglist = sig.split('(', 1)
            member = member.strip()
            arglist = arglist[:-1].strip()
        else:
            member = sig
            arglist = None
        # If construct is nested, prefix the current prefix
        prefix = self.env.ref_context.get('js:object', None)
        mod_name = self.env.ref_context.get('js:module')
        name = member
        try:
            # a dotted member carries its own prefix, e.g. "Class.method"
            member_prefix, member_name = member.rsplit('.', 1)
        except ValueError:
            member_name = name
            member_prefix = ''
        finally:
            name = member_name
        if prefix and member_prefix:
            prefix = '.'.join([prefix, member_prefix])
        elif prefix is None and member_prefix:
            prefix = member_prefix
        fullname = name
        if prefix:
            fullname = '.'.join([prefix, name])
        signode['module'] = mod_name
        signode['object'] = prefix
        signode['fullname'] = fullname
        if self.display_prefix:
            signode += addnodes.desc_annotation(self.display_prefix,
                                                self.display_prefix)
        if prefix:
            signode += addnodes.desc_addname(prefix + '.', prefix + '.')
        elif mod_name:
            signode += addnodes.desc_addname(mod_name + '.', mod_name + '.')
        signode += addnodes.desc_name(name, name)
        if self.has_arguments:
            if not arglist:
                signode += addnodes.desc_parameterlist()
            else:
                _pseudo_parse_arglist(signode, arglist)
        return fullname, prefix
    def add_target_and_index(self, name_obj: Tuple[str, str], sig: str,
                             signode: desc_signature) -> None:
        """Register the node id, domain entry and index entry for the object."""
        mod_name = self.env.ref_context.get('js:module')
        fullname = (mod_name + '.' if mod_name else '') + name_obj[0]
        node_id = make_id(self.env, self.state.document, '', fullname)
        signode['ids'].append(node_id)
        # Assign old styled node_id not to break old hyperlinks (if possible)
        # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
        old_node_id = self.make_old_id(fullname)
        if old_node_id not in self.state.document.ids and old_node_id not in signode['ids']:
            signode['ids'].append(old_node_id)
        self.state.document.note_explicit_target(signode)
        domain = cast(JavaScriptDomain, self.env.get_domain('js'))
        domain.note_object(fullname, self.objtype, node_id, location=signode)
        indextext = self.get_index_text(mod_name, name_obj)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, node_id, '', None))
    def get_index_text(self, objectname: str, name_obj: Tuple[str, str]) -> str:
        """Return the text for the general-index entry of this object."""
        name, obj = name_obj
        if self.objtype == 'function':
            if not obj:
                return _('%s() (built-in function)') % name
            return _('%s() (%s method)') % (name, obj)
        elif self.objtype == 'class':
            return _('%s() (class)') % name
        elif self.objtype == 'data':
            return _('%s (global variable or constant)') % name
        elif self.objtype == 'attribute':
            return _('%s (%s attribute)') % (name, obj)
        return ''
    def before_content(self) -> None:
        """Handle object nesting before content
        :py:class:`JSObject` represents JavaScript language constructs. For
        constructs that are nestable, this method will build up a stack of the
        nesting heirarchy so that it can be later de-nested correctly, in
        :py:meth:`after_content`.
        For constructs that aren't nestable, the stack is bypassed, and instead
        only the most recent object is tracked. This object prefix name will be
        removed with :py:meth:`after_content`.
        The following keys are used in ``self.env.ref_context``:
        js:objects
            Stores the object prefix history. With each nested element, we
            add the object prefix to this list. When we exit that object's
            nesting level, :py:meth:`after_content` is triggered and the
            prefix is removed from the end of the list.
        js:object
            Current object prefix. This should generally reflect the last
            element in the prefix history
        """
        prefix = None
        if self.names:
            (obj_name, obj_name_prefix) = self.names.pop()
            prefix = obj_name_prefix.strip('.') if obj_name_prefix else None
            if self.allow_nesting:
                # nestable objects (classes) prefix their members with their
                # own full name, not just their enclosing prefix
                prefix = obj_name
        if prefix:
            self.env.ref_context['js:object'] = prefix
            if self.allow_nesting:
                objects = self.env.ref_context.setdefault('js:objects', [])
                objects.append(prefix)
    def after_content(self) -> None:
        """Handle object de-nesting after content
        If this class is a nestable object, removing the last nested class prefix
        ends further nesting in the object.
        If this class is not a nestable object, the list of classes should not
        be altered as we didn't affect the nesting levels in
        :py:meth:`before_content`.
        """
        objects = self.env.ref_context.setdefault('js:objects', [])
        if self.allow_nesting:
            try:
                objects.pop()
            except IndexError:
                pass
        # restore the prefix of the enclosing object (or None at top level)
        self.env.ref_context['js:object'] = (objects[-1] if len(objects) > 0
                                             else None)
    def make_old_id(self, fullname: str) -> str:
        """Generate old styled node_id for JS objects.
        .. note:: Old Styled node_id was used until Sphinx-3.0.
                  This will be removed in Sphinx-5.0.
        """
        return fullname.replace('$', '_S_')
class JSCallable(JSObject):
    """Description of a JavaScript function, method or constructor."""
    # callables render a parenthesized parameter list after the name
    has_arguments = True
    # field types recognized in the directive body (":param:", ":throws:", ...)
    doc_field_types = [
        TypedField('arguments', label=_('Arguments'),
                   names=('argument', 'arg', 'parameter', 'param'),
                   typerolename='func', typenames=('paramtype', 'type')),
        GroupedField('errors', label=_('Throws'), rolename='err',
                     names=('throws', ),
                     can_collapse=True),
        Field('returnvalue', label=_('Returns'), has_arg=False,
              names=('returns', 'return')),
        Field('returntype', label=_('Return type'), has_arg=False,
              names=('rtype',)),
    ]
class JSConstructor(JSCallable):
    """Like a callable but with a different prefix."""
    # rendered as "class Name(...)"
    display_prefix = 'class '
    # classes may contain nested members, so prefixes accumulate
    allow_nesting = True
class JSModule(SphinxDirective):
    """
    Directive to mark description of a new JavaScript module.
    This directive specifies the module name that will be used by objects that
    follow this directive.
    Options
    -------
    noindex
        If the ``noindex`` option is specified, no linkable elements will be
        created, and the module won't be added to the global module index. This
        is useful for splitting up the module definition across multiple
        sections or files.
    :param mod_name: Module name
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'noindex': directives.flag
    }
    def run(self) -> List[Node]:
        """Record the module in the domain and emit its target/index nodes."""
        mod_name = self.arguments[0].strip()
        # subsequent object directives pick up this module name
        self.env.ref_context['js:module'] = mod_name
        noindex = 'noindex' in self.options
        ret = [] # type: List[Node]
        if not noindex:
            domain = cast(JavaScriptDomain, self.env.get_domain('js'))
            node_id = make_id(self.env, self.state.document, 'module', mod_name)
            domain.note_module(mod_name, node_id)
            # Make a duplicate entry in 'objects' to facilitate searching for
            # the module in JavaScriptDomain.find_obj()
            domain.note_object(mod_name, 'module', node_id,
                               location=(self.env.docname, self.lineno))
            target = nodes.target('', '', ids=[node_id], ismod=True)
            # Assign old styled node_id not to break old hyperlinks (if possible)
            # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
            old_node_id = self.make_old_id(mod_name)
            if old_node_id not in self.state.document.ids and old_node_id not in target['ids']:
                target['ids'].append(old_node_id)
            self.state.document.note_explicit_target(target)
            ret.append(target)
            indextext = _('%s (module)') % mod_name
            inode = addnodes.index(entries=[('single', indextext, node_id, '', None)])
            ret.append(inode)
        return ret
    def make_old_id(self, modname: str) -> str:
        """Generate old styled node_id for JS modules.
        .. note:: Old Styled node_id was used until Sphinx-3.0.
                  This will be removed in Sphinx-5.0.
        """
        return 'module-' + modname
class JSXRefRole(XRefRole):
    """Cross-reference role for the JavaScript domain."""

    def process_link(self, env: BuildEnvironment, refnode: Element,
                     has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
        # basically what sphinx.domains.python.PyXRefRole does: remember the
        # enclosing object/module so find_obj() can qualify the target later.
        refnode['js:object'] = env.ref_context.get('js:object')
        refnode['js:module'] = env.ref_context.get('js:module')
        if not has_explicit_title:
            # Leading dot only affects the target, leading tilde only the title.
            title = title.lstrip('.')
            target = target.lstrip('~')
            if title.startswith('~'):
                # ``~mod.obj`` renders just the last dotted component.
                title = title[1:].rsplit('.', 1)[-1]
        if target.startswith('.'):
            # A leading dot requests a context-specific (most specific) lookup.
            target = target[1:]
            refnode['refspecific'] = True
        return title, target
class JavaScriptDomain(Domain):
    """JavaScript language domain."""
    name = 'js'
    label = 'JavaScript'
    # if you add a new object type make sure to edit JSObject.get_index_string
    object_types = {
        'function': ObjType(_('function'), 'func'),
        'method': ObjType(_('method'), 'meth'),
        'class': ObjType(_('class'), 'class'),
        'data': ObjType(_('data'), 'data'),
        'attribute': ObjType(_('attribute'), 'attr'),
        'module': ObjType(_('module'), 'mod'),
    }
    directives = {
        'function': JSCallable,
        'method': JSCallable,
        'class': JSConstructor,
        'data': JSObject,
        'attribute': JSObject,
        'module': JSModule,
    }
    roles = {
        'func': JSXRefRole(fix_parens=True),
        'meth': JSXRefRole(fix_parens=True),
        'class': JSXRefRole(fix_parens=True),
        'data': JSXRefRole(),
        'attr': JSXRefRole(),
        'mod': JSXRefRole(),
    }
    initial_data = {
        'objects': {},  # fullname -> docname, node_id, objtype
        'modules': {},  # modname -> docname, node_id
    }  # type: Dict[str, Dict[str, Tuple[str, str]]]
    @property
    def objects(self) -> Dict[str, Tuple[str, str, str]]:
        # Lazily-created mapping: fullname -> (docname, node_id, objtype).
        return self.data.setdefault('objects', {})  # fullname -> docname, node_id, objtype
    def note_object(self, fullname: str, objtype: str, node_id: str,
                    location: Any = None) -> None:
        """Register *fullname*; warn if it was already described elsewhere."""
        if fullname in self.objects:
            docname = self.objects[fullname][0]
            logger.warning(__('duplicate %s description of %s, other %s in %s'),
                           objtype, fullname, objtype, docname, location=location)
        self.objects[fullname] = (self.env.docname, node_id, objtype)
    @property
    def modules(self) -> Dict[str, Tuple[str, str]]:
        # Lazily-created mapping: modname -> (docname, node_id).
        return self.data.setdefault('modules', {})  # modname -> docname, node_id
    def note_module(self, modname: str, node_id: str) -> None:
        """Register a module as described in the current document."""
        self.modules[modname] = (self.env.docname, node_id)
    def clear_doc(self, docname: str) -> None:
        # Drop every object/module recorded for a document that is re-read.
        for fullname, (pkg_docname, node_id, _l) in list(self.objects.items()):
            if pkg_docname == docname:
                del self.objects[fullname]
        for modname, (pkg_docname, node_id) in list(self.modules.items()):
            if pkg_docname == docname:
                del self.modules[modname]
    def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
        # XXX check duplicates
        # Merge entries produced by parallel readers, keeping only those
        # belonging to the documents handled by this worker.
        for fullname, (fn, node_id, objtype) in otherdata['objects'].items():
            if fn in docnames:
                self.objects[fullname] = (fn, node_id, objtype)
        for mod_name, (pkg_docname, node_id) in otherdata['modules'].items():
            if pkg_docname in docnames:
                self.modules[mod_name] = (pkg_docname, node_id)
    def find_obj(self, env: BuildEnvironment, mod_name: str, prefix: str, name: str,
                 typ: str, searchorder: int = 0) -> Tuple[str, Tuple[str, str, str]]:
        """Look up *name* by trying progressively qualified candidates.

        With ``searchorder == 1`` the most specific candidate wins; with
        ``0`` the least specific one does (candidate list is reversed).
        """
        if name[-2:] == '()':
            name = name[:-2]
        searches = []
        if mod_name and prefix:
            searches.append('.'.join([mod_name, prefix, name]))
        if mod_name:
            searches.append('.'.join([mod_name, name]))
        if prefix:
            searches.append('.'.join([prefix, name]))
        searches.append(name)
        if searchorder == 0:
            searches.reverse()
        newname = None
        for search_name in searches:
            if search_name in self.objects:
                newname = search_name
        return newname, self.objects.get(newname)
    def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
                     typ: str, target: str, node: pending_xref, contnode: Element
                     ) -> Element:
        mod_name = node.get('js:module')
        prefix = node.get('js:object')
        # 'refspecific' (set for leading-dot targets) prefers the most
        # specific match; see JSXRefRole.process_link.
        searchorder = 1 if node.hasattr('refspecific') else 0
        name, obj = self.find_obj(env, mod_name, prefix, target, typ, searchorder)
        if not obj:
            return None
        return make_refnode(builder, fromdocname, obj[0], obj[1], contnode, name)
    def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
                         target: str, node: pending_xref, contnode: Element
                         ) -> List[Tuple[str, Element]]:
        mod_name = node.get('js:module')
        prefix = node.get('js:object')
        name, obj = self.find_obj(env, mod_name, prefix, target, None, 1)
        if not obj:
            return []
        return [('js:' + self.role_for_objtype(obj[2]),
                 make_refnode(builder, fromdocname, obj[0], obj[1], contnode, name))]
    def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
        # (name, dispname, type, docname, anchor, priority) tuples for search.
        for refname, (docname, node_id, typ) in list(self.objects.items()):
            yield refname, refname, typ, docname, node_id, 1
    def get_full_qualified_name(self, node: Element) -> str:
        """Return ``module.object.target`` for *node*, or None when no target."""
        modname = node.get('js:module')
        prefix = node.get('js:object')
        target = node.get('reftarget')
        if target is None:
            return None
        else:
            return '.'.join(filter(None, [modname, prefix, target]))
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the JavaScript domain with the Sphinx application.

    :param app: the running Sphinx application.
    :returns: extension metadata — version tag, environment data version
        and the parallel read/write safety flags.
    """
    app.add_domain(JavaScriptDomain)
    # NOTE: stray "| PypiClean" text after the returned dict was extraction
    # junk (it would have raised NameError at runtime) and has been removed.
    return {
        'version': 'builtin',
        'env_version': 2,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/editor/plugins/nls/it/latinEntities.js | ({"le":"minore di o uguale a","prod":"prodotto n-ario\nsegno di prodotto","zwj":"congiuntore di larghezza zero","mdash":"trattino em","frasl":"barra obliqua di frazione","upsih":"simbolo della upsilon greca con uncino","prop":"proporzionale a","middot":"punto in mezzo\nvirgola georgiana\npunto centrale greco","hellip":"puntini di sospensione\ntre puntini di guida","eta":"lettera greca minuscola eta","iacute":"lettera latina i minuscola con accento acuto","yen":"simbolo dello yen\nsimbolo dello yuan","rlm":"segno da-destra-a-sinistra","macr":"macron\nmacron con spaziatura\nlinea orizzontale superiore\nbarra sovrapposta APL","ldquo":"virgolette doppie sinistre","Icirc":"lettera latina I maiuscola con accento circonflesso","OElig":"legatura latina OE maiuscola","hArr":"doppia freccia sinistra destra","eth":"lettera latina eth minuscola","divide":"segno di divisione","chi":"lettera greca minuscola chi","eacute":"lettera latina e minuscola con accento acuto","icirc":"lettera latina i minuscola con accento circonflesso","iexcl":"punto esclamativo capovolto","ETH":"lettera latina ETH maiuscola","acute":"accento acuto\nacuto con spaziatura","crarr":"freccia verso il basso con angolo a sinistra\nritorno a capo","mu":"lettera greca minuscola mu","AElig":"lettera latina AE maiuscola\nlegatura latina maiuscola AE","aacute":"lettera latina a minuscola con accento acuto","lambda":"lettera greca minuscola lambda","THORN":"lettera latina THORN maiuscola","asymp":"quasi uguale a\nasintotico a","fnof":"f latina minuscola con uncino\nfunzione\nfiorino","lang":"parentesi angolare sinistra","cup":"unione\ncoppa","ne":"non uguale a","Sigma":"lettera greca maiuscola sigma","oelig":"legatura latina oe minuscola","cent":"simbolo del centesimo","ni":"contiene come membro","dagger":"croce latina","permil":"segno di per mille","Omicron":"lettera greca maiuscola 
omicron","sigma":"lettera greca minuscola sigma","euro":"simbolo dell'euro","Yacute":"lettera latina Y maiuscola con accento acuto","thorn":"lettera latina thorn minuscola","lceil":"soffitto sinistro\ngradino in su APL","Ograve":"lettera latina O maiuscola con accento grave","rarr":"freccia verso destra","nu":"lettera greca minuscola nu","emsp":"spazio em","Theta":"lettera greca maiuscola theta","lArr":"doppia freccia verso sinistra","tau":"lettera greca minuscola tau","aelig":"lettera latina ae minuscola\nlegatura latina minuscola ae","ccedil":"lettera latina c minuscola con cediglia","Ntilde":"lettera latina N maiuscola con tilde","cong":"approssimativamente uguale a","Uacute":"lettera latina U maiuscola con accento acuto","theta":"lettera greca minuscola theta","darr":"freccia verso il basso","Uuml":"lettera latina U maiuscola con dieresi","bdquo":"virgolette doppie in basso a destra","Aring":"lettera latina A maiuscola con sormontata da anello\nlettera latina A maiuscola anellata","sigmaf":"lettera greca minuscola sigma finale","pound":"simbolo della sterlina","uArr":"doppia freccia verso l'alto","sub":"sottoinsieme di","aring":"lettera latina a minuscola sormontata da anello\nlettera latina a minuscola anellata","sdot":"operatore punto","thinsp":"spazio finissimo","or":"o logico\nvi","Eacute":"lettera latina E maiuscola con accento acuto","shy":"trattino debole\ntrattino discrezionale","curren":"simbolo di valuta","loz":"losanga","not":"simbolo di negazione","tilde":"tilde piccola","sum":"sommatoria n-aria","spades":"seme di picche nero","Psi":"lettera greca maiuscola psi","ndash":"trattino en","sup":"sovrainsieme di","atilde":"lettera latina a minuscola con tilde","clubs":"seme di fiori nero\ntrifoglio","uuml":"lettera latina u minuscola con dieresi","Aacute":"lettera latina A maiuscola con accento acuto","rsaquo":"virgoletta angolare singola rivolta a destra","otimes":"per cerchiato\nprodotto vettoriale","lfloor":"pavimento sinistro\ngradino in giù 
APL","zwnj":"non-congiuntore di larghezza zero","sim":"operatore tilde\nvaria con\nsimile a","Iota":"lettera greca maiuscola iota","Iacute":"lettera latina I maiuscola con accento acuto","pi":"lettera greca minuscola pi","ordf":"indicatore ordinale femminile","frac12":"frazione semplice un mezzo\nfrazione un mezzo","frac14":"frazione semplice un quarto\nfrazione un quarto","alefsym":"simbolo alef\nprimo cardinale transfinito","bull":"pallino\npiccolo cerchio nero","deg":"simbolo dei gradi","ordm":"indicatore ordinale maschile","epsilon":"lettera greca minuscola epsilon","equiv":"identico a","Dagger":"croce latina doppia","brvbar":"barra interrotta\nbarra verticale interrotta","harr":"freccia sinistra destra","ugrave":"lettera latina u minuscola con accento grave","oslash":"lettera latina o minuscola con barra obliqua\nlettera latina o minuscola barrata","Yuml":"lettera latina Y maiuscola con dieresi","hearts":"seme di cuori nero\ninnamorato","Xi":"lettera greca maiuscola xi","Prime":"doppio apice\nsecondi\npollici","iota":"lettera greca minuscola iota","Ccedil":"lettera latina C maiuscola con cediglia","Lambda":"lettera greca maiuscola lambda","raquo":"virgolette doppie angolari indicanti a destra\n guillemet indicante a destra","Phi":"lettera greca maiuscola phi","prime":"apice\nminuti\npiedi","nsub":"non un sottoinsieme di","copy":"simbolo del copyright","yuml":"lettera latina y minuscola con dieresi","Rho":"lettera greca maiuscola rho","Ucirc":"lettera latina U maiuscola con accento circonflesso","Kappa":"lettera greca maiuscola kappa","ucirc":"lettera latina u minuscola con accento circonflesso","sbquo":"virgoletta singola in basso a destra","igrave":"lettera latina i minuscola con accento grave","reg":"simbolo di registrazione\nsimbolo di marchio registrato","infin":"infinito","iquest":"punto interrogativo invertito\npunto interrogativo rovesciato","circ":"lettera modificatrice accento circonflesso","kappa":"lettera greca minuscola kappa","lrm":"segno 
da-sinistra-a-destra","Atilde":"lettera latina A maiuscola con tilde","larr":"freccia verso sinistra","frac34":"frazione semplice tre quarti\nfrazione tre quarti","oacute":"lettera latina o minuscola con accento acuto","rsquo":"virgoletta destra singola","egrave":"lettera latina e minuscola con accento grave","oline":"linea sopra\noverscore con spazio","Mu":"lettera greca maiuscola mu","exist":"esiste","cap":"intersezione\nberretto","and":"and logico\ncuneo","Ouml":"lettera latina O maiuscola con dieresi","agrave":"lettera latina a minuscola con accento grave\nlettera latina a minuscola grave","uarr":"freccia verso l'alto","ang":"angolo","Zeta":"lettera greca maiuscola zeta","scaron":"lettera latina s minuscola con caron","Gamma":"lettera greca maiuscola gamma","isin":"elemento di","Auml":"lettera latina A maiuscola con dieresi","empty":"insieme vuoto\ninsieme nullo\ndiametro","gamma":"lettera greca minuscola gamma","para":"simbolo pilcrow\nsimbolo di paragrafo","ge":"maggiore di o uguale a","psi":"lettera greca minuscola psi","Alpha":"lettera greca maiuscola alpha","Nu":"lettera greca maiuscola nu","ouml":"lettera latina o minuscola con dieresi","zeta":"lettera greca minuscola zeta","alpha":"lettera greca maiuscola alpha","part":"differenziale parziale","auml":"lettera latina a minuscola con dieresi","Ugrave":"lettera latina U maiuscola con accento grave","Oslash":"lettera latina O maiuscola con barra obliqua\nlettera latina O maiuscola barrata","Epsilon":"lettera greca maiuscola epsilon","int":"integrale","Omega":"lettera greca maiuscola omega","perp":"puntina in su\nortogonale a\nperpendicolare","uml":"dieresi\ndieresi con spazio","upsilon":"lettera greca minuscola upsilon","lowast":"operatore asterisco","omega":"lettera greca minuscola omega","otilde":"lettera latina o minuscola con tilde","Egrave":"lettera latina E maiuscola con accento grave","phi":"lettera greca minuscola phi","ensp":"spazio en","Euml":"lettera latina E maiuscola con 
dieresi","cedil":"cediglia\ncediglia con spazio","laquo":"virgolette doppie angolari indicanti a sinistra\nguillemet indicante a sinistra","forall":"per tutti","thetasym":"simbolo della lettera greca minuscola theta","Agrave":"lettera latina A maiuscola con accento grave\nlettera latina A maiuscola grave","szlig":"lettera latina s minuscola sonora\nesse-zeta","Pi":"lettera greca maiuscola pi","rho":"lettera greca minuscola rho","trade":"simbolo di marchio commerciale","Igrave":"lettera latina I maiuscola con accento grave","minus":"segno meno","Beta":"lettera greca maiuscola beta","Ocirc":"lettera latina O maiuscola con accento circonflesso","rdquo":"virgolette doppie destre","Eta":"lettera greca maiuscola eta","rfloor":"pavimento destro","Oacute":"lettera latina O maiuscola con accento acuto","euml":"lettera latina e minuscola con dieresi","oplus":"più cerchiato\nsomma diretta","ocirc":"lettera latina o minuscola con accento circonflesso","radic":"radice quadrata\nsegno di radice","Chi":"lettera greca maiuscola chi","notin":"non un elemento di","sect":"simbolo di sezione","Acirc":"lettera latina A maiuscola con accento circonflesso","lsquo":"virgoletta sinistra singola","beta":"lettera greca minuscola beta","piv":"simbolo di pi greco","sup1":"esponente uno\nnumero uno in esponente","Scaron":"lettera latina S maiuscola con caron","sup2":"esponente due\nnumero due in esponente\nal quadrato","acirc":"lettera latina a minuscola con accento circonflesso","sube":"sottoinsieme di o uguale a","sup3":"esponente tre\nnumero tre in esponente\nal cubo","real":"R maiuscola gotica\nsimbolo di elemento reale","Iuml":"lettera latina I maiuscola con dieresi","rang":"parentesi angolare destra","lsaquo":"virgoletta angolare singola rivolta a sinistra","nabla":"nabla\ndifferenza retrograda","omicron":"lettera greca minuscola omicron","there4":"pertanto","plusmn":"segno più-meno\nsegno più o meno","rceil":"soffitto destro","micro":"simbolo di micro","rArr":"doppia freccia verso 
destra","Delta":"lettera greca maiuscola delta","iuml":"lettera latina i minuscola con dieresi","Tau":"lettera greca maiuscola tau","times":"segno di moltiplicazione","yacute":"lettera latina y minuscola con accento acuto","ograve":"lettera latina o minuscola con accento grave","delta":"lettera greca minuscola delta","Ecirc":"lettera latina E maiuscola con accento circonflesso","dArr":"doppia freccia verso il basso","ntilde":"lettera latina n minuscola con tilde","diams":"seme di quadri nero","uacute":"lettera latina u minuscola con accento acuto","Otilde":"lettera latina O maiuscola con tilde","ecirc":"lettera latina e minuscola con accento circonflesso","Upsilon":"lettera greca maiuscola upsilon","image":"I maiuscola gotica\nelemento immaginario","supe":"sovrainsieme di o uguale a","xi":"lettera greca minuscola xi","weierp":"P maiuscola scritta a mano\ninsieme potenza\np di Weierstrass"}) | PypiClean |
/GBT_parser-1.0.3-py3-none-any.whl/cantools/database/can/formats/arxml/message_specifics.py | from typing import List, Optional
from .end_to_end_properties import AutosarEnd2EndProperties
from .secoc_properties import AutosarSecOCProperties
class AutosarMessageSpecifics:
    """This class collects all AUTOSAR specific information of a CAN message

    This means useful information about CAN messages which is provided
    by ARXML files, but is specific to AUTOSAR.
    """

    def __init__(self) -> None:
        # ARXML paths of the PDUs carried by this message.
        self._pdu_paths: List[str] = []
        # Role flags for the message on the bus.
        self._is_nm = False
        self._is_general_purpose = False
        # Optional security / end-to-end protection attributes.  Forward
        # references are quoted so annotation evaluation does not require
        # the imported classes at runtime.
        self._secoc: Optional["AutosarSecOCProperties"] = None
        self._e2e: Optional["AutosarEnd2EndProperties"] = None
        self._signal_group = None

    @property
    def pdu_paths(self) -> List[str]:
        """The ARXML paths of all PDUs featured by this message.

        For the vast majority of messages, this list only has a single
        entry. Messages with multiplexers and container frames are
        different, though.
        """
        return self._pdu_paths

    @property
    def is_nm(self) -> bool:
        """True iff the message is used for network management"""
        return self._is_nm

    @property
    def is_general_purpose(self) -> bool:
        """True iff the message is not used for signal-based communication

        This comprises messages used for diagnostic and calibration
        purposes, e.g. messages used for the ISO-TP or XCP protocols.
        """
        return self._is_general_purpose

    @property
    def is_secured(self) -> bool:
        """True iff the message integrity is secured using SecOC"""
        return self._secoc is not None

    @property
    def secoc(self) -> Optional["AutosarSecOCProperties"]:
        """The properties required to implement secured on-board communication"""
        return self._secoc

    @property
    def e2e(self) -> Optional["AutosarEnd2EndProperties"]:
        """Returns the end-to-end protection properties for the message"""
        return self._e2e

    @e2e.setter
    def e2e(self, value: Optional["AutosarEnd2EndProperties"]) -> None:
        # NOTE: stray "| PypiClean" text that trailed this assignment was
        # extraction junk and has been removed.
        self._e2e = value
/FastFlask-1.2.32-py3-none-any.whl/click/formatting.py | import typing as t
from contextlib import contextmanager
from gettext import gettext as _
from ._compat import term_len
from .parser import split_opt
# Can force a width. This is used by the test system
FORCED_WIDTH: t.Optional[int] = None
def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:
    """Return the maximum rendered width of each column across *rows*."""
    widths: t.Dict[int, int] = {}

    for row in rows:
        for idx, col in enumerate(row):
            length = term_len(col)

            if idx not in widths or length > widths[idx]:
                widths[idx] = length

    return tuple(widths[idx] for idx in sorted(widths))
def iter_rows(
    rows: t.Iterable[t.Tuple[str, str]], col_count: int
) -> t.Iterator[t.Tuple[str, ...]]:
    """Yield each row right-padded with empty strings to *col_count* cells."""
    for row in rows:
        padding = ("",) * (col_count - len(row))
        yield row + padding
def wrap_text(
    text: str,
    width: int = 78,
    initial_indent: str = "",
    subsequent_indent: str = "",
    preserve_paragraphs: bool = False,
) -> str:
    """A helper function that intelligently wraps text. By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).
    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.
    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
                           first line as a string.
    :param subsequent_indent: the indent string that should be placed on
                              each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
                                intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper
    text = text.expandtabs()
    wrapper = TextWrapper(
        width,
        initial_indent=initial_indent,
        subsequent_indent=subsequent_indent,
        replace_whitespace=False,
    )
    if not preserve_paragraphs:
        return wrapper.fill(text)
    # Paragraphs collected as (indent, is_raw, text) triples.
    p: t.List[t.Tuple[int, bool, str]] = []
    buf: t.List[str] = []
    indent = None
    def _flush_par() -> None:
        # Flush the accumulated lines as one paragraph.  A paragraph whose
        # first line is a lone \b is emitted raw (no rewrapping) and the
        # marker line itself is dropped.
        if not buf:
            return
        if buf[0].strip() == "\b":
            p.append((indent or 0, True, "\n".join(buf[1:])))
        else:
            p.append((indent or 0, False, " ".join(buf)))
        del buf[:]
    for line in text.splitlines():
        if not line:
            # A blank line terminates the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # The first line of a paragraph determines its indentation.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()
    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(" " * indent):
            if raw:
                rv.append(wrapper.indent_only(text))
            else:
                rv.append(wrapper.fill(text))
    return "\n\n".join(rv)
class HelpFormatter:
    """This class helps with formatting text-based help pages. It's
    usually just needed for very special internal cases, but it's also
    exposed so that developers can write their own fancy outputs.
    At present, it always writes into memory.
    :param indent_increment: the additional increment for each level.
    :param width: the width for the text. This defaults to the terminal
                  width clamped to a maximum of 78.
    """
    def __init__(
        self,
        indent_increment: int = 2,
        width: t.Optional[int] = None,
        max_width: t.Optional[int] = None,
    ) -> None:
        # Imported lazily: only needed to query the terminal size.
        import shutil
        self.indent_increment = indent_increment
        if max_width is None:
            max_width = 80
        if width is None:
            width = FORCED_WIDTH
        if width is None:
            # Terminal width clamped to max_width, minus 2 for a margin,
            # but never narrower than 50 columns.
            width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
        self.width = width
        self.current_indent = 0
        self.buffer: t.List[str] = []
    def write(self, string: str) -> None:
        """Writes a unicode string into the internal buffer."""
        self.buffer.append(string)
    def indent(self) -> None:
        """Increases the indentation."""
        self.current_indent += self.indent_increment
    def dedent(self) -> None:
        """Decreases the indentation."""
        self.current_indent -= self.indent_increment
    def write_usage(
        self, prog: str, args: str = "", prefix: t.Optional[str] = None
    ) -> None:
        """Writes a usage line into the buffer.
        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: The prefix for the first line. Defaults to
                       ``"Usage: "``.
        """
        if prefix is None:
            prefix = f"{_('Usage:')} "
        usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
        text_width = self.width - self.current_indent
        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = " " * term_len(usage_prefix)
            self.write(
                wrap_text(
                    args,
                    text_width,
                    initial_indent=usage_prefix,
                    subsequent_indent=indent,
                )
            )
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write("\n")
            indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(
                wrap_text(
                    args, text_width, initial_indent=indent, subsequent_indent=indent
                )
            )
        self.write("\n")
    def write_heading(self, heading: str) -> None:
        """Writes a heading into the buffer."""
        self.write(f"{'':>{self.current_indent}}{heading}:\n")
    def write_paragraph(self) -> None:
        """Writes a paragraph into the buffer."""
        if self.buffer:
            self.write("\n")
    def write_text(self, text: str) -> None:
        """Writes re-indented text into the buffer. This rewraps and
        preserves paragraphs.
        """
        indent = " " * self.current_indent
        self.write(
            wrap_text(
                text,
                self.width,
                initial_indent=indent,
                subsequent_indent=indent,
                preserve_paragraphs=True,
            )
        )
        self.write("\n")
    def write_dl(
        self,
        rows: t.Sequence[t.Tuple[str, str]],
        col_max: int = 30,
        col_spacing: int = 2,
    ) -> None:
        """Writes a definition list into the buffer. This is how options
        and commands are usually formatted.
        :param rows: a list of two item tuples for the terms and values.
        :param col_max: the maximum width of the first column.
        :param col_spacing: the number of spaces between the first and
                            second column.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        # Width of the first column including the gap to the second one.
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            self.write(f"{'':>{self.current_indent}}{first}")
            if not second:
                self.write("\n")
                continue
            if term_len(first) <= first_col - col_spacing:
                # Term fits in the first column: pad up to the second column.
                self.write(" " * (first_col - term_len(first)))
            else:
                # Term is too wide: start the description on the next line.
                self.write("\n")
                self.write(" " * (first_col + self.current_indent))
            text_width = max(self.width - first_col - 2, 10)
            wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
            lines = wrapped_text.splitlines()
            if lines:
                self.write(f"{lines[0]}\n")
                # Continuation lines are re-indented to the second column.
                for line in lines[1:]:
                    self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
            else:
                self.write("\n")
    @contextmanager
    def section(self, name: str) -> t.Iterator[None]:
        """Helpful context manager that writes a paragraph, a heading,
        and the indents.
        :param name: the section name that is written as heading.
        """
        self.write_paragraph()
        self.write_heading(name)
        self.indent()
        try:
            yield
        finally:
            self.dedent()
    @contextmanager
    def indentation(self) -> t.Iterator[None]:
        """A context manager that increases the indentation."""
        self.indent()
        try:
            yield
        finally:
            self.dedent()
    def getvalue(self) -> str:
        """Returns the buffer contents."""
        return "".join(self.buffer)
def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:
    """Given a list of option strings this joins them in the most appropriate
    way and returns them in the form ``(formatted_string,
    any_prefix_is_slash)`` where the second item in the tuple is a flag that
    indicates if any of the option prefixes was a slash.
    """
    any_prefix_is_slash = False
    decorated = []

    for opt in options:
        prefix = split_opt(opt)[0]

        if prefix == "/":
            any_prefix_is_slash = True

        # Decorate with the prefix length so short prefixes sort first
        # (stable sort keeps the original order for equal lengths).
        decorated.append((len(prefix), opt))

    decorated.sort(key=lambda pair: pair[0])
    joined = ", ".join(opt for _, opt in decorated)
    return joined, any_prefix_is_slash
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/mode/q/q.js | CodeMirror.defineMode("q",function(config){
var indentUnit=config.indentUnit,
curPunc,
keywords=buildRE(["abs","acos","aj","aj0","all","and","any","asc","asin","asof","atan","attr","avg","avgs","bin","by","ceiling","cols","cor","cos","count","cov","cross","csv","cut","delete","deltas","desc","dev","differ","distinct","div","do","each","ej","enlist","eval","except","exec","exit","exp","fby","fills","first","fkeys","flip","floor","from","get","getenv","group","gtime","hclose","hcount","hdel","hopen","hsym","iasc","idesc","if","ij","in","insert","inter","inv","key","keys","last","like","list","lj","load","log","lower","lsq","ltime","ltrim","mavg","max","maxs","mcount","md5","mdev","med","meta","min","mins","mmax","mmin","mmu","mod","msum","neg","next","not","null","or","over","parse","peach","pj","plist","prd","prds","prev","prior","rand","rank","ratios","raze","read0","read1","reciprocal","reverse","rload","rotate","rsave","rtrim","save","scan","select","set","setenv","show","signum","sin","sqrt","ss","ssr","string","sublist","sum","sums","sv","system","tables","tan","til","trim","txf","type","uj","ungroup","union","update","upper","upsert","value","var","view","views","vs","wavg","where","where","while","within","wj","wj1","wsum","xasc","xbar","xcol","xcols","xdesc","xexp","xgroup","xkey","xlog","xprev","xrank"]),
E=/[|/&^!+:\\\-*%$=~#;@><,?_\'\"\[\(\]\)\s{}]/;
function buildRE(w){return new RegExp("^("+w.join("|")+")$");}
function tokenBase(stream,state){
var sol=stream.sol(),c=stream.next();
curPunc=null;
if(sol)
if(c=="/")
return(state.tokenize=tokenLineComment)(stream,state);
else if(c=="\\"){
if(stream.eol()||/\s/.test(stream.peek()))
return stream.skipToEnd(),/^\\\s*$/.test(stream.current())?(state.tokenize=tokenCommentToEOF)(stream, state):state.tokenize=tokenBase,"comment";
else
return state.tokenize=tokenBase,"builtin";
}
if(/\s/.test(c))
return stream.peek()=="/"?(stream.skipToEnd(),"comment"):"whitespace";
if(c=='"')
return(state.tokenize=tokenString)(stream,state);
if(c=='`')
return stream.eatWhile(/[A-Z|a-z|\d|_|:|\/|\.]/),"symbol";
if(("."==c&&/\d/.test(stream.peek()))||/\d/.test(c)){
var t=null;
stream.backUp(1);
if(stream.match(/^\d{4}\.\d{2}(m|\.\d{2}([D|T](\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)?)?)/)
|| stream.match(/^\d+D(\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)/)
|| stream.match(/^\d{2}:\d{2}(:\d{2}(\.\d{1,9})?)?/)
|| stream.match(/^\d+[ptuv]{1}/))
t="temporal";
else if(stream.match(/^0[NwW]{1}/)
|| stream.match(/^0x[\d|a-f|A-F]*/)
|| stream.match(/^[0|1]+[b]{1}/)
|| stream.match(/^\d+[chijn]{1}/)
|| stream.match(/-?\d*(\.\d*)?(e[+\-]?\d+)?(e|f)?/))
t="number";
return(t&&(!(c=stream.peek())||E.test(c)))?t:(stream.next(),"error");
}
if(/[A-Z|a-z]|\./.test(c))
return stream.eatWhile(/[A-Z|a-z|\.|_|\d]/),keywords.test(stream.current())?"keyword":"variable";
if(/[|/&^!+:\\\-*%$=~#;@><\.,?_\']/.test(c))
return null;
if(/[{}\(\[\]\)]/.test(c))
return null;
return"error";
}
function tokenLineComment(stream,state){
return stream.skipToEnd(),/\/\s*$/.test(stream.current())?(state.tokenize=tokenBlockComment)(stream,state):(state.tokenize=tokenBase),"comment";
}
function tokenBlockComment(stream,state){
var f=stream.sol()&&stream.peek()=="\\";
stream.skipToEnd();
if(f&&/^\\\s*$/.test(stream.current()))
state.tokenize=tokenBase;
return"comment";
}
function tokenCommentToEOF(stream){return stream.skipToEnd(),"comment";}
function tokenString(stream,state){
var escaped=false,next,end=false;
while((next=stream.next())){
if(next=="\""&&!escaped){end=true;break;}
escaped=!escaped&&next=="\\";
}
if(end)state.tokenize=tokenBase;
return"string";
}
function pushContext(state,type,col){state.context={prev:state.context,indent:state.indent,col:col,type:type};}
function popContext(state){state.indent=state.context.indent;state.context=state.context.prev;}
return{
startState:function(){
return{tokenize:tokenBase,
context:null,
indent:0,
col:0};
},
token:function(stream,state){
if(stream.sol()){
if(state.context&&state.context.align==null)
state.context.align=false;
state.indent=stream.indentation();
}
//if (stream.eatSpace()) return null;
var style=state.tokenize(stream,state);
if(style!="comment"&&state.context&&state.context.align==null&&state.context.type!="pattern"){
state.context.align=true;
}
if(curPunc=="(")pushContext(state,")",stream.column());
else if(curPunc=="[")pushContext(state,"]",stream.column());
else if(curPunc=="{")pushContext(state,"}",stream.column());
else if(/[\]\}\)]/.test(curPunc)){
while(state.context&&state.context.type=="pattern")popContext(state);
if(state.context&&curPunc==state.context.type)popContext(state);
}
else if(curPunc=="."&&state.context&&state.context.type=="pattern")popContext(state);
else if(/atom|string|variable/.test(style)&&state.context){
if(/[\}\]]/.test(state.context.type))
pushContext(state,"pattern",stream.column());
else if(state.context.type=="pattern"&&!state.context.align){
state.context.align=true;
state.context.col=stream.column();
}
}
return style;
},
indent:function(state,textAfter){
var firstChar=textAfter&&textAfter.charAt(0);
var context=state.context;
if(/[\]\}]/.test(firstChar))
while (context&&context.type=="pattern")context=context.prev;
var closing=context&&firstChar==context.type;
if(!context)
return 0;
else if(context.type=="pattern")
return context.col;
else if(context.align)
return context.col+(closing?0:1);
else
return context.indent+(closing?0:indentUnit);
}
};
});
CodeMirror.defineMIME("text/x-q","q"); | PypiClean |
/MDCatch-2.5-py3-none-any.whl/mdcatch/parser.py |
import os
import math
import time
from glob import iglob
from .config import *
from .utils import parseXml, parseMrc, parseTif, parseMdoc
class Parser:
    """Collect and normalise acquisition metadata for an EPU or SerialEM session.

    A single metadata file (xml / tif(f) / mdoc / mrc(s)) is parsed into
    ``self.acqDict``; helper methods then derive the quantities needed
    downstream: dose rates (:meth:`calcDose`), particle box/mask sizes
    (:meth:`calcBox`) and the movie / gain / defects file locations
    (:meth:`guessDataDir`).
    """

    def __init__(self):
        # set default values (constants come from mdcatch.config)
        self.mdPath = METADATA_PATH
        self.prjPath = PROJECT_PATH
        self.software = DEF_SOFTWARE
        self.user = None
        self.fn = None
        self.pipeline = DEF_PIPELINE
        self.picker = DEF_PICKER
        self.pickerModel = None
        self.symmetry = DEF_SYMMETRY
        self.size = DEF_PARTICLE_SIZE
        self.mode = 'SPA'
        # All values are kept as strings for easy templating downstream.
        self.acqDict = {
            'Mode': 'Linear',
            'NumSubFrames': '0',
            'Dose': '0',
            'OpticalGroup': 'opticsGroup1',
            'PhasePlateUsed': 'false',
            'GainReference': 'None',
            'DefectFile': 'None',
            'MTF': 'None'
        }

    def guessFn(self, prog="EPU"):
        """Return the first file in the metadata folder matching the
        program-specific glob pattern, or None if nothing matches."""
        regex = PATTERN_EPU if prog == "EPU" else PATTERN_SEM
        print("\nUsing regex: ", regex)
        # iglob is lazy, so only the first hit is ever materialised
        files = iglob(os.path.join(self.mdPath, regex))
        img = next(files, None)
        return img

    def parseMetadata(self, fn):
        """Dispatch *fn* to the format-specific parser (by file extension)
        and merge the result into ``self.acqDict``.

        Raises
        ------
        Exception
            If the extension is none of xml / tif(f) / mdoc / mrc(s).
        """
        if fn.endswith("xml"):
            acqDict = parseXml(fn)
        elif fn.endswith(("tif", "tiff")):
            acqDict = parseTif(fn)
        elif fn.endswith("mdoc"):
            acqDict = parseMdoc(fn)
        elif fn.endswith(("mrc", "mrcs")):
            acqDict = parseMrc(fn)
        else:
            raise Exception("Metadata format not recognized.")
        self.acqDict.update(acqDict)

    def calcDose(self):
        """Calculate dose rate per unbinned px per s.

        Stores string results in acqDict['DosePerFrame'] (e/A^2/frame) and
        acqDict['DoseOnCamera'] (e/unbinned_px/s).
        """
        numFr = int(self.acqDict['NumSubFrames'])
        dose_total = float(self.acqDict['Dose'])  # e/A^2
        # NOTE(review): an ExposureTime of 0 would raise ZeroDivisionError
        # below; assumed never to happen for valid metadata — confirm.
        exp = float(self.acqDict['ExposureTime'])  # s
        if self.acqDict['Mode'] == 'Super-resolution':
            pix = 2 * float(self.acqDict['PixelSpacing']) / int(self.acqDict['Binning'])  # A
        else:
            pix = float(self.acqDict['PixelSpacing']) / int(self.acqDict['Binning'])  # A
        if numFr:  # not 0
            dose_per_frame = dose_total / numFr  # e/A^2/frame
        else:
            dose_per_frame = 0
        dose_on_camera = dose_total * math.pow(pix, 2) / exp  # e/unbinned_px/s
        self.acqDict['DosePerFrame'] = str(dose_per_frame)
        self.acqDict['DoseOnCamera'] = str(dose_on_camera)

    def calcBox(self):
        """Calculate box, mask and downsampled (small) box sizes in pixels,
        based on the particle size and pixel spacing in acqDict.

        Raises
        ------
        ValueError
            If ``self.mode`` is neither 'SPA' nor 'Helical' (previously this
            crashed later with a NameError on an undefined box size).
        """
        size = self.acqDict['PtclSize']
        angpix = float(self.acqDict['PixelSpacing'])
        if self.acqDict['Mode'] == 'Super-resolution' and self.acqDict['Binning'] == '1':
            # since we always bin by 2 in mc if using super-res and bin 1
            angpix *= 2
        ptclSizePx = float(size) / angpix
        if self.mode == 'SPA':
            # use +10% for mask size
            self.acqDict['MaskSize'] = str(math.ceil(1.1 * ptclSizePx))
            # use +50% for box size, make it even
            boxSize = 1.5 * ptclSizePx
            self.acqDict['BoxSize'] = str(math.ceil(boxSize / 2.) * 2)
        elif self.mode == 'Helical':
            # ptclSizePx is filament width
            # use +20% for tube diameter
            self.acqDict['TubeDiam'] = str(math.ceil(1.2 * float(size)))
            # use mask size = 90% box size
            self.acqDict['MaskSize'] = str(math.ceil(1.62 * ptclSizePx))
            # use box size = 1.5x tube diam, make it even
            boxSize = 1.8 * ptclSizePx
            self.acqDict['BoxSize'] = str(math.ceil(boxSize / 2.) * 2)
        else:
            # fail early with a clear message instead of an obscure
            # NameError on boxSize in the loop below
            raise ValueError("Unknown mode: %s" % self.mode)
        # from relion_it.py script
        # Authors: Sjors H.W. Scheres, Takanori Nakane & Colin M. Palmer
        for box in (48, 64, 96, 128, 160, 192, 256, 288, 300, 320,
                    360, 384, 400, 420, 450, 480, 512, 640, 768,
                    896, 1024):
            # Don't go larger than the original box
            if box > boxSize:
                self.acqDict['BoxSizeSmall'] = str(boxSize)
                break
            # If Nyquist freq. is better than 7.5 A, use this
            # downscaled box, otherwise continue to next size up
            small_box_angpix = angpix * boxSize / box
            if small_box_angpix < 3.75:
                self.acqDict['BoxSizeSmall'] = str(box)
                break

    def guessDataDir(self, testmode=False):
        """Guess folder name with movies, gain and defects files.

        Also resolves the MTF file for the detector and replaces the raw
        detector id in acqDict with the real camera model name. With
        ``testmode=True`` the existence check of the movie folder is skipped.
        """
        movieDir, gainFn, defFn = 'None', 'None', 'None'
        camera = self.acqDict['Detector']
        scopeID = self.acqDict['MicroscopeID']
        voltage = self.acqDict['Voltage']
        # get MTF file
        # NOTE(review): SCOPE_DICT[scopeID] is indexed positionally
        # (0=scope name, 2/3=camera models) — see mdcatch.config for layout.
        if camera == 'EF-CCD':
            model = SCOPE_DICT[scopeID][3]
            if model is not None:
                self.acqDict['MTF'] = MTF_DICT[model] % voltage
        else:
            model = SCOPE_DICT[scopeID][2]
            if self.acqDict['Mode'] == 'Linear':
                self.acqDict['MTF'] = MTF_DICT['%s-linear' % model] % voltage
            else:
                self.acqDict['MTF'] = MTF_DICT['%s-count' % model] % voltage
        # update with real camera name
        self.acqDict['Detector'] = model
        if self.software == 'EPU':
            p1 = MOVIE_PATH_DICT[camera] % (SCOPE_DICT[scopeID][0], model)
            session = os.path.basename(self.mdPath)
            if camera == 'EF-CCD':
                movieDir = os.path.join(p1, "DoseFractions", session, EPU_MOVIES_DICT[model])
                movieBaseDir = os.path.join(p1, "DoseFractions", session)
                gainFiles = iglob(os.path.join(os.path.dirname(movieDir), GAIN_DICT[model]))
                gainFn = next(gainFiles, 'None')
            else:
                movieDir = os.path.join(p1, session, EPU_MOVIES_DICT[model])
                movieBaseDir = os.path.join(p1, session)
                # Falcon 4 EER gain reference
                if model == "Falcon4":
                    if self.acqDict['Mode'] == "EER":
                        gainFn = os.path.join(GAIN_DICT[model], self.acqDict['GainReference'])
                    else:  # MRC
                        if not EPU_MOVIES_DICT[model].endswith(".mrc"):
                            movieDir = movieDir.replace("_EER.eer", ".mrc")
            if not os.path.exists(movieBaseDir) and not testmode:
                raise FileNotFoundError("Movie folder %s does not exist!" % movieBaseDir)
        else:  # SerialEM
            movieDir = os.path.join(self.mdPath, PATTERN_SEM_MOVIES)
            gainFn = os.path.join(self.mdPath, self.acqDict['GainReference'])
            defFn = os.path.join(self.mdPath, self.acqDict['DefectFile'])
        # populate dict
        self.acqDict.update({
            'Software': self.software,
            'PrjPath': self.prjPath,
            'MoviePath': movieDir,
            'PickerModel': self.pickerModel
        })
        # only overwrite the defaults when the files actually exist
        if os.path.exists(gainFn):
            self.acqDict['GainReference'] = gainFn
        if os.path.exists(defFn):
            self.acqDict['DefectFile'] = defFn
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/summernote/lang/summernote-fr-FR.min.js | !function(e,r){if("object"==typeof exports&&"object"==typeof module)module.exports=r();else if("function"==typeof define&&define.amd)define([],r);else{var a=r();for(var i in a)("object"==typeof exports?exports:e)[i]=a[i]}}(self,(function(){return(e=jQuery).extend(e.summernote.lang,{"fr-FR":{font:{bold:"Gras",italic:"Italique",underline:"Souligné",clear:"Effacer la mise en forme",height:"Interligne",name:"Famille de police",strikethrough:"Barré",superscript:"Exposant",subscript:"Indice",size:"Taille de police"},image:{image:"Image",insert:"Insérer une image",resizeFull:"Taille originale",resizeHalf:"Redimensionner à 50 %",resizeQuarter:"Redimensionner à 25 %",floatLeft:"Aligné à gauche",floatRight:"Aligné à droite",floatNone:"Pas d'alignement",shapeRounded:"Forme: Rectangle arrondi",shapeCircle:"Forme: Cercle",shapeThumbnail:"Forme: Vignette",shapeNone:"Forme: Aucune",dragImageHere:"Faites glisser une image ou un texte dans ce cadre",dropImage:"Lachez l'image ou le texte",selectFromFiles:"Choisir un fichier",maximumFileSize:"Taille de fichier maximale",maximumFileSizeError:"Taille maximale du fichier dépassée",url:"URL de l'image",remove:"Supprimer l'image",original:"Original"},video:{video:"Vidéo",videoLink:"Lien vidéo",insert:"Insérer une vidéo",url:"URL de la vidéo",providers:"(YouTube, Google Drive, Vimeo, Vine, Instagram, DailyMotion or Youku)"},link:{link:"Lien",insert:"Insérer un lien",unlink:"Supprimer un lien",edit:"Modifier",textToDisplay:"Texte à afficher",url:"URL du lien",openInNewWindow:"Ouvrir dans une nouvelle fenêtre",useProtocol:"Utiliser le protocole par défaut"},table:{table:"Tableau",addRowAbove:"Ajouter une ligne au-dessus",addRowBelow:"Ajouter une ligne en dessous",addColLeft:"Ajouter une colonne à gauche",addColRight:"Ajouter une colonne à droite",delRow:"Supprimer la ligne",delCol:"Supprimer la colonne",delTable:"Supprimer le 
tableau"},hr:{insert:"Insérer une ligne horizontale"},style:{style:"Style",p:"Normal",blockquote:"Citation",pre:"Code source",h1:"Titre 1",h2:"Titre 2",h3:"Titre 3",h4:"Titre 4",h5:"Titre 5",h6:"Titre 6"},lists:{unordered:"Liste à puces",ordered:"Liste numérotée"},options:{help:"Aide",fullscreen:"Plein écran",codeview:"Afficher le code HTML"},paragraph:{paragraph:"Paragraphe",outdent:"Diminuer le retrait",indent:"Augmenter le retrait",left:"Aligner à gauche",center:"Centrer",right:"Aligner à droite",justify:"Justifier"},color:{recent:"Dernière couleur sélectionnée",more:"Plus de couleurs",background:"Couleur de fond",foreground:"Couleur de police",transparent:"Transparent",setTransparent:"Définir la transparence",reset:"Restaurer",resetToDefault:"Restaurer la couleur par défaut"},shortcut:{shortcuts:"Raccourcis",close:"Fermer",textFormatting:"Mise en forme du texte",action:"Action",paragraphFormatting:"Mise en forme des paragraphes",documentStyle:"Style du document",extraKeys:"Touches supplémentaires"},help:{insertParagraph:"Insérer paragraphe",undo:"Défaire la dernière commande",redo:"Refaire la dernière commande",tab:"Tabulation",untab:"Tabulation arrière",bold:"Mettre en caractère gras",italic:"Mettre en italique",underline:"Mettre en souligné",strikethrough:"Mettre en texte barré",removeFormat:"Nettoyer les styles",justifyLeft:"Aligner à gauche",justifyCenter:"Centrer",justifyRight:"Aligner à droite",justifyFull:"Justifier à gauche et à droite",insertUnorderedList:"Basculer liste à puces",insertOrderedList:"Basculer liste ordonnée",outdent:"Diminuer le retrait du paragraphe",indent:"Augmenter le retrait du paragraphe",formatPara:"Changer le paragraphe en cours en normal (P)",formatH1:"Changer le paragraphe en cours en entête H1",formatH2:"Changer le paragraphe en cours en entête H2",formatH3:"Changer le paragraphe en cours en entête H3",formatH4:"Changer le paragraphe en cours en entête H4",formatH5:"Changer le paragraphe en cours en entête 
H5",formatH6:"Changer le paragraphe en cours en entête H6",insertHorizontalRule:"Insérer séparation horizontale","linkDialog.show":"Afficher fenêtre d'hyperlien"},history:{undo:"Annuler la dernière action",redo:"Restaurer la dernière action annulée"},specialChar:{specialChar:"Caractères spéciaux",select:"Choisir des caractères spéciaux"}}}),{};var e})); | PypiClean |
/chatglm6bpkg-0.0.1.tar.gz/chatglm6bpkg-0.0.1/ptuning/main.py | # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
import json
import numpy as np
from datasets import load_dataset
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from ChatGLM6Bpkg.ptuning.trainer_seq2seq import Seq2SeqTrainer
from ChatGLM6Bpkg.ptuning.arguments import ModelArguments, DataTrainingArguments
logger = logging.getLogger(__name__)
def ptuning(**args):
    """Train / evaluate / predict ChatGLM-6B with P-tuning v2 (or full finetune).

    Keyword arguments are parsed into HuggingFace ``ModelArguments``,
    ``DataTrainingArguments`` and ``Seq2SeqTrainingArguments`` via
    ``HfArgumentParser.parse_dict``; the stages run are selected with
    ``do_train`` / ``do_eval`` / ``do_predict``. Returns a (possibly empty)
    dict of results.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    model_args, data_args, training_args = parser.parse_dict(args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    # datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    # NOTE(review): the two f-strings below are concatenated without a
    # separator, so "n_gpu: X" and "distributed training" run together
    # in the emitted log line.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Load dataset; the loader name is derived from the file extension
    # (e.g. "json"), as expected by datasets.load_dataset.
    data_files = {}
    if data_args.train_file is not None:
        data_files["train"] = data_args.train_file
        extension = data_args.train_file.split(".")[-1]
    if data_args.validation_file is not None:
        data_files["validation"] = data_args.validation_file
        extension = data_args.validation_file.split(".")[-1]
    if data_args.test_file is not None:
        data_files["test"] = data_args.test_file
        extension = data_args.test_file.split(".")[-1]
    raw_datasets = load_dataset(
        extension,
        data_files=data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Load pretrained model and tokenizer
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
    config.pre_seq_len = model_args.pre_seq_len
    config.prefix_projection = model_args.prefix_projection
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
    if model_args.ptuning_checkpoint is not None:
        # Evaluation
        # Loading extra state dict of prefix encoder
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
        prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
        # keep only the prefix-encoder tensors, stripping their name prefix
        new_prefix_state_dict = {}
        for k, v in prefix_state_dict.items():
            if k.startswith("transformer.prefix_encoder."):
                new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
        model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
    else:
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
    if model_args.quantization_bit is not None:
        print(f"Quantized to {model_args.quantization_bit} bit")
        model = model.quantize(model_args.quantization_bit)
    if model_args.pre_seq_len is not None:
        # P-tuning v2: model in fp16, but the trainable prefix encoder in fp32
        model = model.half()
        model.transformer.prefix_encoder.float()
    else:
        # Finetune
        model = model.float()
    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    elif training_args.do_predict:
        column_names = raw_datasets["test"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return
    # Get the column names for input/target.
    prompt_column = data_args.prompt_column
    response_column = data_args.response_column
    history_column = data_args.history_column
    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length
    # Builds ChatGLM-style multi-turn prompts ("[Round i]\n问:...\n答:...")
    # and tokenizes inputs/targets separately (teacher-forced eval).
    def preprocess_function_eval(examples):
        inputs, targets = [], []
        for i in range(len(examples[prompt_column])):
            if examples[prompt_column][i] and examples[response_column][i]:
                query = examples[prompt_column][i]
                if history_column is None or len(examples[history_column][i]) == 0:
                    prompt = query
                else:
                    prompt = ""
                    history = examples[history_column][i]
                    for turn_idx, (old_query, response) in enumerate(history):
                        prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response)
                    prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
                inputs.append(prompt)
                targets.append(examples[response_column][i])
        inputs = [prefix + inp for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True, padding=True)
        labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True)
        if data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    # Training variant: prompt and answer are concatenated into one
    # sequence; prompt positions are masked out of the loss with -100.
    def preprocess_function_train(examples):
        max_seq_length = data_args.max_source_length + data_args.max_target_length
        model_inputs = {
            "input_ids": [],
            "labels": [],
        }
        for i in range(len(examples[prompt_column])):
            if examples[prompt_column][i] and examples[response_column][i]:
                query, answer = examples[prompt_column][i], examples[response_column][i]
                if history_column is None:
                    prompt = query
                else:
                    prompt = ""
                    history = examples[history_column][i]
                    for turn_idx, (old_query, response) in enumerate(history):
                        prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response)
                    prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
                prompt = prefix + prompt
                a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
                b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
                # reserve room for the special tokens added below
                if len(a_ids) > data_args.max_source_length - 1:
                    a_ids = a_ids[: data_args.max_source_length - 1]
                if len(b_ids) > data_args.max_target_length - 2:
                    b_ids = b_ids[: data_args.max_target_length - 2]
                input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
                context_length = input_ids.index(tokenizer.bos_token_id)
                mask_position = context_length - 1
                labels = [-100] * context_length + input_ids[mask_position+1:]
                pad_len = max_seq_length - len(input_ids)
                input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
                labels = labels + [tokenizer.pad_token_id] * pad_len
                if data_args.ignore_pad_token_for_loss:
                    labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
                model_inputs["input_ids"].append(input_ids)
                model_inputs["labels"].append(labels)
        return model_inputs
    # Debug helper: dump the first tokenized example of a dataset.
    def print_dataset_example(example):
        print("input_ids",example["input_ids"])
        print("inputs", tokenizer.decode(example["input_ids"]))
        print("label_ids", example["labels"])
        print("labels", tokenizer.decode(example["labels"]))
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function_train,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        print_dataset_example(train_dataset[0])
    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function_eval,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
        print_dataset_example(eval_dataset[0])
    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function_eval,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
        print_dataset_example(predict_dataset[0])
    # Data collator
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=None,
        padding=False
    )
    # Metric: character-level Chinese ROUGE-1/2/L (via jieba tokenization)
    # and BLEU-4, each averaged over the evaluation examples.
    def compute_metrics(eval_preds):
        preds, labels = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        score_dict = {
            "rouge-1": [],
            "rouge-2": [],
            "rouge-l": [],
            "bleu-4": []
        }
        for pred, label in zip(decoded_preds, decoded_labels):
            hypothesis = list(jieba.cut(pred))
            reference = list(jieba.cut(label))
            rouge = Rouge()
            scores = rouge.get_scores(' '.join(hypothesis) , ' '.join(reference))
            result = scores[0]
            for k, v in result.items():
                score_dict[k].append(round(v["f"] * 100, 4))
            bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
            score_dict["bleu-4"].append(round(bleu_score * 100, 4))
        for k, v in score_dict.items():
            score_dict[k] = float(np.mean(v))
        return score_dict
    # Override the decoding parameters of Seq2SeqTrainer
    training_args.generation_max_length = (
        training_args.generation_max_length
        if training_args.generation_max_length is not None
        else data_args.val_max_target_length
    )
    training_args.generation_num_beams = (
        data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
    )
    # Initialize our Trainer
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        save_prefixencoder=model_args.pre_seq_len is not None
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        # elif last_checkpoint is not None:
        # checkpoint = last_checkpoint
        model.gradient_checkpointing_enable()
        model.enable_input_require_grads()
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        # trainer.save_model() # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    results = {}
    max_seq_length = data_args.max_source_length + data_args.max_target_length + 1
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="eval", do_sample=True, top_p=0.7, max_length=max_seq_length, temperature=0.95)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict", max_length=max_seq_length, do_sample=True, top_p=0.7, temperature=0.95)
        metrics = predict_results.metrics
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(
                    predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                predictions = [pred.strip() for pred in predictions]
                labels = tokenizer.batch_decode(
                    predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                labels = [label.strip() for label in labels]
                # one JSON object per line: {"labels": ..., "predict": ...}
                output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt")
                with open(output_prediction_file, "w", encoding="utf-8") as writer:
                    for p, l in zip(predictions, labels):
                        res = json.dumps({"labels": l, "predict": p}, ensure_ascii=False)
                        writer.write(f"{res}\n")
    return results
def load_ptuning_checkpoint(
    model_name_or_path,
    ptuning_checkpoint,
    pre_seq_len,
    trust_remote_code=True,
    quantization_bit=None
):
    """Load a base ChatGLM model together with a P-tuning v2 checkpoint.

    The base weights come from *model_name_or_path*; the prefix-encoder
    weights are read from ``pytorch_model.bin`` inside *ptuning_checkpoint*.
    The model is optionally quantized, cast to fp16, and the prefix encoder
    is kept in fp32. Returns a ``(tokenizer, config, model)`` triple.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_name_or_path, trust_remote_code=trust_remote_code)
    config = AutoConfig.from_pretrained(
        model_name_or_path, trust_remote_code=trust_remote_code,
        pre_seq_len=pre_seq_len)
    model = AutoModel.from_pretrained(
        model_name_or_path, config=config, trust_remote_code=trust_remote_code)

    # Keep only the prefix-encoder tensors, stripping their name prefix.
    checkpoint_file = os.path.join(ptuning_checkpoint, "pytorch_model.bin")
    full_state = torch.load(checkpoint_file)
    tag = "transformer.prefix_encoder."
    prefix_state = {
        key[len(tag):]: tensor
        for key, tensor in full_state.items()
        if key.startswith(tag)
    }
    model.transformer.prefix_encoder.load_state_dict(prefix_state)

    if quantization_bit is not None:
        print(f"Quantized to {quantization_bit} bit")
        model = model.quantize(quantization_bit)
    model = model.half()
    model.transformer.prefix_encoder.float()
    return tokenizer, config, model
# def _mp_fn(index):
# # For xla_spawn (TPUs)
# main()
if __name__ == "__main__":
    # Pick the recipe to run: switch `mode` to "train" for P-tuning v2
    # training on AdvertiseGen, or keep "evaluate" to generate predictions
    # from a saved prefix-encoder checkpoint.
    # (Removed stray " | PypiClean" extraction artifact from the last line.)
    # mode = "train"
    mode = "evaluate"
    if mode == "train":
        ptuning(
            do_train=True,
            train_file="AdvertiseGen/train.json",
            validation_file="AdvertiseGen/dev.json",
            prompt_column="content",
            response_column="summary",
            overwrite_cache=True,
            model_name_or_path="THUDM/chatglm-6b",
            output_dir="output/adgen-chatglm-6b-pt-128-2e-2",
            overwrite_output_dir=True,
            max_source_length=64,
            max_target_length=64,
            per_device_train_batch_size=1,
            per_device_eval_batch_size=1,
            gradient_accumulation_steps=16,
            predict_with_generate=True,
            max_steps=3000,
            logging_steps=10,
            save_steps=100,
            learning_rate=2e-2,
            pre_seq_len=128,
            quantization_bit=4
        )
    elif mode == "evaluate":
        ptuning(
            do_predict=True,
            validation_file="AdvertiseGen/dev.json",
            test_file="AdvertiseGen/dev.json",
            overwrite_cache=True,
            prompt_column="content",
            response_column="summary",
            model_name_or_path="THUDM/chatglm-6b",
            ptuning_checkpoint="./output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100",
            output_dir="./output/adgen-chatglm-6b-pt-128-2e-2",
            overwrite_output_dir=True,
            max_source_length=64,
            max_target_length=64,
            per_device_eval_batch_size=1,
            predict_with_generate=True,
            pre_seq_len=128,
            quantization_bit=4
        )
/1secMail-1.1.0.tar.gz/1secMail-1.1.0/README.md | <p align="center">
<br>
<img src="https://github.com/qvco/1secMail-Python/assets/77382767/fde69c1a-b95f-4d78-af1a-2dca315204bc" alt="1secMail" width="700">
<!-- <br>
1secMail for Python
<br> -->
</p>
<h4 align="center">An API wrapper for <a href="https://www.1secmail.com/" target="_blank">www.1secmail.com</a> written in Python.</h4>
<p align="center">
<img src="https://img.shields.io/github/release/qvco/1secMail-Python">
<img src="https://img.shields.io/badge/python-3.8-blue.svg">
<img src="https://img.shields.io/badge/License-MIT-blue.svg">
</p>
### About
This is an easy-to-use yet full-featured Python API wrapper for www.1secmail.com ↗ using the official 1secMail API. It allows you to easily create temporary email addresses for testing, verification, or other purposes where you need a disposable email address.
> Asynchronous operations are also supported!:thumbsup:
### Install
To install the package, you'll need Python 3.8 or above installed on your computer. From your command line:
```bash
pip install 1secMail
```
<br>
> **Note**
> If you're willing to install the development version, do the following:
```bash
git clone https://github.com/qvco/1secMail-Python.git
cd 1secMail-Python
pip install -r requirements.txt
pip install -e .
```
## Usage
### Generating Email Addresses
To generate a list of random email addresses, use the `random_email()` method:
```python
import secmail
client = secmail.Client()
client.random_email(amount=3)
>>> ['c3fho3cry1@1secmail.net', '5qcd3d36zr@1secmail.org', 'b6fgeothtg@1secmail.net']
```
You can also generate a custom email address by specifying the username and domain:
> **Note**
> Specifying a domain is optional!
```python
client.custom_email(username="bobby-bob", domain="kzccv.com")
>>> 'bobby-bob@kzccv.com'
```
### Receiving Messages
To wait until a new message is received, use the `await_new_message()` method:
```python
message = client.await_new_message("bobby-bob@kzccv.com")
```
To check all messages received on a particular email address, use the `get_inbox()` method and pass the email address:
```python
inbox = client.get_inbox("bobby-bob@kzccv.com")
for message in inbox:
print(message.id)
print(message.from_address)
print(message.subject)
print(message.date)
```
You can also fetch a single message using the `get_message()` method and passing the email address and message ID:
```python
message = client.get_message(address="bobby-bob@kzccv.com", message_id=235200687)
print(message.id)
print(message.subject)
print(message.body)
print(message.text_body)
print(message.html_body)
print(message.attachments)
print(message.date)
```
### Downloading an attachment
You can download an attachment from a message in the inbox of a specified email address using the download_attachment method like this:
```python
client.download_attachment(address, message_id, attachment_filename)
>>> 'Path: (C:\Users\user\path/config/rocket.png), Size: 49071B'
```
## Asynchronous Client
### Generating Email Addresses
To generate a list of random email addresses, use the `random_email()` method:
```python
import asyncio
import secmail
async def main():
client = secmail.AsyncClient()
email_addresses = await client.random_email(amount=3)
print(email_addresses)
asyncio.run(main())
>>> ['c3fho3cry1@1secmail.net', '5qcd3d36zr@1secmail.org', 'b6fgeothtg@1secmail.net']
```
You can also generate a custom email address by specifying the username and domain:
> **Note**
> Specifying a domain is optional!
```python
await client.custom_email(username="bobby-bob", domain="kzccv.com")
>>> 'bobby-bob@kzccv.com'
```
### Receiving Messages
To wait until a new message is received, use the `await_new_message()` method:
```python
import asyncio
import secmail
async def main():
client = secmail.AsyncClient()
message = await client.await_new_message("bobby-bob@kzccv.com")
print(f"{message.from_address}: {message.subject}")
asyncio.run(main())
```
To check all messages received on a particular email address, use the `get_inbox()` method and pass the email address:
```python
import asyncio
import secmail
async def main():
client = secmail.AsyncClient()
inbox = await client.get_inbox("bobby-bob@kzccv.com")
print(f"You have {len(inbox)} messages in your inbox.")
for message in inbox:
print(message.id)
print(message.from_address)
print(message.subject)
print(message.date)
asyncio.run(main())
```
You can also fetch a single message using the `get_message()` method and passing the email address and message ID:
```python
import asyncio
import secmail
async def main():
client = secmail.AsyncClient()
address = "bobby-bob@kzccv.com"
inbox = await client.get_inbox(address)
message_id = inbox[0].id
message = await client.get_message(address, message_id)
print(message.id)
print(message.subject)
print(message.body)
print(message.text_body)
print(message.html_body)
print(message.attachments)
print(message.date)
asyncio.run(main())
```
### Downloading an attachment
You can download an attachment from a message in the inbox of a specified email address using the download_attachment method like this:
```python
import asyncio
import secmail
async def main():
client = secmail.AsyncClient()
address = "bobby-bob@kzccv.com"
inbox = await client.get_inbox(address)
message_id = inbox[0].id
message = await client.get_message(address, message_id)
attachment_filename = message.attachments[0].filename
await client.download_attachment(address, message_id, attachment_filename)
asyncio.run(main())
>>> 'Path: (C:\Users\user\path/config/rocket.png), Size: 49071B'
```
## License
This software is licensed under the [MIT](https://github.com/qvco/1secMail-Python/blob/master/LICENSE) © [Qvco](https://github.com/qvco).
| PypiClean |
/CatKit-0.5.4-py3-none-any.whl/catkit/hub/ase_tools/__init__.py | import sys
from ase import Atoms
from ase.io import read
# from ase.io.trajectory import convert
import numpy as np
import ase
import copy
from catkit.hub.tools import get_atoms, get_state, clear_prefactor
# A lot of functions from os.path
# in python 2 moved to os. and changed
# their signature. Pathlib can be
# installed under python2.X with
# pip install pathlib2 and is in the
# standard library in Python 3,
# hence we use it as a compatibility
# library
# Probe the stdlib pathlib first; on Python 2 it is either missing
# (ImportError) or incomplete (AttributeError on expanduser), in which
# case fall back to the pathlib2 backport.
try:
    from pathlib import Path
    Path().expanduser()
except (ImportError, AttributeError):
    from pathlib2 import Path

# Filled in by collect_structures() when a publication.txt file is found.
PUBLICATION_TEMPLATE = ''
def get_chemical_formula(atoms, mode='metal'):
"""
Compatibility function, return mode=metal, when
available, mode=hill, when not (ASE <= 3.13)
"""
try:
return atoms.get_chemical_formula(mode=mode)
except ValueError:
return atoms.get_chemical_formula(mode='hill')
def symbols(atoms):
formula = get_chemical_formula(atoms)
symbols = ase.symbols.string2symbols(formula)
return ''.join(symbols)
def collect_structures(foldername, verbose=False, level='*'):
structures = []
if verbose:
print(foldername)
for i, filename in enumerate(Path(foldername).glob(level)):
posix_filename = str(filename.as_posix())
if verbose:
print(i, posix_filename)
if posix_filename.endswith('publication.txt'):
with open(posix_filename) as infile:
global PUBLICATION_TEMPLATE
PUBLICATION_TEMPLATE = infile.read()
elif Path(posix_filename).is_file():
try:
filetype = ase.io.formats.filetype(posix_filename)
except Exception as e:
continue
if filetype:
try:
structure = ase.io.read(posix_filename)
structure.info['filename'] = posix_filename
structure.info['filetype'] = ase.io.formats.filetype(
posix_filename)
try:
structure.get_potential_energy()
# ensure that the structure has an energy
structures.append(structure)
except RuntimeError:
print("Did not add {posix_filename} since it has no energy"
.format(
posix_filename=posix_filename,
))
except TypeError:
print("Warning: Could not read {posix_filename}"
.format(
posix_filename=posix_filename,
))
except StopIteration:
print("Warning: StopIteration {posix_filename} hit."
.format(
posix_filename=posix_filename,
))
except IndexError:
print("Warning: File {posix_filename} looks incomplete"
.format(
posix_filename=posix_filename,
))
except OSError as e:
print("Error with {posix_filename}: {e}".format(
posix_filename=posix_filename,
e=e,
))
except AssertionError as e:
print("Hit an assertion error with {posix_filename}: {e}".format(
posix_filename=posix_filename,
e=e,
))
except ValueError as e:
print("Trouble reading {posix_filename}: {e}".format(
posix_filename=posix_filename,
e=e,
))
except DeprecationWarning as e:
print("Trouble reading {posix_filename}: {e}".format(
posix_filename=posix_filename,
e=e,
))
return structures
def get_energies(atoms_list):
""" Potential energy for a list of atoms objects"""
if len(atoms_list) == 1:
return atoms_list[0].get_potential_energy()
elif len(atoms_list) > 1:
energies = []
for atoms in atoms_list:
energies.append(atoms.get_potential_energy())
return energies
def get_atomic_numbers(atoms):
return list(atoms.get_atomic_numbers())
def get_formula_from_numbers(numbers):
formula = Atoms(numbers).get_chemical_formula(mode='all')
return formula
def get_numbers_from_formula(formula):
atoms = Atoms(formula)
return get_atomic_numbers(atoms)
def get_reaction_energy(structures, reaction, reaction_atoms, states,
prefactors, prefactors_TS, energy_corrections):
energies = {}
for key in structures.keys():
energies.update({key: ['' for n in range(len(structures[key]))]})
for key, atoms_list in structures.items():
for i, atoms in enumerate(atoms_list):
try:
name = clear_prefactor(reaction[key][i])
except BaseException:
name = None
if name in energy_corrections.keys():
Ecor = energy_corrections[name]
else:
Ecor = 0
energies[key][i] = prefactors[key][i] * \
(atoms.get_potential_energy() + Ecor)
# Reaction energy:
energy_reactants = np.sum(energies['reactants'])
energy_products = np.sum(energies['products'])
reaction_energy = energy_products - energy_reactants
# Activation energy
if 'TS' in structures.keys():
# Is a different empty surface used for the TS?
if 'TSempty' in structure.keys():
for key in reaction_atoms.keys():
if '' in reaction_atoms[key]:
index = reaction_atoms[key].index('')
empty = structures[key][index]
tsempty = structures['TSempty'][0]
tsemptydiff = tsempty.get_potential_energy - \
empty.get_potential_energy()
for i, structure in enumerate(structures['reactants']):
try:
name = clear_prefactor(reaction['reactants'][i])
except BaseException:
name = None
if name in energy_corrections.keys():
Ecor = energy_corrections[name]
else:
Ecor = 0
energies['reactants'][i] = prefactors_TS['reactants'][i]\
* structure.get_potential_energy() + Ecor
if 'TSempty' in structures.keys() and \
states['reactants'][i] == 'star':
energies['reactants'][i] += prefactors_TS['reactants'][i]\
* tsemptydiff
energy_reactants = np.sum(energies['reactants'])
energy_TS = energies['TS'][0]
activation_energy = energy_TS - energy_reactants
else:
activation_energy = None
return reaction_energy, activation_energy
def get_layers(atoms):
tolerance = 0.01
d = atoms.positions[:, 2]
keys = np.argsort(d)
ikeys = np.argsort(keys)
mask = np.concatenate(([True], np.diff(d[keys]) > tolerance))
layer_i = np.cumsum(mask)[ikeys]
if layer_i.min() == 1:
layer_i -= 1
return layer_i
def get_surface_composition(atoms):
if len(np.unique(atoms.get_atomic_numbers())) == 1:
return atoms.get_chemical_symbols()[0]
layer_i = get_layers(atoms)
top_layer_i = np.max(layer_i)
atom_i = np.where(layer_i >= top_layer_i - 1)[0]
layer_atoms = atoms[atom_i]
surface_composition = layer_atoms.get_chemical_formula(mode='metal')
return surface_composition
def get_n_layers(atoms):
layer_i = get_layers(atoms)
n = np.max(layer_i)
return n
def get_bulk_composition(atoms):
if len(np.unique(atoms.get_atomic_numbers())) == 1:
return atoms.get_chemical_symbols()[0]
layer_i = get_layers(atoms)
top_layer_i = np.max(layer_i)
compositions = []
for i in range(0, top_layer_i + 1):
atom_i = np.where(layer_i == top_layer_i - i)[0]
atoms_layer = atoms[atom_i]
if len(np.unique(atoms_layer.get_atomic_numbers())) == 1:
c = atoms_layer.get_chemical_symbols()[0]
compositions.append(c)
else:
c = atoms[atom_i].get_chemical_formula(mode='metal')
compositions.append(c)
compositions = np.array(compositions)
same_next_layer = compositions[1:] == compositions[:-1]
bulk_compositions = compositions[:-1][same_next_layer]
if len(bulk_compositions) > 0 and \
all(c == bulk_compositions[0] for c in bulk_compositions):
bulk_composition = bulk_compositions[0]
else:
bulk_composition = None
return bulk_composition
def check_in_ase(atoms, ase_db, energy=None):
"""Check if entry is allready in ASE db"""
db_ase = ase.db.connect(ase_db)
if energy is None:
energy = atoms.get_potential_energy()
formula = get_chemical_formula(atoms)
rows = db_ase.select(energy=energy)
n = 0
ids = []
for row in rows:
if formula == row.formula:
n += 1
ids.append(row.id)
if n > 0:
id = ids[0]
unique_id = db_ase.get(id)['unique_id']
return id, unique_id
else:
return None, None
def _normalize_key_value_pairs_inplace(data):
for key in data:
if isinstance(data[key], np.int64):
data[key] = int(data[key])
def write_ase(atoms, db_file, stdout=sys.stdout, user=None, data=None, **key_value_pairs):
"""Connect to ASE db"""
db_ase = ase.db.connect(db_file)
_normalize_key_value_pairs_inplace(key_value_pairs)
id = db_ase.write(atoms, data=data, **key_value_pairs)
stdout.write(' writing atoms to ASE db row id = {}\n'.format(id))
unique_id = db_ase.get(id)['unique_id']
return unique_id
def update_ase(db_file, identity, stdout, **key_value_pairs):
"""Connect to ASE db"""
db_ase = ase.db.connect(db_file)
_normalize_key_value_pairs_inplace(key_value_pairs)
count = db_ase.update(identity, **key_value_pairs)
stdout.write(' Updating {0} key value pairs in ASE db row id = {1}\n'
.format(count, identity))
return
def get_reaction_from_folder(folder_name):
reaction = {}
if '__' in folder_name: # Complicated reaction
if '-' in folder_name and '_-' not in folder_name:
# intermediate syntax
a, b = folder_name.split('-')
folder_name = a + '_-' + b
reaction.update({'reactants': folder_name.split('__')[0].split('_'),
'products': folder_name.split('__')[1].split('_')})
elif '_' in folder_name: # Standard format
AB, A, B = folder_name.split('_')
if '-' in A:
A = A.split('-')
A[1] = '-' + A[1]
products = [A[0], A[1], B]
else:
products = [A, B]
reaction.update({'reactants': [AB],
'products': products})
else:
raise AssertionError('problem with folder {}'.format(folder_name))
sites = {}
for key, mollist in reaction.items():
for n, mol in enumerate(mollist):
if '@' in mol:
mol, site = mol.split('@')
clean_mol = clear_prefactor(mol)
if not clean_mol in sites:
sites.update({clean_mol: site})
else:
site0 = sites.get(clean_mol)
if not site0 == site: # sites is a list
diff_sites = [site0, site]
sites.update({clean_mol: diff_sites})
reaction[key][n] = mol
if 'gas' not in mol and 'star' not in mol:
reaction[key][n] = mol + 'star'
for key, mollist in reaction.items():
n_star = mollist.count('star')
if n_star > 1:
for n in range(n_star):
mollist.remove('star')
mollist.append(str(n_star) + 'star')
return reaction, sites
def get_reaction_atoms(reaction):
reaction_atoms = {'reactants': [],
'products': []}
prefactors = {'reactants': [],
'products': []}
states = {'reactants': [],
'products': []}
for key, mollist in reaction.items():
for molecule in mollist:
atoms, prefactor = get_atoms(molecule)
reaction_atoms[key].append(atoms)
prefactors[key].append(prefactor)
state = get_state(molecule)
states[key].append(state)
prefactors_TS = copy.deepcopy(prefactors)
# Balance the number of slabs on each side of reaction
n_star = {'reactants': 0,
'products': 0}
for key, statelist in states.items():
for j, s in enumerate(statelist):
if s == 'star':
n_star[key] += prefactors[key][j]
n_r = n_star['reactants']
n_p = n_star['products']
diff = n_p - n_r
if abs(diff) > 0:
if diff > 0: # add empty slabs to left-hand side
n_r += diff
key = 'reactants'
else: # add to right-hand side
diff *= -1 # diff should be positive
n_p += diff
key = 'products'
if '' not in reaction_atoms[key]:
reaction[key].append('star')
prefactors[key].append(diff)
if key == 'reactants':
prefactors_TS[key].append(1)
states[key].append('star')
reaction_atoms[key].append('')
else:
index = states[key].index('star')
prefactors[key][index] += diff
if key == 'reactants':
prefactors_TS[key][index] += diff
if n_r > 1: # Balance slabs for transition state
count_empty = 0
if '' in reaction_atoms['reactants']:
index = reaction_atoms['reactants'].index('')
count_empty = prefactors_TS['reactants'][index]
prefactors_TS['reactants'][index] = -(n_r - count_empty - 1)
else:
reaction_atoms['reactants'].append('')
prefactors['reactants'].append(0)
states['reactants'].append('star')
prefactors_TS['reactants'].append(-n_r + 1)
else:
if '' in reaction_atoms['reactants']:
index = reaction_atoms['reactants'].index('')
prefactors_TS['reactants'][index] = 1
return reaction_atoms, prefactors, prefactors_TS, states
def debug_assert(expression, message, debug=False):
if debug:
try:
assert expression, message
except AssertionError as e:
print(e)
return False
else:
assert expression, message
return True | PypiClean |
/DeepManufacturing-0.0.7.tar.gz/DeepManufacturing-0.0.7/README.md | # ManufacturingNet
[Website](http://manufacturingnet.io/) | [Documentation](https://manufacturingnet.readthedocs.io/en/latest/)
ManufacturingNet provides a sustainable, open-source ecosystem of modern artificial intelligence (AI) tools for tackling diverse engineering challenges.
Written in Python3 and designed for ease of use, ManufacturingNet's machine learning library simplifies AI for manufacturing professionals.
ManufacturingNet is developed and maintained by the Mechanical and AI Lab (MAIL) at Carnegie Mellon University.
For more information, visit our website, [manufacturingnet.io.](http://manufacturingnet.io/)
## Requirements
To use ManufacturingNet, you will need a version of [Python](https://www.python.org/downloads/) greater than 3.4 installed.
To check if Python3 is installed, open the terminal on Linux/MacOS or PowerShell on Windows and run the following command:
```bash
python3 --version
```
To install ManufacturingNet and its dependencies, you will need [pip](https://pip.pypa.io/en/stable/), the Python package manager. If you have a version of Python greater than 3.4, pip should already be installed.
To check if pip is installed, open the terminal/PowerShell and run the following command:
```bash
pip --version
```
ManufacturingNet depends on the following packages:
- [Matplotlib](https://matplotlib.org/)
- [NumPy](https://numpy.org/)
- [Pillow](https://python-pillow.org/)
- [PyTorch](https://pytorch.org/)
- [SciPy](https://www.scipy.org/)
- [Scikit-Learn](https://scikit-learn.org/stable/)
- [XGBoost](https://xgboost.readthedocs.io/en/latest/)
These packages will be automatically installed when you install ManufacturingNet.
### Handling Import Errors
The above packages should be all you need to run ManufacturingNet, but if you run into errors like `ImportError: No module named ModuleName`, try installing the module with pip like so:
```bash
pip install ModuleName
```
## Installation
After you've installed the above requirements, open the terminal/PowerShell and run the following command:
```bash
pip install DeepManufacturing
```
## Usage
To start using ManufacturingNet in any Python environment, import the library as such:
```python
import ManufacturingNet
```
If you don't need the entire library, you can import specific classes using dot notation and "from" statements. For example, to import the linear regression model, use this code:
```python
from ManufacturingNet.models import LinRegression
```
To import the feature extraction functionality, use this code:
```python
from ManufacturingNet.featurization import Featurizer
```
When in doubt, check the [documentation](https://manufacturingnet.readthedocs.io/en/latest/)!
## License
[MIT](https://choosealicense.com/licenses/mit/)
| PypiClean |
/DeepManufacturing-0.0.7.tar.gz/DeepManufacturing-0.0.7/ManufacturingNet/models/resnet.py | import datetime
import os
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as scheduler
import torch.utils.data as data_utils
import torchvision
from torch.utils import data as data_utils
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.models import (resnet18, resnet34, resnet50, resnet101,
resnext50_32x4d)
def conv2D_output_size(img_size, kernel_size, stride, padding):
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int))
return outshape
def spacing():
print('='*25)
class Network(nn.Module):
def __init__(self, img_size, num_class):
super(Network, self).__init__()
self.num_class = num_class
self.channel=img_size[-1]
self.img_size = img_size[:-1]
self.get_pretrained_model()
def get_pretrained_model(self):
print('Question [2/9]: Model Selection:')
print('\n')
self.pretrained_dict = {1: resnet18, 2: resnet34,
3: resnet50, 4: resnet101, 5: resnext50_32x4d}
gate = 0
while gate != 1:
pretrained_input = input('Do you want pretrained model? (y/n): ').replace(' ','')
if pretrained_input.lower() == 'y':
self.pretrained = True
gate = 1
elif pretrained_input.lower() == 'n':
self.pretrained = False
gate = 1
else:
print('Please enter valid input')
gate = 0
while gate != 1:
self.model_select = int(input('Please enter any number between 1 to 5 to select the model:\
\n[1:ResNet18,2:ResNet34,3:ResNet50,4:ResNet101,5:ResNext50]').replace(' ',''))
if (1 <= self.model_select <= 5):
model = self.pretrained_dict[self.model_select](
pretrained=self.pretrained)
gate = 1
else:
print('Please enter valid input')
model.conv1 = nn.Conv2d(self.channel, 64, kernel_size=(
7, 7), stride=(2, 2), padding=(3, 3), bias=False)
if self.model_select in [1, 2]:
model.fc = nn.Linear(512, self.num_class)
else:
model.fc = nn.Linear(2048, self.num_class)
self.net = model.double()
spacing()
# The following class will be called by a user. The class calls other necessary classes to build a complete pipeline required for training
class ResNet():
"""
Documentation Link:
"""
def __init__(self, train_data_address, val_data_address, shuffle=True):
# Lists used in the functions below
self.criterion_list = {1: nn.CrossEntropyLoss(), 2: torch.nn.L1Loss(
), 3: torch.nn.SmoothL1Loss(), 4: torch.nn.MSELoss()}
self.train_address = train_data_address
self.val_address = val_data_address
self.shuffle = shuffle
self.get_default_paramters() # getting default parameters argument
self.num_classes = self.get_num_classes() # getting the number of classes
print('1/8 - Image size')
self.get_image_size() # getting the image size (resized or original)
# building a network architecture
self.net = (Network(self.img_size, self.num_classes)).net
print('='*25)
print('3/7 - Batch size input')
# getting a batch size for training and validation
self._get_batchsize_input()
print('='*25)
print('4/7- Loss function')
self._get_loss_function() # getting a loss function
print('='*25)
print('5/7 - Optimizer')
self._get_optimizer() # getting an optimizer input
print('='*25)
print('6/7 - Scheduler')
self._get_scheduler() # getting a scheduler input
self._set_device() # setting the device to gpu or cpu
print('='*25)
print('7/7 - Number of epochs')
self._get_epoch() # getting an input for number oftraining epochs
self.main() # run function
def get_default_paramters(self):
# Method for getting a binary input for default paramters
gate = 0
while gate != 1:
self.default = input(
'Do you want default values for all the training parameters (y/n)? ').replace(' ','')
if self.default == 'y' or self.default == 'Y' or self.default == 'n' or self.default == 'N':
if self.default.lower() == 'y':
self.default_gate = True
else:
self.default_gate = False
gate = 1
else:
print('Enter a valid input')
print(' ')
print(' ')
def check_address(self, address):
isfile = os.path.isfile(address)
return isfile
def get_num_classes(self):
train_num_folder = 0
train_num_files = 0
for _, dirnames, filenames in os.walk(self.train_address):
train_num_folder += len(dirnames)
train_num_files += len(filenames)
if train_num_files == 0:
print('Train data: Zero images found.\n System exit initialized')
sys.exit()
val_num_folder = 0
val_num_files = 0
for _, dirnames, filenames in os.walk(self.val_address):
val_num_folder += len(dirnames)
val_num_files += len(filenames)
if val_num_files == 0:
print('Validation data: Zero images found.\n System exit initialized')
sys.exit()
if train_num_folder != val_num_folder:
print(
'Warning: Number of folders in the Validation set and Training set is not the same.')
print('Number of classes: ', train_num_folder)
print('Total number of training images: ', train_num_files)
print('Total number of validation images: ', val_num_files)
spacing()
return train_num_folder
def get_image_size(self):
gate = 0
while gate != 1:
self.img_size = []
print('All the images must have same size.')
size_input = (input('Please enter the dimensions to which images need to be resized (heigth, width, channels): \nFor example - 228, 228, 1 (For gray scale conversion)\n If all images have same size, enter the actual image size (heigth, width, channels) :\n ')).replace(' ','')
size_input = size_input.split(',')
if len(size_input) == 3:
for i in range(len(size_input)):
if size_input[i].isnumeric() and (1 <= int(size_input[i])):
self.img_size.append(int(size_input[i]))
self.img_size = tuple(self.img_size)
if len(self.img_size) == 3 and (self.img_size[-1] == 1 or self.img_size[-1] == 3):
gate = 1
else:
print(
'Please enter a valid input.\n Image size must be positive integers and number of channels can be 1 or 3')
else:
print('Please enter a valid input')
spacing()
def _get_batchsize_input(self):
# Method for getting batch size input
gate = 0
while gate != 1:
self.batchsize = (input('Please enter the batch size: ')).replace(' ','')
if self.batchsize.isnumeric() and int(self.batchsize) > 0:
self.batchsize = int(self.batchsize)
gate = 1
else:
print('Please enter a valid input')
def _get_loss_function(self):
# Method for getting a loss function for training
self.criterion_input = '1'
self.criterion = self.criterion_list[int(self.criterion_input)]
print('Loss function: CrossEntropy()')
def _get_optimizer(self):
# Method for getting a optimizer input
gate = 0
while gate != 1:
if self.default_gate == True:
print('Default optimizer selected : Adam')
self.optimizer_input = '1'
else:
self.optimizer_input = (input(
'Please enter the optimizer index for the problem \n Optimizer_list - [1: Adam, 2: SGD] \n For default optimizer, please directly press enter without any input: ')).replace(' ','')
if self.optimizer_input == '': # handling default case for optimizer
print('Default optimizer selected : Adam')
self.optimizer_input = '1'
if self.optimizer_input.isnumeric() and int(self.optimizer_input) > 0 and int(self.optimizer_input) < 3:
gate = 1
else:
print('Please enter a valid input')
print(' ')
print(' ')
gate = 0
while gate != 1:
if self.default_gate == True:
print('Default value for learning rate selected : 0.001')
self.user_lr = '0.001'
else:
self.user_lr = input(
'Please enter a required value float input for learning rate (learning rate > 0) \n For default learning rate, please directly press enter without any input: ').replace(' ','')
if self.user_lr == '': # handling default case for learning rate
print('Default value for learning rate selected : 0.001')
self.user_lr = '0.001'
if self.user_lr.replace('.', '').isdigit():
if float(self.user_lr) > 0:
self.lr = float(self.user_lr)
gate = 1
else:
print('Please enter a valid input')
print(' ')
self.optimizer_list = {1: optim.Adam(self.net.parameters(
), lr=self.lr), 2: optim.SGD(self.net.parameters(), lr=self.lr)}
self.optimizer = self.optimizer_list[int(self.optimizer_input)]
print(' ')
def _get_scheduler(self):
# Method for getting scheduler
gate = 0
while gate != 1:
if self.default_gate == True:
print('By default no scheduler selected')
self.scheduler_input = '1'
else:
self.scheduler_input = input(
'Please enter the scheduler index for the problem: Scheduler_list - [1: None, 2:StepLR, 3:MultiStepLR] \n For default option of no scheduler, please directly press enter without any input: ').replace(' ','')
if self.scheduler_input == '':
print('By default no scheduler selected')
self.scheduler_input = '1'
if self.scheduler_input.isnumeric() and int(self.scheduler_input) > 0 and int(self.scheduler_input) < 4:
gate = 1
else:
print('Please enter a valid input')
print(' ')
if self.scheduler_input == '1':
print(' ')
self.scheduler = None
elif self.scheduler_input == '2':
print(' ')
gate = 0
while gate != 1:
self.step = (
input('Please enter a step value int input (step > 0): ')).replace(' ','')
if self.step.isnumeric() and int(self.step) > 0:
self.step = int(self.step)
gate = 1
else:
print('Please enter a valid input')
print(' ')
print(' ')
gate = 0
while gate != 1:
self.gamma = (input(
'Please enter a Multiplying factor value float input (Multiplying factor > 0): ')).replace(' ','')
if self.gamma.replace('.', '').isdigit():
if float(self.gamma) > 0:
self.gamma = float(self.gamma)
gate = 1
else:
print('Please enter a valid input')
print(' ')
self.scheduler = scheduler.StepLR(
self.optimizer, step_size=self.step, gamma=self.gamma)
elif self.scheduler_input == '3':
print(' ')
gate = 0
while gate != 1:
self.milestones_input = (
input('Please enter values of milestone epochs int input (Example: 2, 6, 10): ')).replace(' ','')
self.milestones_input = self.milestones_input.split(',')
for i in range(len(self.milestones_input)):
if self.milestones_input[i].isnumeric() and int(self.milestones_input[i]) > 0:
gate = 1
else:
gate = 0
break
if gate == 0:
print('Please enter a valid input')
print(' ')
self.milestones = [int(x)
for x in self.milestones_input if int(x) > 0]
print(' ')
gate = 0
while gate != 1:
self.gamma = (input(
'Please enter a Multiplying factor value float input (Multiplying factor > 0): ')).replace(' ','')
if self.gamma.replace('.', '').isdigit():
if float(self.gamma) > 0:
self.gamma = float(self.gamma)
gate = 1
else:
print('Please enter a valid input')
print(' ')
self.scheduler = scheduler.MultiStepLR(
self.optimizer, milestones=self.milestones, gamma=self.gamma)
def _set_device(self):
# Method for setting device type if GPU is available
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
def _get_epoch(self):
# Method for getting number of epochs for training the model
gate = 0
while gate != 1:
self.numEpochs = (
input('Please enter the number of epochs to train the model: ')).replace(' ','')
if self.numEpochs.isnumeric() and int(self.numEpochs) > 0:
self.numEpochs = int(self.numEpochs)
gate = 1
else:
print('Please enter a valid input')
def main(self):
# Method integrating all the functions and training the model
self.net.to(self.device)
print('='*25)
print('Neural network architecture: ')
print(' ')
print(self.net) # printing model architecture
print('='*25)
self.get_model_summary() # printing summaray of the model
print(' ')
print('='*25)
image_transform = transforms.Compose([transforms.Grayscale(
num_output_channels=self.img_size[-1]), transforms.Resize((self.img_size[:-1]), interpolation=2), transforms.ToTensor()])
self.train_dataset = torchvision.datasets.ImageFolder(
root=self.train_address, transform=image_transform) # creating the training dataset
self.val_dataset = torchvision.datasets.ImageFolder(
root=self.val_address, transform=image_transform) # creating the validation dataset
# creating the training dataset dataloadet
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, batch_size=self.batchsize, shuffle=True)
# creating the validation dataset dataloader
self.dev_loader = torch.utils.data.DataLoader(
self.val_dataset, batch_size=self.batchsize)
self.train_model() # training the model
self.get_loss_graph() # saving the loss graph
if self.criterion_input == '1':
self.get_accuracy_graph() # saving the accuracy graph
self.get_confusion_matrix() # printing confusion matrix
self._save_model() # saving model paramters
print(' Call get_prediction() to make predictions on new data')
print(' ')
print('=== End of training ===')
def _save_model(self):
# Method for saving the model parameters if user wants to
gate = 0
while gate != 1:
save_model = input(
'Do you want to save the model weights? (y/n): ').replace(' ','')
if save_model.lower() == 'y' or save_model.lower() == 'yes':
path = 'model_parameters.pth'
torch.save(self.net.state_dict(), path)
gate = 1
elif save_model.lower() == 'n' or save_model.lower() == 'no':
gate = 1
else:
print('Please enter a valid input')
print('='*25)
def get_model_summary(self):
# Method for getting the summary of the model
print('Model Summary:')
print(' ')
print('Criterion: ', self.criterion)
print('Optimizer: ', self.optimizer)
print('Scheduler: ', self.scheduler)
print('Batch size: ', self.batchsize)
print('Initial learning rate: ', self.lr)
print('Number of training epochs: ', self.numEpochs)
print('Device: ', self.device)
def train_model(self):
# Method for training the model
self.net.train()
self.training_loss = []
self.training_acc = []
self.dev_loss = []
self.dev_accuracy = []
total_predictions = 0.0
correct_predictions = 0.0
print('Training the model...')
for epoch in range(self.numEpochs):
start_time = time.time()
self.net.train()
print('Epoch_Number: ', epoch)
running_loss = 0.0
for batch_idx, (data, target) in enumerate(self.train_loader):
self.optimizer.zero_grad()
data = data.double().to(self.device)
target = target.to(self.device)
outputs = self.net(data)
# calculating the batch accuracy only if the loss function is Cross entropy
if self.criterion_input == '1':
loss = self.criterion(outputs, target.long())
_, predicted = torch.max(outputs.data, 1)
total_predictions += target.size(0)
correct_predictions += (predicted == target).sum().item()
else:
loss = self.criterion(outputs, target)
running_loss += loss.item()
loss.backward()
self.optimizer.step()
running_loss /= len(self.train_loader)
self.training_loss.append(running_loss)
print('Training Loss: ', running_loss)
# printing the epoch accuracy only if the loss function is Cross entropy
if self.criterion_input == '1':
acc = (correct_predictions/total_predictions)*100.0
self.training_acc.append(acc)
print('Training Accuracy: ', acc, '%')
dev_loss, dev_acc = self.validate_model()
if self.scheduler_input != '1':
self.scheduler.step()
print('Current scheduler status: ', self.optimizer)
end_time = time.time()
print('Epoch Time: ', end_time - start_time, 's')
print('#'*50)
self.dev_loss.append(dev_loss)
# saving the epoch validation accuracy only if the loss function is Cross entropy
if self.criterion_input == '1':
self.dev_accuracy.append(dev_acc)
def validate_model(self):
with torch.no_grad():
self.net.eval()
running_loss = 0.0
total_predictions = 0.0
correct_predictions = 0.0
acc = 0
self.actual = []
self.predict = []
for batch_idx, (data, target) in enumerate(self.dev_loader):
data = data.double().to(self.device)
target = target.to(self.device)
outputs = self.net(data)
if self.criterion_input == '1':
loss = self.criterion(outputs, target.long())
_, predicted = torch.max(outputs.data, 1)
total_predictions += target.size(0)
correct_predictions += (predicted == target).sum().item()
self.predict.append(predicted.detach().cpu().numpy())
else:
loss = self.criterion(outputs, target)
self.predict.append(outputs.detach().cpu().numpy())
running_loss += loss.item()
self.actual.append(target.detach().cpu().numpy())
running_loss /= len(self.dev_loader)
print('Validation Loss: ', running_loss)
# calculating and printing the epoch accuracy only if the loss function is Cross entropy
if self.criterion_input == '1':
acc = (correct_predictions/total_predictions)*100.0
print('Validation Accuracy: ', acc, '%')
return running_loss, acc
def get_loss_graph(self):
# Method for showing and saving the loss graph in the root directory
plt.figure(figsize=(8, 8))
plt.plot(self.training_loss, label='Training Loss')
plt.plot(self.dev_loss, label='Validation Loss')
plt.legend()
plt.title('Model Loss')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.savefig('loss.png')
def get_accuracy_graph(self):
# Method for showing and saving the accuracy graph in the root directory
plt.figure(figsize=(8, 8))
plt.plot(self.training_acc, label='Training Accuracy')
plt.plot(self.dev_accuracy, label='Validation Accuracy')
plt.legend()
plt.title('Model accuracy')
plt.xlabel('Epochs')
plt.ylabel('acc')
plt.savefig('accuracy.png')
def get_confusion_matrix(self):
# Method for getting the confusion matrix for classification problem
print('Confusion Matix: ')
np_predict = np.zeros((0,))
np_actual = np.zeros((0,))
for i in range(len(self.predict)):
np_predict = np.concatenate((np_predict,
np.asarray(self.predict[i]).reshape(-1)),
axis=0)
np_actual = np.concatenate((np_actual,
self.actual[i].reshape(-1)),
axis=0)
result = confusion_matrix(np_predict, np_actual)
print(result)
def get_prediction(self, x_input):
"""
Pass in an input numpy array for making prediction.
For passing multiple inputs, make sure to keep number of examples to be the first dimension of the input.
For example, 10 data points need to be checked and each point has (3, 50, 50) resized or original input size, the shape of the array must be (10, 3, 50, 50).
For more information, please see documentation.
"""
# Method to use at the time of inference
if len(x_input.shape) == 3: # handling the case of single
x_input = (x_input).reshape(
1, x_input.shape[0], x_input.shape[1], x_input.shape[2])
x_input = torch.from_numpy(x_input).to(self.device)
net_output = self.net.forward(x_input)
if self.criterion_input == '1': # handling the case of classification problem
_, net_output = torch.max(net_output.data, 1)
return net_output
def get_mapping(self):
mapped_labels = self.train_dataset.class_to_idx
return mapped_labels | PypiClean |
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/task_dispensers/toc.py |
import json
from collections import OrderedDict
from inginious.frontend.task_dispensers.util import check_toc, SectionsList, SectionConfigItem
from inginious.frontend.task_dispensers import TaskDispenser
class TableOfContents(TaskDispenser):
    """Task dispenser that organises the course tasks as a table of contents."""

    def __init__(self, task_list_func, dispenser_data, database, course_id):
        self._task_list_func = task_list_func
        self._toc = SectionsList(dispenser_data)

    @classmethod
    def get_id(cls):
        """ Returns the task dispenser id """
        return "toc"

    @classmethod
    def get_name(cls, language):
        """ Returns the localized task dispenser name """
        return _("Table of contents")

    def get_dispenser_data(self):
        """ Returns the task dispenser data structure """
        return self._toc

    def render_edit(self, template_helper, course, task_data):
        """ Returns the formatted task list edition form """
        config_fields = {
            "closed": SectionConfigItem(_("Closed by default"), "checkbox", False)
        }
        return template_helper.render("course_admin/task_dispensers/toc.html", course=course,
                                      course_structure=self._toc, tasks=task_data, config_fields=config_fields)

    def render(self, template_helper, course, tasks_data, tag_list):
        """ Returns the formatted task list"""
        return template_helper.render("task_dispensers/toc.html", course=course, tasks=self._task_list_func(),
                                      tasks_data=tasks_data, tag_filter_list=tag_list, sections=self._toc)

    @classmethod
    def check_dispenser_data(cls, dispenser_data):
        """ Checks the dispenser data as formatted by the form from render_edit function """
        parsed = json.loads(dispenser_data)
        valid, errors = check_toc(parsed)
        if valid:
            return parsed, errors
        return None, errors

    def get_user_task_list(self, usernames):
        """ Returns a dictionary with username as key and the user task list as value """
        all_tasks = self._task_list_func()
        started = [
            taskid for taskid in self._toc.get_tasks()
            if taskid in all_tasks and all_tasks[taskid].get_accessible_time().after_start()
        ]
        # Every username maps to the same list object, exactly as the
        # original dict comprehension behaved.
        return dict.fromkeys(usernames, started)

    def get_ordered_tasks(self):
        """ Returns a serialized version of the tasks structure as an OrderedDict"""
        all_tasks = self._task_list_func()
        ordered = OrderedDict()
        for taskid in self._toc.get_tasks():
            if taskid in all_tasks:
                ordered[taskid] = all_tasks[taskid]
        return ordered

    def get_task_order(self, taskid):
        """ Get the position of this task in the course """
        ordering = self._toc.get_tasks()
        try:
            return ordering.index(taskid)
        except ValueError:
            # Unknown tasks sort after every known one.
            return len(ordering)
/Acquisition-4.4.4-cp33-cp33m-win_amd64.whl/Acquisition-4.4.4.dist-info/DESCRIPTION.rst | Environmental Acquisiton
========================
This package implements "environmental acquisition" for Python, as
proposed in the OOPSLA96_ paper by Joseph Gil and David H. Lorenz:
We propose a new programming paradigm, environmental acquisition in
the context of object aggregation, in which objects acquire
behaviour from their current containers at runtime. The key idea is
that the behaviour of a component may depend upon its enclosing
composite(s). In particular, we propose a form of feature sharing in
which an object "inherits" features from the classes of objects in
its environment. By examining the declaration of classes, it is
possible to determine which kinds of classes may contain a
component, and which components must be contained in a given kind of
composite. These relationships are the basis for language constructs
that supports acquisition.
.. _OOPSLA96: http://www.cs.virginia.edu/~lorenz/papers/oopsla96/
.. contents::
Introductory Example
--------------------
Zope implements acquisition with "Extension Class" mix-in classes. To
use acquisition your classes must inherit from an acquisition base
class. For example::
>>> import ExtensionClass, Acquisition
>>> class C(ExtensionClass.Base):
... color = 'red'
>>> class A(Acquisition.Implicit):
... def report(self):
... print(self.color)
...
>>> a = A()
>>> c = C()
>>> c.a = a
>>> c.a.report()
red
>>> d = C()
>>> d.color = 'green'
>>> d.a = a
>>> d.a.report()
green
>>> try:
... a.report()
... except AttributeError:
... pass
... else:
... raise AssertionError('AttributeError not raised.')
The class ``A`` inherits acquisition behavior from
``Acquisition.Implicit``. The object, ``a``, "has" the color of
objects ``c`` and d when it is accessed through them, but it has no
color by itself. The object ``a`` obtains attributes from its
environment, where its environment is defined by the access path used
to reach ``a``.
Acquisition Wrappers
--------------------
When an object that supports acquisition is accessed through an
extension class instance, a special object, called an acquisition
wrapper, is returned. In the example above, the expression ``c.a``
returns an acquisition wrapper that contains references to both ``c``
and ``a``. It is this wrapper that performs attribute lookup in ``c``
when an attribute cannot be found in ``a``.
Acquisition wrappers provide access to the wrapped objects through the
attributes ``aq_parent``, ``aq_self``, ``aq_base``. Continue the
example from above::
>>> c.a.aq_parent is c
True
>>> c.a.aq_self is a
True
Explicit and Implicit Acquisition
---------------------------------
Two styles of acquisition are supported: implicit and explicit
acquisition.
Implicit acquisition
--------------------
Implicit acquisition is so named because it searches for attributes
from the environment automatically whenever an attribute cannot be
obtained directly from an object or through inheritance.
An attribute can be implicitly acquired if its name does not begin
with an underscore.
To support implicit acquisition, your class should inherit from the
mix-in class ``Acquisition.Implicit``.
Explicit Acquisition
--------------------
When explicit acquisition is used, attributes are not automatically
obtained from the environment. Instead, the method aq_acquire must be
used. For example::
>>> print(c.a.aq_acquire('color'))
red
To support explicit acquisition, your class should inherit from the
mix-in class ``Acquisition.Explicit``.
Controlling Acquisition
-----------------------
A class (or instance) can provide attribute by attribute control over
acquisition. Your should subclass from ``Acquisition.Explicit``, and set
all attributes that should be acquired to the special value
``Acquisition.Acquired``. Setting an attribute to this value also allows
inherited attributes to be overridden with acquired ones. For example::
>>> class C(Acquisition.Explicit):
... id = 1
... secret = 2
... color = Acquisition.Acquired
... __roles__ = Acquisition.Acquired
The only attributes that are automatically acquired from containing
objects are color, and ``__roles__``. Note that the ``__roles__``
attribute is acquired even though its name begins with an
underscore. In fact, the special ``Acquisition.Acquired`` value can be
used in ``Acquisition.Implicit`` objects to implicitly acquire
selected objects that smell like private objects.
Sometimes, you want to dynamically make an implicitly acquiring object
acquire explicitly. You can do this by getting the object's
aq_explicit attribute. This attribute provides the object with an
explicit wrapper that replaces the original implicit wrapper.
Filtered Acquisition
--------------------
The acquisition method, ``aq_acquire``, accepts two optional
arguments. The first of the additional arguments is a "filtering"
function that is used when considering whether to acquire an
object. The second of the additional arguments is an object that is
passed as extra data when calling the filtering function and which
defaults to ``None``. The filter function is called with five
arguments:
* The object that the aq_acquire method was called on,
* The object where an object was found,
* The name of the object, as passed to aq_acquire,
* The object found, and
* The extra data passed to aq_acquire.
If the filter returns a true value, then the object found is returned;
otherwise, the acquisition search continues.
Here's an example::
>>> from Acquisition import Explicit
>>> class HandyForTesting(object):
... def __init__(self, name):
... self.name = name
... def __str__(self):
... return "%s(%s)" % (self.name, self.__class__.__name__)
... __repr__=__str__
...
>>> class E(Explicit, HandyForTesting): pass
...
>>> class Nice(HandyForTesting):
... isNice = 1
... def __str__(self):
... return HandyForTesting.__str__(self)+' and I am nice!'
... __repr__ = __str__
...
>>> a = E('a')
>>> a.b = E('b')
>>> a.b.c = E('c')
>>> a.p = Nice('spam')
>>> a.b.p = E('p')
>>> def find_nice(self, ancestor, name, object, extra):
... return hasattr(object,'isNice') and object.isNice
>>> print(a.b.c.aq_acquire('p', find_nice))
spam(Nice) and I am nice!
The filtered acquisition in the last line skips over the first
attribute it finds with the name ``p``, because the attribute doesn't
satisfy the condition given in the filter.
Filtered acquisition is rarely used in Zope.
Acquiring from Context
----------------------
Normally acquisition allows objects to acquire data from their
containers. However an object can acquire from objects that aren't its
containers.
Most of the examples we've seen so far show establishing of an
acquisition context using getattr semantics. For example, ``a.b`` is a
reference to ``b`` in the context of ``a``.
You can also manually set acquisition context using the ``__of__``
method. For example::
>>> from Acquisition import Implicit
>>> class C(Implicit): pass
...
>>> a = C()
>>> b = C()
>>> a.color = "red"
>>> print(b.__of__(a).color)
red
In this case, ``a`` does not contain ``b``, but it is put in ``b``'s
context using the ``__of__`` method.
Here's another subtler example that shows how you can construct an
acquisition context that includes non-container objects::
>>> from Acquisition import Implicit
>>> class C(Implicit):
... def __init__(self, name):
... self.name = name
>>> a = C("a")
>>> a.b = C("b")
>>> a.b.color = "red"
>>> a.x = C("x")
>>> print(a.b.x.color)
red
Even though ``b`` does not contain ``x``, ``x`` can acquire the color
attribute from ``b``. This works because in this case, ``x`` is accessed
in the context of ``b`` even though it is not contained by ``b``.
Here acquisition context is defined by the objects used to access
another object.
Containment Before Context
--------------------------
Suppose that, in the example above, both ``a`` and ``b`` have a color attribute::
>>> a = C("a")
>>> a.color = "green"
>>> a.b = C("b")
>>> a.b.color = "red"
>>> a.x = C("x")
>>> print(a.b.x.color)
green
Why does ``a.b.x.color`` acquire color from ``a`` and not from ``b``?
The answer is that an object acquires from its containers before
non-containers in its context.
To see why consider this example in terms of expressions using the
``__of__`` method::
a.x -> x.__of__(a)
a.b -> b.__of__(a)
a.b.x -> x.__of__(a).__of__(b.__of__(a))
Keep in mind that attribute lookup in a wrapper is done by trying to
look up the attribute in the wrapped object first and then in the
parent object. So in the expressions above proceeds from left to
right.
The upshot of these rules is that attributes are looked up by
containment before context.
This rule holds true also for more complex examples. For example,
``a.b.c.d.e.f.g.attribute`` would search for attribute in ``g`` and
all its containers first. (Containers are searched in order from the
innermost parent to the outermost container.) If the attribute is not
found in ``g`` or any of its containers, then the search moves to
``f`` and all its containers, and so on.
Additional Attributes and Methods
---------------------------------
You can use the special method ``aq_inner`` to access an object
wrapped only by containment. So in the example above,
``a.b.x.aq_inner`` is equivalent to ``a.x``.
You can find out the acquisition context of an object using the
aq_chain method like so:
>>> [obj.name for obj in a.b.x.aq_chain]
['x', 'b', 'a']
You can find out if an object is in the containment context of another
object using the ``aq_inContextOf`` method. For example:
>>> a.b.aq_inContextOf(a)
True
.. Note: as of this writing the aq_inContextOf examples don't work the
way they should be working. According to Jim, this is because
aq_inContextOf works by comparing object pointer addresses, which
(because they are actually different wrapper objects) doesn't give
you the expected results. He acknowledges that this behavior is
controversial, and says that there is a collector entry to change
it so that you would get the answer you expect in the above. (We
just need to get to it).
Acquisition Module Functions
----------------------------
In addition to using acquisition attributes and methods directly on
objects you can use similar functions defined in the ``Acquisition``
module. These functions have the advantage that you don't need to
check to make sure that the object has the method or attribute before
calling it.
``aq_acquire(object, name [, filter, extra, explicit, default, containment])``
Acquires an object with the given name.
This function can be used to explicitly acquire when using explicit
acquisition and to acquire names that wouldn't normally be
acquired.
The function accepts a number of optional arguments:
``filter``
A callable filter object that is used to decide if an object
should be acquired.
The filter is called with five arguments:
* The object that the aq_acquire method was called on,
* The object where an object was found,
* The name of the object, as passed to aq_acquire,
* The object found, and
* The extra argument passed to aq_acquire.
If the filter returns a true value, then the object found is
returned; otherwise, the acquisition search continues.
``extra``
Extra data to be passed as the last argument to the filter.
``explicit``
A flag (boolean value) indicating whether explicit acquisition
should be used. The default value is true. If the flag is
true, then acquisition will proceed regardless of whether
wrappers encountered in the search of the acquisition
hierarchy are explicit or implicit wrappers. If the flag is
false, then parents of explicit wrappers are not searched.
This argument is useful if you want to apply a filter without
overriding explicit wrappers.
``default``
A default value to return if no value can be acquired.
``containment``
A flag indicating whether the search should be limited to the
containment hierarchy.
In addition, arguments can be provided as keywords.
``aq_base(object)``
Return the object with all wrapping removed.
``aq_chain(object [, containment])``
Return a list containing the object and it's acquisition
parents. The optional argument, containment, controls whether the
containment or access hierarchy is used.
``aq_get(object, name [, default, containment])``
Acquire an attribute, name. A default value can be provided, as
can a flag that limits search to the containment hierarchy.
``aq_inner(object)``
Return the object with all but the innermost layer of wrapping
removed.
``aq_parent(object)``
Return the acquisition parent of the object or None if the object
is unwrapped.
``aq_self(object)``
Return the object with one layer of wrapping removed, unless the
object is unwrapped, in which case the object is returned.
In most cases it is more convenient to use these module functions
instead of the acquisition attributes and methods directly.
Acquisition and Methods
-----------------------
Python methods of objects that support acquisition can use acquired
attributes. When a Python method is called on an object that is
wrapped by an acquisition wrapper, the wrapper is passed to the method
as the first argument. This rule also applies to user-defined method
types and to C methods defined in pure mix-in classes.
Unfortunately, C methods defined in extension base classes that define
their own data structures cannot use acquired attributes at this
time. This is because wrapper objects do not conform to the data
structures expected by these methods. In practice, you will seldom
find this a problem.
Conclusion
----------
Acquisition provides a powerful way to dynamically share information
between objects. Zope 2 uses acquisition for a number of its key
features including security, object publishing, and DTML variable
lookup. Acquisition also provides an elegant solution to the problem
of circular references for many classes of problems. While acquisition
is powerful, you should take care when using acquisition in your
applications. The details can get complex, especially with the
differences between acquiring from context and acquiring from
containment.
Changelog
=========
4.4.4 (2017-11-24)
------------------
- add Appveyor configuration to automate building Windows eggs
4.4.3 (2017-11-23)
------------------
- Fix the extremely rare potential for a crash when the C extensions
are in use. See `issue 21 <https://github.com/zopefoundation/Acquisition/issues/21>`_.
4.4.2 (2017-05-12)
------------------
- Fixed C capsule name to fix import errors.
- Ensure our dependencies match our expectations about C extensions.
4.4.1 (2017-05-04)
------------------
- Fix C code under Python 3.4, with missing Py_XSETREF.
4.4.0 (2017-05-04)
------------------
- Enable the C extension under Python 3.
- Drop support for Python 3.3.
4.3.0 (2017-01-20)
------------------
- Make tests compatible with ExtensionClass 4.2.0.
- Drop support for Python 2.6 and 3.2.
- Add support for Python 3.5 and 3.6.
4.2.2 (2015-05-19)
------------------
- Make the pure-Python Acquirer objects cooperatively use the
superclass ``__getattribute__`` method, like the C implementation.
See https://github.com/zopefoundation/Acquisition/issues/7.
- The pure-Python implicit acquisition wrapper allows wrapped objects
to use ``object.__getattribute__(self, name)``. This differs from
the C implementation, but is important for compatibility with the
pure-Python versions of libraries like ``persistent``. See
https://github.com/zopefoundation/Acquisition/issues/9.
4.2.1 (2015-04-23)
------------------
- Correct several dangling pointer uses in the C extension,
potentially fixing a few interpreter crashes. See
https://github.com/zopefoundation/Acquisition/issues/5.
4.2 (2015-04-04)
----------------
- Add support for PyPy, PyPy3, and Python 3.2, 3.3, and 3.4.
4.1 (2014-12-18)
----------------
- Bump dependency on ``ExtensionClass`` to match current release.
4.0.3 (2014-11-02)
------------------
- Skip readme.rst tests when tests are run outside a source checkout.
4.0.2 (2014-11-02)
------------------
- Include ``*.rst`` files in the release.
4.0.1 (2014-10-30)
------------------
- Tolerate Unicode attribute names (ASCII only). LP #143358.
- Make module-level ``aq_acquire`` API respect the ``default`` parameter.
LP #1387363.
- Don't raise an attribute error for ``__iter__`` if the fallback to
``__getitem__`` succeeds. LP #1155760.
4.0 (2013-02-24)
----------------
- Added trove classifiers to project metadata.
4.0a1 (2011-12-13)
------------------
- Raise `RuntimeError: Recursion detected in acquisition wrapper` if an object
with a `__parent__` pointer points to a wrapper that in turn points to the
original object.
- Prevent wrappers to be created while accessing `__parent__` on types derived
from Explicit or Implicit base classes.
2.13.9 (2015-02-17)
-------------------
- Tolerate Unicode attribute names (ASCII only). LP #143358.
- Make module-level ``aq_acquire`` API respect the ``default`` parameter.
LP #1387363.
- Don't raise an attribute error for ``__iter__`` if the fallback to
``__getitem__`` succeeds. LP #1155760.
2.13.8 (2011-06-11)
-------------------
- Fixed a segfault on 64bit platforms when providing the `explicit` argument to
the aq_acquire method of an Acquisition wrapper. Thx to LP #675064 for the
hint to the solution. The code passed an int instead of a pointer into a
function.
2.13.7 (2011-03-02)
-------------------
- Fixed bug: When an object did not implement ``__unicode__``, calling
``unicode(wrapped)`` was calling ``__str__`` with an unwrapped ``self``.
2.13.6 (2011-02-19)
-------------------
- Add ``aq_explicit`` to ``IAcquisitionWrapper``.
- Fixed bug: ``unicode(wrapped)`` was not calling a ``__unicode__``
method on wrapped objects.
2.13.5 (2010-09-29)
-------------------
- Fixed unit tests that failed on 64bit Python on Windows machines.
2.13.4 (2010-08-31)
-------------------
- LP 623665: Fixed typo in Acquisition.h.
2.13.3 (2010-04-19)
-------------------
- Use the doctest module from the standard library and no longer depend on
zope.testing.
2.13.2 (2010-04-04)
-------------------
- Give both wrapper classes a ``__getnewargs__`` method, which causes the ZODB
optimization to fail and create persistent references using the ``_p_oid``
alone. This happens to be the persistent oid of the wrapped object. This lets
these objects to be persisted correctly, even though they are passed to the
ZODB in a wrapped state.
- Added failing tests for http://dev.plone.org/plone/ticket/10318. This shows
an edge-case where AQ wrappers can be pickled using the specific combination
of cPickle, pickle protocol one and a custom Pickler class with an
``inst_persistent_id`` hook. Unfortunately this is the exact combination used
by ZODB3.
2.13.1 (2010-02-23)
-------------------
- Update to include ExtensionClass 2.13.0.
- Fix the ``tp_name`` of the ImplicitAcquisitionWrapper and
ExplicitAcquisitionWrapper to match their Python visible names and thus have
a correct ``__name__``.
- Expand the ``tp_name`` of our extension types to hold the fully qualified
name. This ensures classes have their ``__module__`` set correctly.
2.13.0 (2010-02-14)
-------------------
- Added support for method cache in Acquisition. Patch contributed by
Yoshinori K. Okuji. See https://bugs.launchpad.net/zope2/+bug/486182.
2.12.4 (2009-10-29)
-------------------
- Fix iteration proxying to pass `self` acquisition-wrapped into both
`__iter__` as well as `__getitem__` (this fixes
https://bugs.launchpad.net/zope2/+bug/360761).
- Add tests for the __getslice__ proxying, including open-ended slicing.
2.12.3 (2009-08-08)
-------------------
- More 64-bit fixes in Py_BuildValue calls.
- More 64-bit issues fixed: Use correct integer size for slice operations.
2.12.2 (2009-08-02)
-------------------
- Fixed 64-bit compatibility issues for Python 2.5.x / 2.6.x. See
http://www.python.org/dev/peps/pep-0353/ for details.
2.12.1 (2009-04-15)
-------------------
- Update for iteration proxying: The proxy for `__iter__` must not rely on the
object to have an `__iter__` itself, but also support fall-back iteration via
`__getitem__` (this fixes https://bugs.launchpad.net/zope2/+bug/360761).
2.12 (2009-01-25)
-----------------
- Release as separate package.
| PypiClean |
/Circos-1.3.5-py3-none-any.whl/circos/circos.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
class CircosPlot(object):
    """Circos-style network plot: nodes laid out on a circle, edges drawn
    as quadratic Bezier curves passing through the centre.

    Parameters
    ----------
    nodes : list
        Nodes, in the order they should appear around the circle.
    edges : list of tuple
        (start, end) node pairs to connect.
    radius : float
        Radius of the node circle.
    nodecolor, edgecolor : str or iterable, optional
        Explicit colours; these take precedence over any colour entry
        found in the corresponding props dict.
    nodeprops, edgeprops : dict, optional
        Extra keyword arguments forwarded to the matplotlib patches.
    figsize : tuple, optional
        Figure size, used only when no figure is supplied.
    ax, fig : matplotlib objects, optional
        Existing axes/figure to draw into; created on demand otherwise.
    """

    def __init__(self, nodes, edges, radius,
                 nodecolor=None, edgecolor=None,
                 nodeprops=None, edgeprops=None,
                 figsize=(8, 8), ax=None, fig=None):
        self.nodes = nodes  # list of nodes
        self.edges = edges  # list of edge tuples
        # Validate props dictionaries first so a bad argument fails fast.
        self.nodeprops = self._checked_props(nodeprops, 'nodeprops')
        self.edgeprops = self._checked_props(edgeprops, 'edgeprops')
        # Colour priority: explicit argument > props entry > default.
        self.nodecolor = self._resolve_color(nodecolor, nodeprops,
                                             'facecolor', 'blue')
        self.edgecolor = self._resolve_color(edgecolor, edgeprops,
                                             'edgecolor', 'black')
        self.radius = radius
        if fig is None:
            self.fig = plt.figure(figsize=figsize)
        else:
            self.fig = fig
        if ax is None:
            self.ax = self.fig.add_subplot(111)
        else:
            self.ax = ax
        self.node_radius = self.radius * 0.05
        # Leave a 5% margin around the circle and hide all axes chrome.
        self.ax.set_xlim(-radius * 1.05, radius * 1.05)
        self.ax.set_ylim(-radius * 1.05, radius * 1.05)
        self.ax.xaxis.set_visible(False)
        self.ax.yaxis.set_visible(False)
        for spine in self.ax.spines.values():
            spine.set_visible(False)

    @staticmethod
    def _checked_props(props, name):
        """Return *props* as a dict ({} when None); raise TypeError otherwise."""
        if props is None:
            return {}
        if isinstance(props, dict):
            return props
        raise TypeError("%s must be a dictionary" % name)

    @staticmethod
    def _resolve_color(explicit, props, key, default):
        """Resolve a colour: explicit argument wins, then *key* popped
        from *props* (when props is a non-empty dict), then *default*."""
        if explicit is not None:
            return explicit
        if props:
            try:
                return props.pop(key)
            except KeyError:
                return default
        return default

    def draw(self):
        """Draw the full plot: nodes first, then edges."""
        self.add_nodes()
        self.add_edges()

    def add_nodes(self):
        """
        Draws nodes onto the canvas with colours.
        """
        r = self.radius
        node_r = self.node_radius
        # 'facecolor' is set per node below, so drop any stale entry.
        if 'facecolor' in self.nodeprops:
            self.nodeprops.pop('facecolor')
        # A single colour string applies to every node; an iterable of the
        # same length as self.nodes colours the nodes individually.
        if isinstance(self.nodecolor, str):
            nodes_and_colors = zip(self.nodes,
                                   [self.nodecolor] * len(self.nodes))
        elif hasattr(self.nodecolor, '__iter__') and \
                (len(self.nodes) == len(self.nodecolor)):
            nodes_and_colors = zip(self.nodes, self.nodecolor)
        else:
            raise TypeError("""nodecolor must be a string or iterable of the
            same length as nodes.""")
        # Draw the nodes to screen.
        for node, color in nodes_and_colors:
            theta = self.node_theta(node)
            x, y = get_cartesian(r, theta)
            self.nodeprops['facecolor'] = color
            node_patch = patches.Ellipse((x, y), node_r, node_r,
                                         lw=0, **self.nodeprops)
            self.ax.add_patch(node_patch)

    def node_theta(self, node):
        """
        Maps node to Angle (radians, clockwise from the top of the circle).
        """
        i = self.nodes.index(node)
        theta = i * 2 * np.pi / len(self.nodes)
        return theta

    def add_edges(self):
        """
        Draws edges to screen.
        """
        for start, end in self.edges:
            start_theta = self.node_theta(start)
            end_theta = self.node_theta(end)
            # Quadratic Bezier from the start node, through the centre,
            # to the end node.
            verts = [get_cartesian(self.radius, start_theta),
                     (0, 0),
                     get_cartesian(self.radius, end_theta)]
            codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
            path = Path(verts, codes)
            self.edgeprops['facecolor'] = 'none'
            self.edgeprops['edgecolor'] = self.edgecolor
            patch = patches.PathPatch(path, lw=1, **self.edgeprops)
            self.ax.add_patch(patch)
def get_cartesian(r, theta):
    """Convert polar coordinates (radius *r*, angle *theta* measured from
    the positive y-axis) to a Cartesian (x, y) pair."""
    return r * np.sin(theta), r * np.cos(theta)
/CryptoWallets-0.0.1.tar.gz/CryptoWallets-0.0.1/bitcoin/gen_addr.py |
from ecdsa import SigningKey, SECP256k1
from binascii import hexlify
import bitcoin.constants as BCONST
from bitcoin.constants import NETWORK_TYPES
from bitcoin.hashes import hash160, hash256
from bitcoin.base58 import base58_to_base256, base58check
def get_compressed_pub_key(pub_key_raw):
    '''Return the 33-byte compressed (SEC1) encoding of a raw public key.

    *pub_key_raw* is the 64-byte concatenation of the X and Y coordinates.
    The prefix byte is 0x02 when the Y coordinate is even and 0x03 when odd.
    '''
    assert len(pub_key_raw) == 64
    p_x = pub_key_raw[:32]
    p_y = pub_key_raw[32:]
    # The parity of the big-endian Y coordinate is just the low bit of its
    # last byte -- no need to hexlify all 32 bytes into a Python integer.
    prefix = b'\x03' if ord(p_y[-1:]) & 1 else b'\x02'
    compressed_pub_key = prefix + p_x
    assert len(compressed_pub_key) == 33
    return compressed_pub_key
def get_uncompressed_pub_key(pub_key_raw):
    '''Return the 65-byte uncompressed (SEC1) public-key encoding.

    The result is 0x04 followed by the 32-byte X coordinate and the
    32-byte Y coordinate, i.e. 0x04 || X || Y.
    '''
    assert len(pub_key_raw) == 64
    encoded = b'\x04' + pub_key_raw
    assert len(encoded) == 65
    return encoded
# Dispatch table: public-key serialisation format -> encoder function.
PUB_KEY_FORMATS = {
    BCONST.COMPRESSED: get_compressed_pub_key,
    BCONST.UNCOMPRESSED: get_uncompressed_pub_key,
}
def guess_wif_details(priv_key_wif):
    '''Deduce network and key format from a WIF private key string.

    The first Base58 character of a WIF string encodes both the network
    and whether the key is compressed:
    https://en.bitcoin.it/wiki/List_of_address_prefixes
    '''
    details_by_prefix = {
        '5': (BCONST.MAINNET, BCONST.UNCOMPRESSED),
        'K': (BCONST.MAINNET, BCONST.COMPRESSED),
        'L': (BCONST.MAINNET, BCONST.COMPRESSED),
        '9': (BCONST.TESTNET, BCONST.UNCOMPRESSED),
        'c': (BCONST.TESTNET, BCONST.COMPRESSED),
    }
    leading = priv_key_wif[0]
    if leading not in details_by_prefix:
        raise Exception('Unhandled WIF format')
    network_type, key_fmt = details_by_prefix[leading]
    return {'network_type': network_type, 'key_fmt': key_fmt}
def pub_key_from_priv_key_hex(priv_key_hex):
    '''Obtain the raw public key from a private key given in HEX.

    The hex string is interpreted as the secret exponent of a SECP256k1
    key pair; the corresponding verifying (public) key is returned as a
    64-byte string.
    '''
    signing_key = SigningKey.from_secret_exponent(int(priv_key_hex, 16),
                                                  curve=SECP256k1)
    return signing_key.get_verifying_key().to_string()
def bitcoin_addr_from_priv_key_hex(priv_key_hex, network_type, key_fmt):
    '''Create a Bitcoin address from a private key given in HEX.

    Derives the ECDSA public key, serialises it in the requested format
    (compressed or uncompressed), and encodes it as an address for the
    requested network.
    '''
    raw_pub_key = pub_key_from_priv_key_hex(priv_key_hex)
    formatted = PUB_KEY_FORMATS[key_fmt](raw_pub_key)
    return bitcoin_addr_from_pub_key(formatted, network_type)
def priv_key_from_wif(priv_key_wif):
    '''Extract raw private key from the WIF string.

    Returns a (priv_key_raw, network_type, key_fmt) tuple where
    priv_key_raw is the 32-byte secret and network_type / key_fmt are the
    constants deduced from the WIF prefix character.  Raises
    AssertionError when any structural check on the decoded string fails.
    '''
    # The leading Base58 character encodes both network and key format.
    wif_details = guess_wif_details(priv_key_wif)
    network_type = wif_details['network_type']
    key_fmt = wif_details['key_fmt']
    decoded_wif = base58_to_base256(priv_key_wif)
    # Verify the WIF string
    if key_fmt == BCONST.COMPRESSED:
        # Layout: version(1) | key(32) | 0x01 padding(1) | checksum(4)
        assert len(decoded_wif) == 38
        network_prefix, priv_key_raw = decoded_wif[0:1], decoded_wif[1:33]
        padding, checksum = decoded_wif[33:34], decoded_wif[34:]
        payload = decoded_wif[:34]
        assert padding == b'\x01'
    elif key_fmt == BCONST.UNCOMPRESSED:
        # Layout: version(1) | key(32) | checksum(4)
        assert len(decoded_wif) == 37
        payload = decoded_wif[:33]
        checksum = decoded_wif[33:]
        network_prefix, priv_key_raw = payload[:1], payload[1:]
    else:
        raise Exception('Invalid key format: %s' % key_fmt)
    # The version byte must match the network's private-key prefix, and
    # the checksum is the first four bytes of double SHA-256 of payload.
    assert network_prefix == NETWORK_TYPES[network_type][BCONST.PRIVKEY]
    assert hash256(payload)[:4] == checksum
    return (priv_key_raw, network_type, key_fmt)
def pub_key_from_priv_key_wif(priv_key_wif):
    '''Obtain the raw public key from a private key given in WIF.

    Returns a (pub_key_raw, network_type, key_fmt) tuple mirroring the
    details decoded from the WIF string.
    '''
    raw_priv_key, network_type, key_fmt = priv_key_from_wif(priv_key_wif)
    verifying_key = SigningKey.from_string(raw_priv_key,
                                           curve=SECP256k1).get_verifying_key()
    return (verifying_key.to_string(), network_type, key_fmt)
def bitcoin_addr_from_priv_key_wif(priv_key_wif):
    '''Create a Bitcoin address from a private key given in WIF.

    The key format (compressed / uncompressed) and network are deduced
    from the WIF string itself.
    '''
    raw_pub_key, network_type, key_fmt = pub_key_from_priv_key_wif(priv_key_wif)
    formatted = PUB_KEY_FORMATS[key_fmt](raw_pub_key)
    return bitcoin_addr_from_pub_key(formatted, network_type)
def bitcoin_addr_from_pub_key(pub_key_formatted, network_type):
    '''Create a Bitcoin address from a formatted public key.

    Computes HASH160 (RIPEMD-160 over SHA-256) of the public key, then
    Base58Check-encodes it with the network's public-key version byte.
    '''
    pub_key_digest = hash160(pub_key_formatted)
    version = NETWORK_TYPES[network_type][BCONST.PUBKEY]
    return base58check(version, pub_key_digest)
def verify_bitcoin_addr(bitcoin_addr):
    '''Verify a Bitcoin address; return True when its checksum is valid.

    A decoded address is 25 bytes: a 21-byte versioned public-key hash
    followed by the first four bytes of its double SHA-256 as a checksum.
    '''
    decoded = base58_to_base256(bitcoin_addr)
    assert len(decoded) == 25
    payload, checksum = decoded[:21], decoded[21:]
    return hash256(payload)[:4] == checksum
/ClearWrap-0.2.tar.gz/ClearWrap-0.2/src/clearwrap/clearparser.py | from os import path
import glob
import subprocess
import tempfile
from datetime import datetime
from codecs import open
from collections import namedtuple
import shutil
import os
import warnings
import sys
import networkx as nx
# Extra JVM flags for the ClearParser subprocess: CMS collector and a
# 6 GB heap cap (the parsing models are large).
CLEAR_JAVA_EXTRA_ARGS = ['-XX:+UseConcMarkSweepGC', '-Xmx6g']
# Encoding used for all files read and written by this module.
ENCODING = 'UTF-8'
# Filename suffixes produced/consumed by the pipeline stages.
POS_SUFF = '.postag'
PARSE_SUFF = '.parse'
TOK_SUFF = '.pos-retag' # for pre-tokenised files
# LaTeX template for tikz-dependency output; interpolates the edge unit
# distance, the \deptext token rows and the \depedge/\deproot commands.
TIKZ_DEP_TEMPLATE = u"""
\\centering
\\begin{dependency}[edge unit distance = %0.2fex]
\\begin{deptext}
%s \\end{deptext}
%s
\\end{dependency}
"""
class ClearWrapper(object):
    """Wrapper for clearparser: drives the external Java POS-tagging and
    dependency-parsing tools over directories or in-memory sentences."""
    def __init__(self, parsing_config, pos_tagging_config, dep_model, clearparser_base):
        """Initialize wrapper for a ClearParser instance with the supplied
        configuration files. Separate files must be supplied for parsing and
        POS-tagging, while the explicit `dep_model` is used for parsing. `clearparser_base`
        should point to the base of the clearparser installation."""
        self.parsing_config = parsing_config
        self.pos_tagging_config = pos_tagging_config
        self.dep_model = dep_model
        self.clearparser_base = clearparser_base
    def parse_from_directory(self, directory):
        """Parse all the files in the given directory"""
        # Stage directories are created beside the input; os.mkdir raises
        # if they already exist, so a directory is only processed once.
        pos_tagged_dir = directory + '-postagged'
        os.mkdir(pos_tagged_dir)
        post_pos_tagged_dir = directory + '-postagged-post'
        os.mkdir(post_pos_tagged_dir)
        parsed_dir = directory + '-parsed'
        os.mkdir(parsed_dir)
        post_parsed_dir = directory + '-parsed-post'
        os.mkdir(post_parsed_dir)
        # 1) POS-tag everything (pre-tagged *.postag files pass through).
        self._pos_tag_directory(directory, pos_tagged_dir)
        # 2) subclass hook: rewrite each POS-tagged file into '*-mod'.
        for fn in glob.glob(path.join(pos_tagged_dir, '*' + POS_SUFF)):
            base = path.basename(fn)
            self.postprocess_pos_tags(fn, path.join(post_pos_tagged_dir, base + '-mod'))
        # 3) dependency-parse the post-processed POS output.
        self._parse_directory(post_pos_tagged_dir, parsed_dir)
        # 4) subclass hook: rewrite each parse file into '*-mod'.
        for fn in glob.glob(path.join(parsed_dir, '*' + PARSE_SUFF)):
            base = path.basename(fn)
            self.postprocess_parsed(fn, path.join(post_parsed_dir, base + '-mod'))
        # Finally move the parses back beside the originals, renamed.
        self._normalize_parsed_filenames(directory, parsed_dir)
    def _normalize_parsed_filenames(self, directory, parsed_dir):
        """Normalize from the parsed directories so we get the same
        file structure we would get directly from Clearparser"""
        for fstem in os.listdir(directory):
            # Exactly one parse output must exist per input file.
            parse_matches = glob.glob(path.join(parsed_dir, fstem + '*' + PARSE_SUFF))
            assert len(parse_matches) == 1, "Invalid number of parse files found"
            parse_match = parse_matches[0]
            # Strip any pre-tagging suffix so the parse lands beside the
            # original stem as '<stem>.parse'.
            if fstem.endswith(POS_SUFF):
                fstem = fstem[:-len(POS_SUFF)]
            elif fstem.endswith(TOK_SUFF):
                fstem = fstem[:-len(TOK_SUFF)]
            os.rename(parse_match, path.join(directory, fstem + PARSE_SUFF))
    def parse_in_bulk(self, sentences, to_graph=False):
        """POS-tag and parse an in-memory sequence of sentences.

        Returns raw per-sentence ClearParser output strings, or
        DependencyGraph objects when `to_graph` is true."""
        sentences = self.preprocess_raw(sentences)
        pos_tagged_fname = self._pos_tag_sentence_list(sentences)
        modified_pos_tagged_fname = pos_tagged_fname + '-mod'
        self.postprocess_pos_tags(pos_tagged_fname, modified_pos_tagged_fname)
        parsed_fname = self._parse_single_file(modified_pos_tagged_fname)
        modified_parsed_fname = parsed_fname + '-mod'
        self.postprocess_parsed(parsed_fname, modified_parsed_fname)
        raw_outputs = list(self._raw_parser_output_by_sentence(modified_parsed_fname))
        if to_graph:
            return [DependencyGraph.from_clear_parse(raw) for raw in raw_outputs]
        else:
            return raw_outputs
    def _raw_parser_output_by_sentence(self, parsed_fname):
        # Sentences in ClearParser output are separated by blank lines.
        with open(parsed_fname, encoding=ENCODING) as f:
            contents = f.read()
        for raw in contents.rstrip(u'\n').split(u'\n\n'):
            yield raw
    def _pos_tag_directory(self, directory, pos_tagged_directory):
        # Temporary holding area for files that arrive already tagged.
        holding_dir_for_pretagged = pos_tagged_directory + '-already'
        os.mkdir(holding_dir_for_pretagged)
        # first put aside any files which are explicitly pre-tagged (detect from suffix)
        # so they don't get double-tagged
        self._move_by_suffix(directory, holding_dir_for_pretagged, POS_SUFF)
        # do POS-tagging of new files:
        self._do_pos_tagging(directory)
        # put them into the dir for POS-tagged files
        self._move_by_suffix(directory, pos_tagged_directory, POS_SUFF)
        # then handle the pre-tagged files - copy into the original dir
        self._copy_by_suffix(holding_dir_for_pretagged, directory, POS_SUFF)
        self._move_by_suffix(holding_dir_for_pretagged, pos_tagged_directory, POS_SUFF)
        os.rmdir(holding_dir_for_pretagged)
    def _pos_tag_sentence_list(self, source_sentences):
        # Write one sentence per line to a temp file, then tag the file.
        raw_fname = self._get_temp_name('raw')
        with open(raw_fname, 'w', encoding=ENCODING) as f:
            for sent in source_sentences:
                f.write(sent + u'\n')
        pos_fname = self._get_temp_name('pos')
        self._do_pos_tagging(raw_fname, pos_fname)
        return pos_fname
    def _parse_single_file(self, pos_tagged_fname):
        parsed_fname = self._get_temp_name('parsed')
        self._do_parsing(pos_tagged_fname, parsed_fname)
        return parsed_fname
    def _parse_directory(self, directory, parsed_directory):
        # ClearParser writes *.parse files next to the inputs when given
        # a directory; collect them into `parsed_directory` afterwards.
        self._do_parsing(directory)
        self._move_by_suffix(directory, parsed_directory, PARSE_SUFF)
    def _do_pos_tagging(self, raw_fname, pos_fname=None):
        # check_call raises CalledProcessError on a non-zero exit.
        args = self._get_pos_tagging_args(raw_fname, pos_fname)
        subprocess.check_call(args)
    def _do_parsing(self, pos_tagged_fname, parsed_fname=None):
        args = self._get_parsing_args(pos_tagged_fname, parsed_fname)
        subprocess.check_call(args)
    def preprocess_raw(self, sentences):
        """Subclass hook: transform raw sentences before tagging."""
        return sentences
    def postprocess_pos_tags(self, pos_tagged_fname, postprocessed_fname):
        """Subclass hook: rewrite a POS-tagged file (default: plain copy)."""
        shutil.copy(pos_tagged_fname, postprocessed_fname)
    def postprocess_parsed(self, parsed_fname, postprocessed_fname):
        """Subclass hook: rewrite a parsed file (default: plain copy)."""
        shutil.copy(parsed_fname, postprocessed_fname)
    def _get_classpath(self):
        # Every jar under <clearparser_base>/lib joins the classpath.
        jar_path_glob = path.join(self.clearparser_base, 'lib', '*.jar')
        return ':'.join(glob.glob(jar_path_glob))
    def _get_required_env(self):
        return {'CLASSPATH': self._get_classpath()}
    def _get_temp_name(self, kind):
        # NOTE(review): second-resolution timestamps can collide if two
        # files of the same kind are requested within one second.
        tempdir = tempfile.gettempdir()
        fstem = 'clearparser.%s.%s' % (
            datetime.now().strftime('%Y%m%d%H%M%S'),
            kind)
        return path.join(tempdir, fstem)
    def _get_base_args(self):
        # Common 'java' invocation prefix shared by tagger and parser.
        return ['java', '-cp', self._get_classpath()] + CLEAR_JAVA_EXTRA_ARGS
    def _get_pos_tagging_args(self, raw_fname, pos_fname=None):
        """raw_fname can also be a directory, in which case we don't need pos_fname"""
        return self._get_base_args() + [
            'clear.engine.PosPredict',
            '-c', self.pos_tagging_config,
            '-i', raw_fname, '-o', pos_fname if pos_fname else 'XXX']
    def _get_parsing_args(self, pos_fname, parsed_fname):
        # 'XXX' is a placeholder output name for directory-mode runs.
        return self._get_base_args() + [
            'clear.engine.DepPredict',
            '-c', self.parsing_config,
            '-m', self.dep_model,
            '-i', pos_fname, '-o', parsed_fname if parsed_fname else 'XXX']
    def _fnames_with_suffix(self, directory, suff):
        fnames = glob.glob(path.join(directory, '*' + suff))
        if not fnames:
            warnings.warn("No files with suffix '%s' found in '%s'" % (suff, directory))
        return fnames
    def _move_by_suffix(self, orig_dir, new_dir, target_suff):
        for fname in self._fnames_with_suffix(orig_dir, target_suff):
            shutil.move(fname, new_dir)
    def _copy_by_suffix(self, orig_dir, new_dir, target_suff):
        # copy2 preserves file metadata (timestamps etc.).
        for fname in self._fnames_with_suffix(orig_dir, target_suff):
            shutil.copy2(fname, new_dir)
class DepGraphError(Exception):
    """Base class for dependency-graph errors raised by this module."""
    pass
class DepGraphIntegrityError(DepGraphError):
    """Raised when a graph violates a structural invariant (e.g. a
    non-ROOT link pointing at the root node)."""
    pass
class MissingDepGraphNodeError(DepGraphError):
    """Raised when a referenced node cannot be found in the graph."""
    pass
class DependencyGraph(object):
    """A utility class to handle ClearParser dependency graphs
    and do some very basic graph operations with them.
    """
    def __init__(self, non_root_nodes, links, raw_parser_output=None, graph_label=''):
        # A synthetic root (id '0') is always prepended; links refer to
        # node ids, so nodes_by_id gives O(1) resolution when building
        # the graph views.
        self.root_node = RootNode()
        self.nodes = [self.root_node] + non_root_nodes
        self.links = links
        self.nodes_by_id = dict((node.id, node) for node in self.nodes)
        self.raw_parser_output = raw_parser_output
        self.graph_label = graph_label
        # Lazily-built cached graph views (networkx / pygraphviz).
        self._as_networkx = None
        self._as_pgv = None
    def top_content_nodes(self):
        """Return the nodes attached to the root via a ROOT relation.

        Raises DepGraphIntegrityError if any non-ROOT link also points
        at the root node.
        """
        root = self.root_node
        root_predecs = self.predecessors_for_label(root, u'ROOT')
        # NOTE(review): len() on predecessors() assumes it returns a
        # list (networkx 1.x); in networkx >= 2 it is an iterator.
        if len(self.as_networkx().predecessors(root)) != len(root_predecs):
            raise DepGraphIntegrityError(
                "Found a non-ROOT link pointing to the root node in graph %r " %
                self.graph_label)
        return root_predecs
    def predecessors_for_label(self, node, label):
        """Return predecessor nodes whose edge to `node` carries `label`
        ('reln' edge attribute); all predecessors when label is None."""
        as_nx = self.as_networkx()
        predecs = []
        for predec_node in as_nx.predecessors(node):
            if label is None or as_nx.adj[predec_node][node]['reln'] == label:
                predecs.append(predec_node)
        return predecs
    def predecessors(self, node):
        """Return all predecessors of `node`, regardless of edge label."""
        return self.predecessors_for_label(node, None)
    def as_networkx(self):
        """Cached networkx.DiGraph view of this dependency graph."""
        if not self._as_networkx:
            self._as_networkx = self._get_networkx()
        return self._as_networkx
    def as_pgv(self):
        """Cached pygraphviz.AGraph view of this dependency graph."""
        if not self._as_pgv:
            self._as_pgv = self._get_pgv()
        return self._as_pgv
    def _get_networkx(self):
        # Nodes are the node objects themselves; edges run from_id ->
        # to_id with the relation label stored as 'reln'.
        graph = nx.DiGraph()
        for node in self.nodes:
            graph.add_node(node)
        for link in self.links:
            graph.add_edge(
                self.nodes_by_id[link.from_id], self.nodes_by_id[link.to_id],
                reln=link.label)
        return graph
    def _get_pgv(self):
        # Imported lazily so pygraphviz is only needed for drawing.
        import pygraphviz as pgv
        graph = pgv.AGraph(directed=True)
        # pygraphviz nodes are strings: render each as 'id:brief-repr'.
        nodes_for_pgv = dict(
            (node.id, u'%s:%s' % (node.id, self.node_brief_repr(node)))
            for node in self.nodes)
        for node in self.nodes:
            graph.add_node(nodes_for_pgv[node.id])
        for link in self.links:
            graph.add_edge(
                nodes_for_pgv[link.from_id], nodes_for_pgv[link.to_id],
                label=link.label)
        # Bottom-to-top layout: dependents below their heads.
        graph.graph_attr['rankdir'] = 'BT'
        return graph
    def node_brief_repr(self, node):
        """'form/POS' for token nodes; the graph label for the root."""
        return node_form_pos(node) if isinstance(node, DepNode) else str(self.graph_label)
    def as_tikz_dep_latex(self, std_direction=True, explicit_zero_node=False,
            edge_unit_distance=3.0):
        """Render this graph as LaTeX for the tikz-dependency package.

        `std_direction` flips the arrows (head -> dependent);
        `explicit_zero_node` draws the artificial root as a token
        column instead of using \\deproot."""
        dep_text = []
        # Token rows: optionally include the synthetic root as column 0.
        if explicit_zero_node:
            nodes = self.nodes
        else:
            nodes = self.nodes[1:]
        nids = [n.id for n in nodes]
        forms = [getattr(n, 'form', '') for n in nodes]
        lemmas = [getattr(n, 'lemma', '') for n in nodes]
        postags = [getattr(n, 'pos', '') for n in nodes]
        dt_indent = ' ' * 6
        for dt_elem_list in (nids, forms, lemmas, postags):
            # Escape '&' (the deptext column separator) for LaTeX.
            dt_elem_list = [e.replace('&', '$\with$') for e in dt_elem_list]
            dep_text.append(
                dt_indent + u' \\& '.join(dt_elem_list) + u'\\\\ \n')
        # tikz-dependency addresses tokens by 1-based column index.
        nids_to_indexes = dict((n.id, idx + 1) for idx, n in enumerate(nodes))
        edges = []
        edge_indent = ' ' * 4
        if not explicit_zero_node:
            for node in self.top_content_nodes():
                edges.append(u'\\deproot{%d}{ROOT}\n' % nids_to_indexes[node.id])
        for link in self.links:
            # ROOT links are already covered by \deproot above.
            if link.label == u'ROOT' and not explicit_zero_node:
                continue
            from_idx = nids_to_indexes[link.from_id]
            to_idx = nids_to_indexes[link.to_id]
            if std_direction:
                from_idx, to_idx = to_idx, from_idx
            # Shrink very long arcs so they don't dominate the figure.
            dist = abs(from_idx - to_idx)
            if dist > 8:
                unit_dist = 0.5 * edge_unit_distance + \
                    (4.0 * edge_unit_distance) / dist
                scaling = '[edge unit distance = %0.3fex]' % unit_dist
            else:
                scaling = ''
            edges.append(u'%s\\depedge%s{%d}{%d}{%s}\n' %
                (edge_indent, scaling, from_idx, to_idx, link.label))
        return TIKZ_DEP_TEMPLATE % (
            edge_unit_distance, u''.join(dep_text), u''.join(edges))
    def draw(self):
        """Draw the graph via networkx's spring layout (matplotlib)."""
        nxgraph = self.as_networkx()
        node_labels = {}
        for node in self.nodes:
            node_labels[node] = self.node_brief_repr(node)
        layout = nx.spring_layout(nxgraph)
        nx.draw_networkx(nxgraph, pos=layout, labels=node_labels)
        nx.draw_networkx_edge_labels(nxgraph, pos=layout)
    @classmethod
    def from_clear_parse(cls, raw_parse_data, graph_label=''):
        """Build a graph from raw tab-separated ClearParser output
        (one token per line; blank lines are skipped)."""
        nodes = []
        links = []
        for node_data in raw_parse_data.split(u'\n'):
            if not node_data:
                continue
            # Columns: id, form, lemma, POS, (unused), head id, relation.
            comps = node_data.split(u'\t')
            nid, form, lemma, pos, _, link_to_nid, link_label = comps
            node = DepNode(nid, form, lemma, pos)
            nodes.append(node)
            if link_to_nid:
                link = DepLink(nid, link_to_nid, link_label)
                links.append(link)
        return cls(nodes, links, raw_parse_data, graph_label)
    @classmethod
    def from_clear_parse_file(cls, parse_filename, graph_label=''):
        """Build a single graph from a parse file; the label defaults
        to the filename."""
        with open(parse_filename, encoding=ENCODING) as f:
            raw_parse = f.read()
        return cls.from_clear_parse(
            raw_parse, graph_label if graph_label else parse_filename)
    @classmethod
    def multi_from_clear_parse_file(cls, parse_filename, graph_label=''):
        """Yield one graph per blank-line-separated sentence in the file."""
        with open(parse_filename, encoding=ENCODING) as f:
            raw_parses = f.read()
        for raw_parse in raw_parses.split(u'\n\n'):
            yield cls.from_clear_parse(
                raw_parse, graph_label if graph_label else parse_filename)
    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.nodes, self.links)
class RootNode(object):
    """Synthetic root node (id '0') of a dependency graph.

    All instances compare equal and share one hash, so root nodes
    collapse to a single entry in sets and dict keys."""
    _ROOT_ID = '0'
    @property
    def id(self):
        """The fixed identifier of the root node."""
        return self._ROOT_ID
    def __hash__(self):
        return hash(self._ROOT_ID)
    def __eq__(self, other):
        # Equal to any object exposing the same id (other root nodes).
        return other.id == self.id
    def __repr__(self):
        return '{0}()'.format(type(self).__name__)
# Lightweight records for graph contents: a token node (CoNLL-style
# fields) and a directed, labelled dependency link between node ids.
DepNode = namedtuple('DepNode', ['id', 'form', 'lemma', 'pos'])
DepLink = namedtuple('DepLink', ['from_id', 'to_id', 'label'])
def node_form_pos(node):
    """Render a node as 'form/POS' (e.g. u'dog/NN')."""
    return u'{0}/{1}'.format(node.form, node.pos)
def node_flp(node):
    """Render a node as 'form[lemma]/POS' for verbose displays."""
    return u'{0}[{1}]/{2}'.format(node.form, node.lemma, node.pos)
def draw_stored_parse_with_pgv(parse_filename, format='pdf', output_filename=''):
    """Render a stored ClearParser parse file to an image via pygraphviz.

    When no explicit output filename is given, the rendering is written
    next to the input as '<parse_filename>.<format>'.
    """
    pgv_graph = DependencyGraph.from_clear_parse_file(parse_filename).as_pgv()
    # 'dot' gives the usual layered tree layout.
    pgv_graph.layout('dot')
    if not output_filename:
        output_filename = "%s.%s" % (parse_filename, format)
    pgv_graph.draw(output_filename)
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/gui/kivy/uix/gridview.py | from kivy.uix.boxlayout import BoxLayout
from kivy.adapters.dictadapter import DictAdapter
from kivy.adapters.listadapter import ListAdapter
from kivy.properties import ObjectProperty, ListProperty, AliasProperty
from kivy.uix.listview import (ListItemButton, ListItemLabel, CompositeListItem,
ListView)
from kivy.lang import Builder
from kivy.metrics import dp, sp
Builder.load_string('''
<GridView>
header_view: header_view
content_view: content_view
BoxLayout:
orientation: 'vertical'
padding: '0dp', '2dp'
BoxLayout:
id: header_box
orientation: 'vertical'
size_hint: 1, None
height: '30dp'
ListView:
id: header_view
BoxLayout:
id: content_box
orientation: 'vertical'
ListView:
id: content_view
<-HorizVertGrid>
header_view: header_view
content_view: content_view
ScrollView:
id: scrl
do_scroll_y: False
RelativeLayout:
size_hint_x: None
width: max(scrl.width, dp(sum(root.widths)))
BoxLayout:
orientation: 'vertical'
padding: '0dp', '2dp'
BoxLayout:
id: header_box
orientation: 'vertical'
size_hint: 1, None
height: '30dp'
ListView:
id: header_view
BoxLayout:
id: content_box
orientation: 'vertical'
ListView:
id: content_view
''')
class GridView(BoxLayout):
    """Workaround solution for grid view by using 2 list view.
    Sometimes the height of lines is shown properly."""
    def _get_hd_adpt(self):
        return self.ids.header_view.adapter
    # Read-only alias for the header ListView's adapter.
    header_adapter = AliasProperty(_get_hd_adpt, None)
    '''
    '''
    def _get_cnt_adpt(self):
        return self.ids.content_view.adapter
    # Read-only alias for the content ListView's adapter.
    content_adapter = AliasProperty(_get_cnt_adpt, None)
    '''
    '''
    # Column titles shown in the single header row.
    headers = ListProperty([])
    '''
    '''
    # Per-column widths; the adapters are only rebuilt when
    # len(widths) == len(headers).
    widths = ListProperty([])
    '''
    '''
    # Row items supplied to the content adapter.
    data = ListProperty([])
    '''
    '''
    # Callable mapping (row_item, column_index) -> cell text.
    getter = ObjectProperty(lambda item, i: item[i])
    '''
    '''
    # Optional on_press handler attached to every content cell.
    on_context_menu = ObjectProperty(None)
    def __init__(self, **kwargs):
        # Guard so on_headers only rebuilds the adapters when triggered
        # via on_widths (i.e. when both lists are in a consistent state).
        self._from_widths = False
        super(GridView, self).__init__(**kwargs)
        #self.on_headers(self, self.headers)
    def on_widths(self, instance, value):
        # A root window is required before the views can be populated.
        if not self.get_root_window():
            return
        self._from_widths = True
        self.on_headers(instance, self.headers)
        self._from_widths = False
    def on_headers(self, instance, value):
        # Ignore direct `headers` changes; rebuilding happens only via
        # on_widths (see __init__) and only with matching list lengths.
        if not self._from_widths:
            return
        if not (value and self.canvas and self.headers):
            return
        widths = self.widths
        if len(self.widths) != len(value):
            return
        #if widths is not None:
        #    widths = ['%sdp' % i for i in widths]
        def generic_args_converter(row_index,
                                   item,
                                   is_header=True,
                                   getter=self.getter):
            # Build the CompositeListItem spec for one row: one
            # ListItemButton per column, styled for header or content.
            cls_dicts = []
            _widths = self.widths
            getter = self.getter
            on_context_menu = self.on_context_menu
            for i, header in enumerate(self.headers):
                kwargs = {
                    'padding': ('2dp','2dp'),
                    'halign': 'center',
                    'valign': 'middle',
                    'size_hint_y': None,
                    'shorten': True,
                    'height': '30dp',
                    'text_size': (_widths[i], dp(30)),
                    'text': getter(item, i),
                }
                kwargs['font_size'] = '9sp'
                if is_header:
                    # Header cells: cyan in both selection states.
                    kwargs['deselected_color'] = kwargs['selected_color'] =\
                        [0, 1, 1, 1]
                else: # this is content
                    kwargs['deselected_color'] = 1, 1, 1, 1
                    if on_context_menu is not None:
                        kwargs['on_press'] = on_context_menu
                if widths is not None: # set width manually
                    kwargs['size_hint_x'] = None
                    kwargs['width'] = widths[i]
                cls_dicts.append({
                    'cls': ListItemButton,
                    'kwargs': kwargs,
                })
            # NOTE(review): item[-1] as the row id assumes the last
            # element of each row item is unique — confirm with callers.
            return {
                'id': item[-1],
                'size_hint_y': None,
                'height': '30dp',
                'cls_dicts': cls_dicts,
            }
        def header_args_converter(row_index, item):
            return generic_args_converter(row_index, item)
        def content_args_converter(row_index, item):
            return generic_args_converter(row_index, item, is_header=False)
        # Rebuild both adapters: the header adapter holds the single
        # headers row; the content adapter holds all data rows.
        self.ids.header_view.adapter = ListAdapter(data=[self.headers],
                                        args_converter=header_args_converter,
                                        selection_mode='single',
                                        allow_empty_selection=False,
                                        cls=CompositeListItem)
        self.ids.content_view.adapter = ListAdapter(data=self.data,
                                        args_converter=content_args_converter,
                                        selection_mode='single',
                                        allow_empty_selection=False,
                                        cls=CompositeListItem)
        # Repopulate the content view whenever its adapter data changes.
        self.content_adapter.bind_triggers_to_view(self.ids.content_view._trigger_reset_populate)
class HorizVertGrid(GridView):
    """GridView variant whose kv rule wraps the grid in a horizontal
    ScrollView, sized from the summed column widths."""
    pass
if __name__ == "__main__":
    # Manual smoke test: show a HorizVertGrid filled with 90 dummy rows.
    from kivy.app import App
    class MainApp(App):
        def build(self):
            # Build (str, str) row pairs; the kv rule below copies them
            # into the grid's content adapter once the widget is parented.
            data = []
            for i in range(90):
                data.append((str(i), str(i)))
            self.data = data
            return Builder.load_string('''
BoxLayout:
    orientation: 'vertical'
    HorizVertGrid:
        on_parent: if args[1]: self.content_adapter.data = app.data
        headers:['Address', 'Previous output']
        widths: [400, 500]
<Label>
    font_size: '16sp'
''')
    MainApp().run()
/Helmholtz-0.2.0.tar.gz/Helmholtz-0.2.0/helmholtz/editor/shortcuts.py | from datetime import datetime
from django.db.models import Model
from helmholtz.editor.models import Entity
def get_schema(schema, child, record=None, key=None):
    """Locate the editor Entity that describes `child`, starting from `schema`.

    Resolution order: (1) a direct child entity of `schema` (narrowed to
    the entity for `record`'s concrete class when one exists); (2) the
    schema itself when its content type equals `child`; (3) for related
    (sub/superclass) content types, a recursive walk up through the
    schema's parents.  Returns None when nothing applies.
    NOTE(review): `record` is only dereferenced when a child entity is
    found, and `key` is only forwarded to get_child_entity — confirm
    intended semantics with callers.
    """
    if not schema :
        return None
    _cls = schema.content_type.model_class()
    new_schema = schema.get_child_entity(child, key)
    if new_schema :
        # Prefer the entity specialised for the record's concrete class.
        return new_schema.get_subclass_entity(record.__class__)
    elif _cls == child :
        return schema
    elif issubclass(_cls, child) or issubclass(child, _cls) :
        # Related classes: climb to the parent Entity (if any) and retry.
        if not schema.parent :
            return schema
        else :
            _parent = schema.parent.cast()
            if isinstance(_parent, Entity) :
                return get_schema(_parent, child)
            else :
                return schema
#            return get_schema(schema.parent.cast(), child)
#            return schema.get_parent_entity(child)
    else :
        return None
def get_constraint(schema):
    """Return the schema's constraint cast to its concrete subclass,
    or None when the schema is missing or has no constraints."""
    if schema and schema.constraints.count():
        return schema.constraints.get().cast()
    return None
def get_displayed_value(record, notation):
    """Return a human-readable value for `record.<notation>`.

    Resolution: prefer the model's get_<notation>_display() when present,
    fall back to a related model's __unicode__, call the result when it
    is callable, and normalise datetimes down to whole seconds.  Returns
    None when the attribute is missing or its access raises.
    """
    try :
        obj_attr = getattr(record, notation)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; any ordinary failure still maps to "no value".
    except Exception :
        return None
    #replace the value by get_FOO_display function
    if isinstance(record, Model) :
        display_attr = getattr(record, 'get_%s_display' % notation, None)
        if display_attr :
            obj_attr = display_attr
    #replace the value by __unicode__ function
    if isinstance(obj_attr, Model) :
        obj_attr = obj_attr.__unicode__
    _tmp = obj_attr() if callable(obj_attr) else obj_attr
    # Rebuild datetimes without microseconds/tzinfo so displayed values
    # compare cleanly at second resolution.
    return _tmp if not isinstance(_tmp, datetime) else datetime(_tmp.year, _tmp.month, _tmp.day, _tmp.hour, _tmp.minute, _tmp.second)
def get_value(record, notation):
    """Resolve the dotted attribute path `notation` against `record` and
    return its human-readable value.

    Each intermediate attribute is fetched (and called when callable);
    the final component is delegated to get_displayed_value.  Returns
    None when any step is missing, raises, or yields a falsy object.
    """
    _chain = notation.split('.')
    _record = record
    if len(_chain) > 1 :
        for _attr in _chain[0:-1] :
            try :
                obj_attr = getattr(_record, _attr)
                if callable(obj_attr) :
                    obj_attr = obj_attr()
                _record = obj_attr
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed mid-walk.
            except Exception :
                return None
    return get_displayed_value(_record, _chain[-1]) if _record else None
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/chardet/mbcssm.py |
from .enums import MachineState
# BIG5
BIG5_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
# Per-byte-class character length used for frequency analysis
# (0 = byte class that never starts a character).
BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
# Model dict consumed by chardet's coding state machine.
BIG5_SM_MODEL = {'class_table': BIG5_CLS,
                 'class_factor': 5,
                 'state_table': BIG5_ST,
                 'char_len_table': BIG5_CHAR_LEN_TABLE,
                 'name': 'Big5'}
# CP949
CP949_CLS = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_ST = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
# Per-byte-class character length used for frequency analysis
# (0 = byte class that never starts a character).
CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
# Model dict consumed by chardet's coding state machine.
CP949_SM_MODEL = {'class_table': CP949_CLS,
                  'class_factor': 10,
                  'state_table': CP949_ST,
                  'char_len_table': CP949_CHAR_LEN_TABLE,
                  'name': 'CP949'}
# EUC-JP
EUCJP_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_ST = (
3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
# Per-byte-class character length used for frequency analysis.
EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
# Model dict consumed by chardet's coding state machine.
EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
                  'class_factor': 6,
                  'state_table': EUCJP_ST,
                  'char_len_table': EUCJP_CHAR_LEN_TABLE,
                  'name': 'EUC-JP'}
# EUC-KR
EUCKR_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_ST = (
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
# Per-byte-class character length used for frequency analysis
# (0 = byte class that never starts a character).
EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
# Model dict consumed by chardet's coding state machine.
EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
                  'class_factor': 4,
                  'state_table': EUCKR_ST,
                  'char_len_table': EUCKR_CHAR_LEN_TABLE,
                  'name': 'EUC-KR'}
# EUC-TW
EUCTW_CLS = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# Per-byte-class character length used for frequency analysis
# (0 = byte class that never starts a character).
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
# Model dict consumed by chardet's coding state machine.
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
                  'class_factor': 7,
                  'state_table': EUCTW_ST,
                  'char_len_table': EUCTW_CHAR_LEN_TABLE,
                  'name': 'x-euc-tw'}
# GB2312
GB2312_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_ST = (
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
# Model dict consumed by chardet's coding state machine.
GB2312_SM_MODEL = {'class_table': GB2312_CLS,
                   'class_factor': 7,
                   'state_table': GB2312_ST,
                   'char_len_table': GB2312_CHAR_LEN_TABLE,
                   'name': 'GB2312'}
# Shift_JIS
SJIS_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS,
'class_factor': 6,
'state_table': SJIS_ST,
'char_len_table': SJIS_CHAR_LEN_TABLE,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_ST = (
5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
'class_factor': 6,
'state_table': UCS2BE_ST,
'char_len_table': UCS2BE_CHAR_LEN_TABLE,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_ST = (
6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
'class_factor': 6,
'state_table': UCS2LE_ST,
'char_len_table': UCS2LE_CHAR_LEN_TABLE,
'name': 'UTF-16LE'}
# UTF-8
UTF8_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_ST = (
MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS,
                 'class_factor': 16,
                 'state_table': UTF8_ST,
                 'char_len_table': UTF8_CHAR_LEN_TABLE,
                 'name': 'UTF-8'}
# /Dabo-0.9.16.tar.gz/Dabo-0.9.16/dabo/db/dbMsSQL.py
import datetime
from dabo.dLocalize import _
from dBackend import dBackend
from dabo.lib.utils import ustr
class MSSQL(dBackend):
    """Class providing Microsoft SQL Server connectivity. Uses pymssql."""

    def __init__(self):
        dBackend.__init__(self)
        #- jfcs 11/06/06 first try getting Microsoft SQL 2000 server working
        # MSSQL requires the installation of FreeTDS. FreeTDS can be downloaded from
        # http://www.freetds.org/
        self.dbModuleName = "pymssql"
        self.useTransactions = True  # this does not appear to be required
        import pymssql
        self.dbapi = pymssql

    def getConnection(self, connectInfo, forceCreate=False, **kwargs):
        """
        Open and return a pymssql connection described by connectInfo.

        The pymssql module requires the connection be created for the FreeTDS libraries first.
        Therefore, the DSN is really the name of the connection for FreeTDS :
        __init__(self, dsn, user, passwd, database = None, strip = 0)
        """
        port = ustr(connectInfo.Port)
        if not port or port == "None":
            # Fall back to the default SQL Server TCP port.
            port = 1433
        host = "%s:%s" % (connectInfo.Host, port)
        user = connectInfo.User
        password = connectInfo.revealPW()
        database = connectInfo.Database
        # hack to make this work. I am sure there is a better way.
        # (getTables()/getFields() need the catalog name later on.)
        self.database = database
        # Hack to make new driver working with non us-ascii encoding.
        if "charset" not in kwargs and self.dbapi.__version__ >= "2.0.0":
            kwargs["charset"] = self.Encoding
        self._connection = self.dbapi.connect(host=host, user=user, password=password,
                database=database, **kwargs)
        return self._connection

    def getDictCursorClass(self):
        """Since there are two versions of driver package we support both,
        deprecated and new one.

        Returns a cursor class that yields rows as dictionaries keyed by
        column name.
        """
        if self.dbapi.__version__ >= "2.0.0":
            class ConCursor(self.dbapi.Cursor):
                def __init__(self, *args, **kwargs):
                    # pymssql requires an additional param to be passed
                    # to its __init__() method
                    kwargs["as_dict"] = True
                    super(ConCursor, self).__init__(*args, **kwargs)

                def fetchall(self):
                    # In dictionary mode both column numbers and names are used
                    # as keys. We need to filter them and leave name based keys only.
                    rows = super(ConCursor, self).fetchall()
                    for row in rows:
                        # Half of the keys are integer column positions 0..n-1;
                        # drop them. Floor division keeps the argument integral
                        # on both Python 2 and 3 (the original "/" produced a
                        # float under Python 3 and broke range()).
                        for key in range(len(row) // 2):
                            row.pop(key, None)
                    return rows
        else:
            class ConCursor(self.dbapi.pymssqlCursor):
                def __init__(self, *args, **kwargs):
                    # pymssql requires an additional param to be passed
                    # to its __init__() method
                    kwargs["as_dict"] = True
                    super(ConCursor, self).__init__(*args, **kwargs)

                if not hasattr(self.dbapi.pymssqlCursor, "connection"):
                    def _getconn(self):
                        return self._source
                    # pymssql doesn't supply this optional dbapi attribute, so create it here.
                    connection = property(_getconn, None, None)
        return ConCursor

    def escQuote(self, val):
        """Return *val* escaped and wrapped in single quotes for SQL use."""
        # escape backslashes and single quotes, and
        # wrap the result in single quotes
        sl = "\\"
        qt = "\'"
        return qt + val.replace(sl, sl + sl).replace(qt, sl + qt) + qt

    def formatDateTime(self, val):
        """We need to wrap the value in quotes."""
        sqt = "'"  # single quote
        val = ustr(val)
        return "%s%s%s" % (sqt, val, sqt)

    def getTables(self, cursor, includeSystemTables=False):
        """Return a tuple of 'schema.table' names in the current catalog.

        Only base tables and views are returned; the includeSystemTables
        flag is currently ignored by this backend.
        """
        # jfcs 11/01/06 assumed public schema
        # cfk: this worries me: how does it know what db is being used?
        # tempCursor.execute("select name from sysobjects where xtype = 'U' order by name")
        dbName = self.database
        sql = """
            select table_schema + '.' + table_name AS table_name
            from INFORMATION_SCHEMA.TABLES
            where table_catalog = '%(db)s'
            and table_type IN ('BASE TABLE', 'VIEW')
            order by 1 """
        cursor.execute(sql % {'db': dbName})
        rs = cursor.getDataSet()
        tables = tuple(x["table_name"] for x in rs)
        return tables

    def getTableRecordCount(self, tableName, cursor):
        """Return the number of records in the passed table."""
        # Bug fix: the original used "... from '%(tablename)'" % tableName,
        # which is doubly broken: the mapping key has no conversion type
        # (always raising ValueError), a bare string is not a valid mapping,
        # and the table name was quoted as a string literal instead of being
        # used as an identifier.
        cursor.execute("select count(*) as ncount from %(tablename)s"
                % {"tablename": tableName})
        return cursor.getDataSet()[0]["ncount"]

    def _fieldTypeNativeToDabo(self, nativeType):
        """
        converts the results of
        select DATA_TYPE from INFORMATION_SCHEMA.COLUMNS
        to a dabo datatype.
        """
        # todo: break out the dict into a constant defined somewhere
        # todo: make a formal definition of the dabo datatypes.
        #       (at least document them)
        try:
            ret = {
                "BINARY": "I",
                "BIT": "I",
                "BIGINT": "G",
                "BLOB": "M",
                "CHAR": "C",
                "DATE": "D",
                "DATETIME": "T",
                "DATETIME2": "T",
                "DECIMAL": "N",
                "DOUBLE": "G", ## G maps to Long (INT), but this could be wrong if it is supposed to be a double float.
                "ENUM": "C",
                "FLOAT": "F",
                "GEOMETRY": "?",
                "INT": "I",
                "IMAGE": "?",
                "INTERVAL": "?",
                "LONG": "G",
                "LONGBLOB": "M",
                "LONGTEXT": "M",
                "MEDIUMBLOB": "M",
                "MEDIUMINT": "I",
                "MEDIUMTEXT": "M",
                "MONEY": "F",
                "NEWDATE": "?",
                "NCHAR": "C",
                "NTEXT": "M",
                "NUMERIC": "N",
                "NVARCHAR": "C",
                "NULL": "?",
                "SET": "?",
                "SHORT": "I",
                "SMALLINT": "I",
                "STRING": "C",
                "TEXT": "M",
                "TIME": "?",
                "TIMESTAMP": "T",
                "TINY": "I",
                "TINYINT": "I",
                "TINYBLOB": "M",
                "TINYTEXT": "M",
                "UNIQUEIDENTIFIER": "?",
                "VARBINARY": "I",
                "VARCHAR": "C",
                "VAR_STRING": "C",
                "YEAR": "?"}[nativeType.upper()]
        except KeyError:
            # Unknown native type: report it and fall back to the "unknown"
            # marker. The function-call form of print is valid (and prints
            # the same text) on both Python 2 and Python 3; the original
            # used the Python-2-only print statement.
            print('KeyError: %s' % nativeType)
            ret = '?'
        return ret

    def getFields(self, tableName, cursor):
        """
        Returns the list of fields of the passed table
        field: ( fieldname, dabo data type, key )
        """
        # fairly standard way of getting column settings
        # this may be standard enough to put in the super class
        dbName = self.database
        tableNamespace = tableName.split(".")
        if len(tableNamespace) > 1:
            tableSchema = tableNamespace[-2]
            tableName = tableNamespace[-1]
        else:
            # No schema qualifier given; assume the default schema.
            tableSchema = "dbo"
        if not tableName:
            return ()
        sql = """
            select COLUMN_NAME,
                DATA_TYPE
            from INFORMATION_SCHEMA.COLUMNS
            where table_catalog = %s and
                table_schema = %s and
                table_name = %s
            order by ORDINAL_POSITION """
        cursor.execute(sql, (dbName, tableSchema, tableName))
        fieldDefs = cursor.getDataSet()
        # Find the primary-key members of the table.
        sql = """
            select kc.COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kc
            inner join INFORMATION_SCHEMA.TABLE_CONSTRAINTS as tc
                on tc.CONSTRAINT_NAME = kc.CONSTRAINT_NAME
            where kc.TABLE_CATALOG = %s and
                kc.TABLE_SCHEMA = %s and
                kc.TABLE_NAME = %s and
                tc.CONSTRAINT_TYPE = 'PRIMARY KEY' """
        cursor.execute(sql, (dbName, tableSchema, tableName))
        pkFields = cursor.getDataSet()
        fields = []
        for r in fieldDefs:
            name = r["COLUMN_NAME"]
            ft = self._fieldTypeNativeToDabo(r["DATA_TYPE"])
            pk = (name,) in [(p["COLUMN_NAME"],) for p in pkFields]
            fields.append((name, ft, pk))
        return tuple(fields)

    def noResultsOnSave(self):
        """
        Most backends will return a non-zero number if there are updates.
        Some do not, so this will have to be customized in those cases.
        """
        return

    def noResultsOnDelete(self):
        """
        Most backends will return a non-zero number if there are deletions.
        Some do not, so this will have to be customized in those cases.
        """
        #raise dException.dException(_("No records deleted"))
        return

    def flush(self, cursor):
        """Commit any pending changes on the cursor's connection."""
        self.commitTransaction(cursor)

    def getLimitWord(self):
        """MS SQL uses TOP rather than LIMIT to restrict result size."""
        return "TOP"

    def formSQL(self, fieldClause, fromClause, joinClause,
                whereClause, groupByClause, orderByClause, limitClause):
        """MS SQL wants the limit clause before the field clause."""
        clauses = (limitClause, fieldClause, fromClause, joinClause,
                whereClause, groupByClause, orderByClause)
        sql = "SELECT " + "\n".join([clause for clause in clauses if clause])
        return sql

    def getLastInsertID(self, cursor):
        """
        Pymssql does not populate the 'lastrowid' attribute of the cursor, so we
        need to get the newly-inserted PK ourselves.
        """
        # Use the AuxCursor so as not to disturb the contents of the primary data cursor.
        try:
            idVal = cursor.lastrowid
        except AttributeError:
            crs = cursor.AuxCursor
            crs.execute("select @@IDENTITY as newid")
            idVal = crs.getFieldVal("newid")
        # Some interface versions return PK constraint values as Decimal type
        # what isn't well tolerated by Dabo.
        if "Decimal" in str(type(idVal)):
            idVal = int(idVal)
        return idVal

    def beginTransaction(self, cursor):
        # No explicit BEGIN needed; pymssql starts transactions implicitly.
        pass
# /Misago-0.36.1.tar.gz/Misago-0.36.1/misago/users/profilefields/__init__.py
import copy
import logging
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.module_loading import import_string
from django.utils.translation import gettext as _
from ...conf import settings
from .basefields import *
from .serializers import serialize_profilefields_data
logger = logging.getLogger("misago.users.ProfileFields")
class ProfileFields:
    """Lazily-built registry of the profile fields configured for the site.

    Field classes are declared as dotted import paths grouped under named
    sections; they are imported and instantiated on first use.  The registry
    also knows how to wire the fields into Django forms, clean submitted
    values, persist them on the user, and build search filters from them.
    """

    def __init__(self, fields_groups):
        self.is_loaded = False
        self.fields_groups = fields_groups
        self.fields_dict = {}

    def _ensure_loaded(self):
        """Populate the registry on first use."""
        if not self.is_loaded:
            self.load()

    def load(self):
        """Import every configured field class, validating the configuration.

        Raises ValueError when a field path appears twice, when a field class
        lacks a ``fieldname`` attribute, or when two classes claim the same
        ``fieldname``.
        """
        self.fields_dict = {}
        seen_fieldnames = {}
        for group in self.fields_groups:
            for field_path in group["fields"]:
                field_class = import_string(field_path)
                field_class._field_path = field_path
                if field_path in self.fields_dict:
                    raise ValueError(
                        "%s profile field has been specified twice"
                        % field_class._field_path
                    )
                fieldname = getattr(field_class, "fieldname", None)
                if not fieldname:
                    raise ValueError(
                        "%s profile field has to specify fieldname attribute"
                        % field_class._field_path
                    )
                if fieldname in seen_fieldnames:
                    raise ValueError(
                        (
                            '%s profile field defines fieldname "%s" '
                            "that is already in use by the %s"
                        )
                        % (
                            field_class._field_path,
                            fieldname,
                            seen_fieldnames[fieldname],
                        )
                    )
                seen_fieldnames[fieldname] = field_path
                self.fields_dict[field_path] = field_class()
        self.is_loaded = True

    def get_fields(self):
        """Return all field instances, loading the registry if needed."""
        self._ensure_loaded()
        return self.fields_dict.values()

    def get_fields_groups(self):
        """Return [{"name": translated_name, "fields": [instances]}, ...],
        skipping groups that contain no fields."""
        self._ensure_loaded()
        groups = []
        for group in self.fields_groups:
            members = [self.fields_dict[path] for path in group["fields"]]
            if members:
                groups.append({"name": _(group["name"]), "fields": members})
        return groups

    def add_fields_to_form(self, request, user, form):
        """Attach the form fields of every editable profile field to *form*,
        recording their names on ``form._profile_fields``."""
        self._ensure_loaded()
        form._profile_fields = []
        for field in self.get_fields():
            if field.is_editable(request, user):
                form._profile_fields.append(field.fieldname)
                form.fields[field.fieldname] = field.get_form_field(request, user)

    def add_fields_to_admin_form(self, request, user, form):
        """Like add_fields_to_form, but also records the group structure on
        ``form._profile_fields_groups`` for admin rendering."""
        self.add_fields_to_form(request, user, form)
        form._profile_fields_groups = []
        for group in self.fields_groups:
            names = [
                self.fields_dict[path].fieldname
                for path in group["fields"]
                if self.fields_dict[path].fieldname in form._profile_fields
            ]
            if names:
                form._profile_fields_groups.append(
                    {"name": _(group["name"]), "fields": names}
                )

    def clean_form(self, request, user, form, cleaned_data):
        """Run each field's clean() over the submitted data, converting
        ValidationErrors into form errors."""
        for field in self.get_fields():
            fieldname = field.fieldname
            if fieldname not in cleaned_data:
                continue
            try:
                cleaned_data[fieldname] = field.clean(
                    request, user, cleaned_data[fieldname]
                )
            except ValidationError as error:
                form.add_error(fieldname, error)
        return cleaned_data

    def update_user_profile_fields(self, request, user, form):
        """Replace the user's profile fields with the form's cleaned values,
        logging the edit when anything meaningful actually changed."""
        previous = copy.copy(user.profile_fields or {})
        updated = {
            fieldname: form.cleaned_data[fieldname]
            for fieldname in form._profile_fields
            if fieldname in form.cleaned_data
        }
        user.profile_fields = updated
        # Compare only truthy values so adding/removing empty entries
        # doesn't count as an edit.
        previous_reduced = {k: v for k, v in previous.items() if v}
        updated_reduced = {k: v for k, v in updated.items() if v}
        if previous_reduced != updated_reduced:
            self.log_profile_fields_update(request, user)

    def log_profile_fields_update(self, request, user):
        """Write an info-level audit log entry for a profile fields edit."""
        if request.user == user:
            message = "%s edited own profile fields" % user.username
        else:
            message = "%s edited %s's (#%s) profile fields" % (
                request.user,
                user.username,
                user.pk,
            )
        detail_url = reverse(
            "misago:user-details", kwargs={"slug": user.slug, "pk": user.pk}
        )
        logger.info(
            message,
            extra={"absolute_url": request.build_absolute_uri(detail_url)},
        )

    def search_users(self, criteria, queryset):
        """Filter *queryset* by OR-ing together every field's search clause;
        return the queryset unchanged when no field produced a clause."""
        self._ensure_loaded()
        combined = None
        for field in self.fields_dict.values():
            clause = field.search_users(criteria)
            if not clause:
                continue
            if combined:
                combined = combined | clause
            else:
                combined = clause
        if combined:
            return queryset.filter(combined)
        return queryset
profilefields = ProfileFields(settings.MISAGO_PROFILE_FIELDS)
# /HTConsole-0.2.tar.gz/HTConsole-0.2/htconsole/htmlrepr.py
import dispatch
import types
import inspect
import os
import textwrap
import itertools
from cStringIO import StringIO
import traceback
from webhelpers.util import html_escape
import simplejson
# URL template for linking a standard-library module name to python.org docs.
py_doc_base = 'http://python.org/doc/current/lib/module-%s.html'
# Directory holding the running interpreter's stdlib (inspect lives there);
# used by html_repr_module to detect standard-library modules.
stdlib_dir = os.path.dirname(inspect.__file__)
@dispatch.generic()
def html_repr(obj, context, verbosity, interactive):
    """Return the HTML representation of the object.

    This is a RuleDispatch generic function; the concrete implementations
    below are registered with @html_repr.when(...) predicates.

    verbosity is an integer:

    0:
        very brief (like repr(), but maybe shorter)
    1:
        normal, but not excessive
    2:
        pretty verbose/complete
    3:
        complete, maybe recursively complete

    interactive is a boolean
    """
    pass
# Maps id(obj) -> small sequential integer so that objects with the default
# repr get a stable, human-friendly number instead of a memory address.
# NOTE(review): entries are never evicted, so an id reused after garbage
# collection could collide -- presumably acceptable for a console tool.
obj_memory = {}
obj_counter = itertools.count()
@html_repr.when("isinstance(obj, object)")
def html_repr_object(obj, context, verbosity, interactive):
    """Fallback HTML repr for arbitrary objects (lowest-priority rule)."""
    # Objects may opt in to fully custom rendering via an __html_repr__ hook.
    if hasattr(obj, '__html_repr__'):
        return obj.__html_repr__(verbosity)
    try:
        concrete_repr = obj.__class__.__repr__
    except AttributeError:
        concrete_repr = None
    if concrete_repr is object.__repr__:
        # The default repr, boo!
        # Replace the useless "<Foo object at 0x...>" address with a small
        # per-object number taken from the obj_memory registry.
        ob_id = id(obj)
        if ob_id in obj_memory:
            small_id = obj_memory[ob_id]
        else:
            small_id = obj_memory[ob_id] = obj_counter.next()
        class_name = obj.__class__.__name__
        if obj.__class__.__module__ != '__builtin__':
            # Show the dotted module path in the tooltip for non-builtins.
            class_name = obj.__class__.__module__ + '.' + class_name
        return '''
        <code class="py-repr"><<span
        title="%s">%s</span> object
        %s></code>''' % (class_name,
                         obj.__class__.__name__,
                         small_id)
    # The class defines its own __repr__; trust it (escaped for HTML).
    return '<code class="py-repr">%s</code>' % html_escape(repr(obj))
@html_repr.when("isinstance(obj, (list, tuple))")
def html_repr_list(obj, context, verbosity, interactive):
    """Render a list or tuple, one item per line, recursing at lower verbosity.

    Bug fixes relative to the original: the signature took a bogus fifth
    ``type`` parameter -- the generic function dispatches with four
    arguments, so ``type`` shadowed the builtin and received ``verbosity``,
    making ``type(obj)`` a TypeError -- and the recursive ``html_repr`` call
    omitted the mandatory ``interactive`` argument.
    """
    if isinstance(obj, list):
        sep = '[]'
    else:
        sep = '()'
    content = ''.join([
        '  %s,<br>\n' % html_repr(item, context, verbosity-1, interactive)
        for item in obj])
    return '%s: %s<br>\n%s%s' % (
        type(obj).__name__, sep[0], content, sep[1])
@html_repr.when("isinstance(obj, types.FunctionType)")
def html_repr_func(obj, context, verbosity, interactive):
    """Render a function as an editable HTML widget (name, args, source)."""
    # formatargspec returns "(a, b=1)"; strip the surrounding parentheses.
    args = inspect.formatargspec(
        *inspect.getargspec(obj))[1:-1]
    try:
        # Drop the "def ..." header line and normalize the indentation.
        body = inspect.getsource(obj)
        body = ''.join(body.splitlines(True)[1:])
        body = textwrap.dedent(body)
    except IOError:
        if getattr(obj, 'body', None):
            # A previous in-browser edit via _html_set_func stored the
            # source text on the function object itself.
            body = obj.body
        else:
            # cannot get source code :(
            body = '(code not found)'
    name = obj.func_name
    obj_id = context.get_id()
    # URI the browser posts the edited source back to (handled by
    # _html_set_func below).
    uri = context.get_http_callback(obj, _html_set_func)
    return '''
    <div id="%(id)s"><code>def
    <b id="%(id)s-name">%(name)s</b>(<span id="%(id)s-args">%(args)s</span>):</code>
    <a href="#" onclick="editFunc(%(id)r, %(uri)r)"
    title="edit this function"
    class="small-button">edit</a>
    <pre id="%(id)s-body" class="py-func-body">%(body)s</pre>
    </div>
    ''' % dict(id=obj_id, name=html_escape(name), args=html_escape(args), body=html_escape(body), uri=uri)
def _html_set_func(obj, context, name, args, body):
    """HTTP callback: replace function *obj*'s code with a user-edited version.

    Rebuilds a ``def`` from the submitted name/args/body, executes it, and
    patches the resulting code onto the live function object so existing
    references (bound methods, callbacks) pick up the new behaviour.
    Returns a JSON payload with either the new HTML repr or the traceback.

    Fix relative to the original: ``exec expr in ns`` is Python-2-only
    statement syntax; ``exec(expr, ns)`` is equivalent on Python 2 and also
    valid on Python 3.  The bare ``except:`` was narrowed to ``Exception``
    so KeyboardInterrupt/SystemExit are not swallowed.
    """
    expr = 'def %s(%s):\n%s' % (name, args, indent(body))
    ns = {}
    try:
        exec(expr, ns)
    except Exception:
        # Report any compile/definition failure back to the browser.
        out = StringIO()
        traceback.print_exc(file=out)
        result = {'error': out.getvalue()}
    else:
        new_func = ns[name]
        # Patch the original function object in place.
        obj.func_code = new_func.func_code
        obj.func_defaults = new_func.func_defaults
        obj.func_doc = new_func.func_doc
        # Remember the raw source for html_repr_func, since
        # inspect.getsource cannot see dynamically-compiled code.
        obj.body = body
        result = {'result': html_repr(obj, context, 1, False)}
    return simplejson.dumps(result)
@html_repr.when("isinstance(obj, types.MethodType)")
def html_repr_method(obj, context, verbosity, interactive):
    """Render a method: its underlying function plus owner information.

    Uses the Python 2 method attributes im_self/im_class/im_func.
    """
    if obj.im_self is None:
        parent_rel = 'unbound method of'
        parent = obj.im_class
    else:
        parent_rel = 'method of'
        parent = obj.im_self
    func_repr = html_repr(obj.im_func, context, verbosity, interactive)
    # Render the owner tersely (verbosity 0) to avoid recursion blow-up.
    parent_repr = html_repr(parent, context, 0, interactive)
    return '''
    <div>%s <span type="py-method-parent">%s</span>:<br>
    %s
    </div>''' % (parent_rel, parent_repr, func_repr)
@html_repr.when("obj is None")
def html_repr_None(obj, context, verbosity, interactive):
    """Render the None singleton as a styled span."""
    return '<span class="py-none">None</span>'
def indent(s, level=4):
    """Return *s* with every line prefixed by *level* spaces.

    Line endings are preserved exactly (splitlines(True) keeps them), so a
    trailing newline in the input survives in the output.
    """
    prefix = ' ' * level
    return ''.join(prefix + line for line in s.splitlines(True))
@html_repr.when("isinstance(obj, types.ModuleType)")
def html_repr_module(obj, context, verbosity, interactive):
    """Render a module, linking stdlib modules to the python.org docs.

    NOTE(review): comparing directories only matches top-level stdlib
    modules, and builtin modules without __file__ would raise
    AttributeError here -- TODO confirm callers never pass them.
    """
    obj_dir = os.path.dirname(obj.__file__)
    if obj_dir == stdlib_dir:
        return (
            'Module <code class="py-module-name">%s</code> '
            'from <a href="%s" target="_blank">standard library</a>'
            % (obj.__name__,
               py_doc_base % obj.__name__))
    return (
        'Module <code class="py-module-name">%s</code> '
        'in <code class="py-module-file">%s</code>'
        % (obj.__name__, obj.__file__))
@html_repr.when("isinstance(obj, basestring)")
def html_repr_string(obj, context, verbosity, interactive):
    """Render a string; long reprs get a click-to-expand "..." link."""
    r = repr(obj)
    if len(r) < 50:
        return '<code>%s</code>' % html_escape(r)
    expand_id = context.get_id()
    # JS callback that replaces the element's content with the full repr
    # (produced by _html_repr_string_long below).
    callback = context.get_js_callback(
        obj, _html_repr_string_long, insert_into=expand_id)
    # Show the first 40 and last 5 characters around the expand link.
    # NOTE(review): unlike the short path, the truncated repr fragments are
    # not html-escaped here -- confirm that is intended.
    return (
        '<code id="%s">' % expand_id
        + r[:40]
        + '<a href="#" onclick="return %s" title="click to expand to full width (%i characters)">...</a>' % (html_escape(callback), len(r))
        + r[-5:]
        + '</code>')
def _html_repr_string_long(obj, context):
    """JS-callback target: return the full, escaped repr of *obj*."""
    return html_escape(repr(obj))
@html_repr.when("isinstance(obj, (types.ClassType, type))")
def html_repr_class(obj, context, verbosity, interactive):
    """Render a class: header with bases, docstring, attributes and methods.

    Bug fixes relative to the original: every recursive ``html_repr`` call
    now passes the mandatory ``verbosity`` and ``interactive`` arguments
    (the generic function declares four required parameters, so the
    original two- and three-argument calls raised TypeError at runtime).
    """
    cls_name = obj.__name__
    bases = obj.__bases__
    if bases:
        bases = ', '.join([c.__name__ for c in bases])
        bases = '(%s)' % bases
    else:
        bases = ''
    # Partition the class dict: dunder bookkeeping entries are handled
    # specially, functions become "methods", everything else "attrs".
    attrs = {}
    methods = {}
    special = {}
    for name, value in obj.__dict__.items():
        if name in ['__doc__', '__module__', '__builtin__']:
            special[name] = value
            continue
        if isinstance(value, types.FunctionType):
            methods[name] = value
        else:
            attrs[name] = value
    if special.get('__doc__'):
        doc = html_repr(special['__doc__'], context, verbosity, interactive)
        doc = '<div class="py-doc">%s</div>' % doc
    else:
        doc = ''
    attrs = sorted(attrs.items())
    attrs = '<br>\n'.join([
        '<code class="py-name">%s</code> = %s'
        % (html_escape(name), html_repr(value, context, verbosity-1, interactive))
        for name, value in attrs])
    methods = sorted(methods.items())
    methods = '<br>\n'.join([
        html_repr(value, context, verbosity-1, interactive)
        for name, value in methods])
    if not methods and not attrs and not doc:
        # An entirely empty class body still needs a statement.
        extra = '<code>pass</code>'
    else:
        extra = ''
    return (
        '<code>class <b>%(name)s</b>%(bases)s:</code>'
        '<blockquote class="py-class-body">\n'
        '%(doc)s %(attrs)s %(methods)s %(extra)s\n'
        '</blockquote>\n'
        % dict(name=cls_name, bases=bases, doc=doc or '',
               extra=extra, attrs=attrs, methods=methods))
# /Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/jsonrpclib/utils.py
import sys
# ------------------------------------------------------------------------------

# Module version
__version_info__ = (0, 4, 0)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

# ------------------------------------------------------------------------------
# Version-dependent string/bytes helpers: a single pair of to_bytes/from_bytes
# functions is defined, with the implementation chosen at import time.

if sys.version_info[0] < 3:
    # Python 2
    # pylint: disable=E1101
    import types

    try:
        STRING_TYPES = (
            types.StringType,
            types.UnicodeType
        )
    except NameError:
        # Python built without unicode support
        STRING_TYPES = (types.StringType,)

    NUMERIC_TYPES = (
        types.IntType,
        types.LongType,
        types.FloatType
    )

    def to_bytes(string):
        """
        Converts the given string into bytes
        """
        # pylint: disable=E0602
        # On Python 2, str() of a unicode object encodes it with the
        # default (ASCII) codec; byte strings pass through unchanged.
        if type(string) is unicode:
            return str(string)
        return string

    def from_bytes(data):
        """
        Converts the given bytes into a string
        """
        if type(data) is str:
            return data
        return str(data)

else:
    # Python 3
    # pylint: disable=E1101
    STRING_TYPES = (
        bytes,
        str
    )

    NUMERIC_TYPES = (
        int,
        float
    )

    def to_bytes(string):
        """
        Converts the given string into bytes
        """
        # Text is always encoded as UTF-8; bytes pass through unchanged.
        if type(string) is bytes:
            return string
        return bytes(string, "UTF-8")

    def from_bytes(data):
        """
        Converts the given bytes into a string
        """
        if type(data) is str:
            return data
        return str(data, "UTF-8")

# ------------------------------------------------------------------------------
# Enumerations

try:
    import enum

    def is_enum(obj):
        """
        Checks if an object is from an enumeration class

        :param obj: Object to test
        :return: True if the object is an enumeration item
        """
        return isinstance(obj, enum.Enum)

except ImportError:
    # Pre-Python 3.4
    def is_enum(_):
        """
        Before Python 3.4, enumerations didn't exist.

        :param _: Object to test
        :return: Always False
        """
        return False

# ------------------------------------------------------------------------------
# Common

# Aliases kept for backward compatibility with the old types-module names.
DictType = dict
ListType = list
TupleType = tuple

ITERABLE_TYPES = (
    list,
    set, frozenset,
    tuple
)

VALUE_TYPES = (
    bool,
    type(None)
)

# Everything that serializes to a JSON scalar without custom handling.
PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
# /Colorful_Distributions-0.1.tar.gz/Colorful_Distributions-0.1/Colorful_Distributions/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
    """Combine two independent Gaussian distributions by addition.

    Args:
        other (Gaussian): Gaussian instance to add to this one

    Returns:
        Gaussian: Gaussian distribution of the sum
    """
    # Means add directly; variances (not stdevs) add for independent
    # Gaussians, so the combined stdev is the root of the summed squares.
    summed_mean = self.mean + other.mean
    summed_variance = self.stdev ** 2 + other.stdev ** 2
    combined = Gaussian()
    combined.mean = summed_mean
    combined.stdev = math.sqrt(summed_variance)
    return combined
def __repr__(self):
    """Return a human-readable summary of the Gaussian instance.

    Args:
        None

    Returns:
        string: characteristics of the Gaussian
    """
    return f"mean {self.mean}, standard deviation {self.stdev}"
/Avalara.SDK-2.4.29.tar.gz/Avalara.SDK-2.4.29/README.md | # Avalara.SDK
API for evaluating transactions against direct-to-consumer Beverage Alcohol shipping regulations.
This API is currently in beta.
- Package version: 2.4.29
## Requirements.
Python >= 3.6
## Installation & Usage
### pip install
If the python package is hosted on a repository, you can install directly using:
```sh
pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git
```
(you may need to run `pip` with root permission: `sudo pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git`)
Then import the package:
```python
import Avalara.SDK
```
### Setuptools
Install via [Setuptools](http://pypi.python.org/pypi/setuptools).
```sh
python setup.py install --user
```
(or `sudo python setup.py install` to install the package for all users)
Then import the package:
```python
import Avalara.SDK
```
## Getting Started
Please follow the [installation procedure](#installation--usage) and then run the following:
```python
import time
import Avalara.SDK
from pprint import pprint
from Avalara.SDK.api import age_verification_api
from Avalara.SDK.model.age_verify_failure_code import AgeVerifyFailureCode
from Avalara.SDK.model.age_verify_request import AgeVerifyRequest
from Avalara.SDK.model.age_verify_request_address import AgeVerifyRequestAddress
from Avalara.SDK.model.age_verify_result import AgeVerifyResult
# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = Avalara.SDK.Configuration(
username = 'YOUR USERNAME',
password = 'YOUR PASSWORD',
environment='sandbox'
)
# Enter a context with an instance of the API client
with Avalara.SDK.ApiClient(configuration) as api_client:
# Create an instance of the API class
api_instance = age_verification_api.AgeVerificationApi(api_client)
age_verify_request = AgeVerifyRequest(
first_name="first_name_example",
last_name="last_name_example",
address=AgeVerifyRequestAddress(
line1="line1_example",
city="city_example",
region="region_example",
country="US",
postal_code="postal_code_example",
),
dob="dob_example",
) # AgeVerifyRequest | Information about the individual whose age is being verified.
simulated_failure_code = AgeVerifyFailureCode("not_found") # AgeVerifyFailureCode | (Optional) The failure code included in the simulated response of the endpoint. Note that this endpoint is only available in Sandbox for testing purposes. (optional)
try:
# Determines whether an individual meets or exceeds the minimum legal drinking age.
api_response = api_instance.verify_age(age_verify_request, simulated_failure_code=simulated_failure_code)
pprint(api_response)
except Avalara.SDK.ApiException as e:
print("Exception when calling AgeVerificationApi->verify_age: %s\n" % e)
```
## Documentation for API Endpoints
All URIs are relative to *http://localhost*
Class | Method | HTTP request | Description
------------ | ------------- | ------------- | -------------
*AgeVerificationApi* | [**verify_age**](docs/AgeVerificationApi.md#verify_age) | **POST** /api/v2/ageverification/verify | Determines whether an individual meets or exceeds the minimum legal drinking age.
*ShippingVerificationApi* | [**deregister_shipment**](docs/ShippingVerificationApi.md#deregister_shipment) | **DELETE** /api/v2/companies/{companyCode}/transactions/{transactionCode}/shipment/registration | Removes the transaction from consideration when evaluating regulations that span multiple transactions.
*ShippingVerificationApi* | [**register_shipment**](docs/ShippingVerificationApi.md#register_shipment) | **PUT** /api/v2/companies/{companyCode}/transactions/{transactionCode}/shipment/registration | Registers the transaction so that it may be included when evaluating regulations that span multiple transactions.
*ShippingVerificationApi* | [**register_shipment_if_compliant**](docs/ShippingVerificationApi.md#register_shipment_if_compliant) | **PUT** /api/v2/companies/{companyCode}/transactions/{transactionCode}/shipment/registerIfCompliant | Evaluates a transaction against a set of direct-to-consumer shipping regulations and, if compliant, registers the transaction so that it may be included when evaluating regulations that span multiple transactions.
*ShippingVerificationApi* | [**verify_shipment**](docs/ShippingVerificationApi.md#verify_shipment) | **GET** /api/v2/companies/{companyCode}/transactions/{transactionCode}/shipment/verify | Evaluates a transaction against a set of direct-to-consumer shipping regulations.
## Documentation For Models
- [AgeVerifyFailureCode](docs/AgeVerifyFailureCode.md)
- [AgeVerifyRequest](docs/AgeVerifyRequest.md)
- [AgeVerifyRequestAddress](docs/AgeVerifyRequestAddress.md)
- [AgeVerifyResult](docs/AgeVerifyResult.md)
- [ErrorDetails](docs/ErrorDetails.md)
- [ErrorDetailsError](docs/ErrorDetailsError.md)
- [ErrorDetailsErrorDetails](docs/ErrorDetailsErrorDetails.md)
- [ShippingVerifyResult](docs/ShippingVerifyResult.md)
- [ShippingVerifyResultLines](docs/ShippingVerifyResultLines.md)
## Documentation For Authorization

### BasicAuth

- **Type**: HTTP basic authentication

### Bearer

- **Type**: API key
- **API key parameter name**: Authorization
- **Location**: HTTP header
## Author
## Notes for Large OpenAPI documents
If the OpenAPI document is large, imports in Avalara.SDK.apis and Avalara.SDK.models may fail with a
RecursionError indicating the maximum recursion limit has been exceeded. In that case, there are a couple of solutions:
Solution 1:
Use specific imports for apis and models like:
- `from Avalara.SDK.api.default_api import DefaultApi`
- `from Avalara.SDK.model.pet import Pet`
Solution 2:
Before importing the package, adjust the maximum recursion limit as shown below:
```
import sys
sys.setrecursionlimit(1500)
import Avalara.SDK
from Avalara.SDK.apis import *
from Avalara.SDK.models import *
```
| PypiClean |
/DendroPy-4.6.1.tar.gz/DendroPy-4.6.1/src/dendropy/dataio/newickyielder.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Implementation of NEWICK-schema tree iterator.
"""
import sys
from dendropy.dataio import ioservice
from dendropy.dataio import newickreader
from dendropy.dataio import nexusprocessing
class NewickTreeDataYielder(ioservice.TreeDataYielder):
    """Yields trees parsed one at a time from NEWICK-format sources,
    without requiring all trees to be held in memory at once."""

    def __init__(self,
            files=None,
            taxon_namespace=None,
            tree_type=None,
            **kwargs):
        r"""
        Parameters
        ----------
        files : iterable of sources
            Iterable of sources, which can either be strings specifying file
            paths or file-like objects open for reading. If a source element is
            a string, then it is assumed to be a path to a file. Otherwise, the
            source is assumed to be a file-like object.
        taxon_namespace : |TaxonNamespace| instance
            The operational taxonomic unit concept namespace to use to manage
            taxon definitions.
        tree_type : type
            Tree class passed to the base yielder; used to construct the
            tree factory supplied to the parser.
        \*\*kwargs : keyword arguments
            These will be passed directly to the base `newickreader.NewickReader`
            class. See `newickreader.NewickReader` for details.
        """
        ioservice.TreeDataYielder.__init__(self,
                files=files,
                taxon_namespace=taxon_namespace,
                tree_type=tree_type)
        # Reader instance holds all schema-specific parsing configuration.
        self.newick_reader = newickreader.NewickReader(**kwargs)

    ###########################################################################
    ## Implementation of DataYielder interface

    def _yield_items_from_stream(self, stream):
        # Tokenizer splits the raw character stream into NEWICK tokens;
        # underscore handling mirrors the reader's configuration.
        nexus_tokenizer = nexusprocessing.NexusTokenizer(stream,
                preserve_unquoted_underscores=self.newick_reader.preserve_unquoted_underscores)
        # Maps taxon symbols encountered in the data to Taxon objects in the
        # attached namespace.  Lookup by taxon number is disabled here --
        # presumably because plain NEWICK sources carry no translate table;
        # TODO confirm.
        taxon_symbol_mapper = nexusprocessing.NexusTaxonSymbolMapper(
                taxon_namespace=self.attached_taxon_namespace,
                enable_lookup_by_taxon_number=False,
                case_sensitive=self.newick_reader.case_sensitive_taxon_labels)
        # Parse and yield one tree statement at a time until the source is
        # exhausted (the reader returns None when no statements remain).
        while True:
            tree = self.newick_reader._parse_tree_statement(
                    nexus_tokenizer=nexus_tokenizer,
                    tree_factory=self.tree_factory,
                    taxon_symbol_map_fn=taxon_symbol_mapper.require_taxon_for_symbol)
            if tree is None:
                break
            yield tree
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.