content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import re
#source: https://en.wiktionary.org/wiki/Category:English_abbreviations
# Table of (compiled pattern, replacement) pairs. Each pattern matches the
# abbreviation as a whole word with an optional trailing period, preserving a
# trailing comma via group 1; replacements append a space after the expansion.
# Regex fragments use raw strings so escapes like \. and \+ are not mangled
# (and do not trigger invalid-escape warnings on Python 3.12+).
ABBREVIATIONS_COMMON = [(re.compile(r'\b%s\.?(,?)\b' % x[0]), r'%s\1 ' % x[1]) for x in [
    ("abbr", "abbreviation"),
    ("abbrev", "abbreviation"),
    #("abr", "abridged"),
    ("abstr", "abstract"),
    ("AI", "artificial intelligence"),
    #("Amer", "american"),
    #("am", "ante meridiem"),
    #("AM", "ante meridiem"),
    ("approx", "approximately"),
    ("[Aa]pr", "april"),
    ("[Aa]pt", "apartment"),
    ("[Aa]pts", "apartments"),
    ("appt", "appointment"),
    ("[Aa]ssoc", "association"),
    ("[Aa]sst", "assistant"),
    #("[Aa]ug", "august"),
    #("auth", "authority"),
    ("[Aa]v", "avenue"),
    ("ave", "average"),
    ("[Bb]lvd", "boulevard"),
    ("ca", "circa"),
    ("Capt", "captain"),
    ("cert", "certified"),
    ("cllr", "councillor"),
    ("co", "company"),
    (r"c\/o", "care of"),
    (r"\.com", " dot com"),
    ("colloq", "colloquial"),
    ("Comdr", "commander"),
    ("cont'd", "continued"),
    ("Corp", "corporation"),
    ("Ctrl", "control"),
    ("Dr", "doctor"),
    ("dr", "drive"),
    #("[Dd]ec", "december"),
    ("[Dd]ept", "department"),
    ("[Dd]istrib", "distributor"),
    ("[Ee]d", "edition"),
    ("est", "established"),
    ("etc", "etcetera"),  # fixed typo: was "etcetra"
    ("[Ee]xec", "executive"),
    #("[Ff]eb", "february"),
    ("[Ff]wd", "forward"),
    ("[Gg]ov", "government"),
    ("[Gg]ov't", "government"),
    ("GMT", "greenwich mean time"),
    ("Hebr", "hebrew"),
    ("[Hh]on'ble", "honorable"),
    ("Hon", "honorable"),
    (r"i\.e", "that is"),
    ("illust?", "illustration"),
    ("[Ii]ntro", "introduction"),
    ("[Ii]nc", "incorporated"),
    #("[Jj]an", "january"),
    ("[Jj]our", "journal"),
    ("Jr", "junior"),
    ("[Ll]n", "lane"),
    ("Lieut", "lieutenant"),
    ("[Ll]td", "limited"),
    ("Maj", "major"),
    ("mfg", "manufacturing"),
    ("[Mm]gmt", "management"),
    ("min", "minute"),
    ("misc", "miscellaneous"),
    ("mktg", "marketing"),
    ("Mr", "mister"),
    ("Mrs", "missus"),
    ("Ms", "miss"),
    ("Mme", "madame"),
    (r"n\.b", "nota bene"),
    ("net wt", "net weight"),
    #("[Nn]ov", "november"),
    #("no", "number"),
    #("occupn", "occupation"),
    #("[Oo]ct", "october"),
    ("[Oo]rg", "organisation"),
    ("PM", "prime minister"), #conflict with pm in time expressions
    #("pm", "post meridiem"),
    #("PM", "post meridiem"),
    ("[Pp]res", "president"),
    ("Prof", "professor"),
    ("[Pp]vt", "private"),
    ("[Qq]uot", "quotation"),
    ("[Rr]egd", "registered"),
    ("[Rr]egds", "regards"),
    #("sched", "schedule"),
    ("sec", "section"),
    #("[Ss]ept", "september"),
    ("smth", "something"),
    ("Sqn Ldr", "squadron leader"),
    ("Sr", "senior"),
    ("St", "saint"),
    ("st", "street"),
    ("tbsp", "tablespoon"),
    ("[Tt]el", "telephone"),
    ("tsp", "teaspoon"),
    ("UK", "United Kingdom"),
    ("unabr", "unabridged"),
    ("unk", "unknown"),
    ("US", "United States"),
    ("vol", "volume"),
    ("vols", "volumes"),
    ("[Vv]s", "versus"),
    #("viz", "namely"),
    ("wt", "weight"),
    ("WWI", "world war one"),
    ("WWII", "world war two"),
    ("WWIII", "world war three"),
    ("WW1", "world war one"),
    ("WW2", "world war two"),
    ("WW3", "world war three"),
    ("[Xx]mas", "christmas"),
    #("N", "North"),
    #("E", "East"),
    #("W", "West"),
    #("S", "South"),
    ("@", " at"),
    ("#", "hashtag"),
    ("&", " and"),
    (r"\+", " plus"),
    (r"\=", " is equal to"),
    (r"\*", " times"),
]]
class AbbreviationConverter:
    """Expand common English abbreviations in free text.

    Applies every (compiled regex, replacement) rule from the module-level
    ABBREVIATIONS_COMMON table in order.
    """

    def __init__(self, lang):
        super(AbbreviationConverter, self).__init__()
        # Language code kept for interface symmetry; the rules applied are
        # always the English ABBREVIATIONS_COMMON table regardless of lang.
        self.lang = lang

    def transform(self, text):
        """Return *text* with known abbreviations expanded."""
        return self.convert_abbreviations(text)

    def convert_abbreviations(self, text):
        """Apply every abbreviation rule to *text* and return the result."""
        for regex, replacement in ABBREVIATIONS_COMMON:
            try:
                text = re.sub(regex, replacement, text)
            except re.error as exc:
                # Narrowed from a bare `except:` which silently swallowed every
                # exception (including KeyboardInterrupt) with no context.
                print('Abbreviation conversion failed for %s: %s' % (regex.pattern, exc))
        return text
|
nilq/baby-python
|
python
|
def insertionSort(arr):
    """Sort *arr* in place with insertion sort and return it."""
    for end in range(1, len(arr)):
        key = arr[end]
        pos = end
        # Shift larger elements one slot right until key's place is found.
        while pos > 0 and key < arr[pos - 1]:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = key
    return arr

print(insertionSort([1,9,8,4,6,7,3,12,5,18,2,22]))
|
nilq/baby-python
|
python
|
"""Originally Adapted from sphinxcontrib.details.directive
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.docutils import SphinxDirective
from sphinx.transforms.post_transforms import SphinxPostTransform
from sphinx.util.nodes import NodeMatcher
def setup_dropdown(app):
    """Register the dropdown nodes, directive and post-transform with Sphinx."""
    app.add_node(dropdown_main, html=(visit_dropdown_main, depart_dropdown_main))
    app.add_node(dropdown_title, html=(visit_dropdown_title, depart_dropdown_title))
    app.add_directive("dropdown", DropdownDirective)
    app.add_post_transform(DropdownHtmlTransform)
class dropdown_main(nodes.Element, nodes.General):
    """Docutils node rendered as the HTML <details> element."""
    pass
class dropdown_title(nodes.TextElement, nodes.General):
    """Docutils node rendered as the HTML <summary> element."""
    pass
def visit_dropdown_main(self, node):
    """Open the <details> tag, adding open="open" for initially-open dropdowns."""
    attrs = {"open": "open"} if node.get("opened") else {}
    self.body.append(self.starttag(node, "details", **attrs))
def depart_dropdown_main(self, node):
    # Close the <details> element opened in visit_dropdown_main.
    self.body.append("</details>")
def visit_dropdown_title(self, node):
    # The title renders as the <summary> (clickable header) of the <details>.
    self.body.append(self.starttag(node, "summary"))
def depart_dropdown_title(self, node):
    # Close the <summary> element.
    self.body.append("</summary>")
class DropdownDirective(SphinxDirective):
    """Directive that renders its content inside a collapsible block.

    The optional argument is the dropdown title; options control CSS classes
    for the container/title/body, the initial open state, the chevron colour
    and an optional open/close animation. The emitted node is a generic
    container tagged type="dropdown"; DropdownHtmlTransform later rewrites it
    into <details>/<summary> markup for HTML builders.
    """

    optional_arguments = 1
    final_argument_whitespace = True
    has_content = True
    option_spec = {
        "container": directives.unchanged,
        "title": directives.unchanged,
        "body": directives.unchanged,
        "open": directives.flag,
        "marker-color": directives.unchanged,
        "name": directives.unchanged,
        "animate": lambda a: directives.choice(a, ("fade-in", "fade-in-slide-down")),
    }

    def run(self):
        """Parse options/content into a container node carrying dropdown metadata."""
        # default classes
        classes = {
            "container_classes": ["mb-3"],
            "title_classes": [],
            "body_classes": [],
        }
        # add classes from options
        for element in ["container", "title", "body"]:
            if element not in self.options:
                continue
            value = self.options.get(element).strip()
            if value.startswith("+"):
                # A leading "+" appends to the defaults instead of replacing them.
                classes.setdefault(element + "_classes", []).extend(value[1:].split())
            else:
                classes[element + "_classes"] = value.split()
        # add animation classes
        if (
            "animate" in self.options
            and self.options["animate"] not in classes["container_classes"]
        ):
            classes["container_classes"].append(self.options["animate"])
        container = nodes.container(
            "",
            marker_color=self.options.get("marker-color", "currentColor"),
            opened="open" in self.options,
            type="dropdown",
            has_title=len(self.arguments) > 0,
            **classes
        )
        if self.arguments:
            # Parse the title argument as inline reStructuredText.
            textnodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
            container += nodes.paragraph(self.arguments[0], "", *textnodes)
            container += messages
        self.state.nested_parse(self.content, self.content_offset, container)
        self.add_name(container)
        return [container]
# Chevron (arrow) icon used as the dropdown open/close marker.
# Fix: the original ran stroke-width="2" and stroke-linecap together with no
# separating space, producing a malformed SVG attribute list.
CHEVRON = """\
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24"
viewBox="0 0 24 24" fill="none"
stroke="{color}" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"
>
<polyline points="{points}"></polyline>
</svg>"""
# Three-dot "ellipses" icon (JupyterLab style) used as the summary of a
# dropdown that was given no title.
ELLIPSIS = """\
<svg viewBox="0 0 36 24" width="36" height="16" xmlns="http://www.w3.org/2000/svg"
data-icon="ui-components:ellipses" class="ellipsis">
<g xmlns="http://www.w3.org/2000/svg" class="jp-icon3" fill="currentColor">
<circle cx="0" cy="12" r="6"></circle>
<circle cx="18" cy="12" r="6"></circle>
<circle cx="36" cy="12" r="6"></circle>
</g>
</svg>"""
class DropdownHtmlTransform(SphinxPostTransform):
    """Post-transform rewriting ``type="dropdown"`` containers into
    dropdown_main/dropdown_title nodes (rendered as <details>/<summary>)."""

    default_priority = 200
    builders = ("html", "dirhtml", "singlehtml", "readthedocs")

    def run(self):
        """Replace every dropdown container in the document with HTML nodes."""
        matcher = NodeMatcher(nodes.container, type="dropdown")
        for node in self.document.traverse(matcher):
            # Chevron icons shown while the dropdown is open / closed.
            open_marker = nodes.container(
                "",
                nodes.raw(
                    "",
                    nodes.Text(
                        CHEVRON.format(
                            color=node["marker_color"], points="18 15 12 9 6 15"
                        )
                    ),
                    format="html",
                ),
                is_div=True,
                classes=["summary-chevron-down"],
            )
            closed_marker = nodes.container(
                "",
                nodes.raw(
                    "",
                    nodes.Text(
                        CHEVRON.format(
                            color=node["marker_color"], points="6 9 12 15 18 9"
                        )
                    ),
                    format="html",
                ),
                is_div=True,
                classes=["summary-chevron-up"],
            )
            newnode = dropdown_main(
                opened=node["opened"],
                classes=["sphinx-bs", "dropdown", "card"] + node["container_classes"],
            )
            if node["has_title"]:
                # First child is the parsed title paragraph; the rest is body.
                title_children = node[0]
                body_children = node[1:]
            else:
                # No title given: show an ellipsis icon in the summary.
                title_children = [nodes.raw("...", nodes.Text(ELLIPSIS), format="html")]
                body_children = node
            newnode += dropdown_title(
                "",
                "",
                *title_children,
                closed_marker,
                open_marker,
                classes=["summary-title", "card-header"] + node["title_classes"]
            )
            body_node = nodes.container(
                "",
                *body_children,
                is_div=True,
                classes=["summary-content", "card-body"] + node["body_classes"]
            )
            for para in body_node.traverse(nodes.paragraph):
                # Keep any classes already set on the paragraph and append
                # "card-text". Fix: the original ternary was inverted
                # (`[] if "classes" in para else para["classes"]`), which
                # discarded existing classes whenever they were present.
                para["classes"] = (para["classes"] if "classes" in para else []) + [
                    "card-text"
                ]
            newnode += body_node
            node.replace_self(newnode)
|
nilq/baby-python
|
python
|
from robocorp_code.protocols import IRcc, IRccRobotMetadata
import py.path
def test_rcc_template_names(rcc: IRcc):
    """The rcc wrapper must report at least the "standard" robot template."""
    result = rcc.get_template_names()
    assert result.success
    assert result.result
    assert "standard" in result.result
def test_rcc_cloud(rcc: IRcc, ci_credentials: str, tmpdir: py.path.local):
    """End-to-end check of the Control Room (cloud) integration.

    Validates credential handling, locates the CI workspace, ensures a
    "CI activity" robot exists in it (creating it if missing), then creates a
    robot from the "standard" template and uploads its contents.

    NOTE(review): assumes the account behind ci_credentials has a
    pre-provisioned workspace named "CI workspace" — the test fails otherwise.
    """
    assert not rcc.credentials_valid()
    result = rcc.add_credentials(ci_credentials)
    assert result.success
    assert rcc.credentials_valid()
    result = rcc.cloud_list_workspaces()
    assert result.success
    workspaces = result.result
    if not workspaces:
        raise AssertionError("Expected to have CI Workspace available.")
    workspaces = [ws for ws in workspaces if ws.workspace_name == "CI workspace"]
    if not workspaces:
        raise AssertionError("Expected to have CI Workspace available.")
    ws = workspaces[0]
    result = rcc.cloud_list_workspace_robots(ws.workspace_id)
    assert result.success
    lst = result.result
    if lst is None:
        raise AssertionError("Found no workspace")
    acts = [act for act in lst if act.robot_name == "CI activity"]
    if not acts:
        # Robot not there yet: create it and re-list to pick up its metadata.
        result = rcc.cloud_create_robot(ws.workspace_id, "CI activity")
        assert result.success
        result = rcc.cloud_list_workspace_robots(ws.workspace_id)
        assert result.success
        lst = result.result
        if lst is None:
            raise AssertionError("Found no activity")
        acts = [act for act in lst if act.robot_name == "CI activity"]
    if not acts:
        raise AssertionError(
            "Expected to be able to create CI activity (or have it there already)."
        )
    act: IRccRobotMetadata = acts[0]
    # Create a local robot from the template and push it to the cloud robot.
    wsdir = str(tmpdir.join("ws"))
    result = rcc.create_robot("standard", wsdir)
    assert result.success
    result = rcc.cloud_set_robot_contents(wsdir, ws.workspace_id, act.robot_id)
    assert result.success
def test_rcc_run_with_conda_yaml(rcc: IRcc, rcc_conda_installed):
    """Run a small python snippet through rcc inside a conda-defined env."""
    python_code = """
import sys
sys.stdout.write('It worked')
"""
    conda_yaml_str_contents = """
channels:
- defaults
- conda-forge
dependencies:
- python=3.7.5
"""
    result = rcc.run_python_code_robot_yaml(python_code, conda_yaml_str_contents)
    assert result.success
    assert result.result
    # Note: even in silent mode we may have additional output!
    assert "It worked" in result.result
def test_numbered_dir(tmpdir):
    """make_numbered_in_temp creates numbered dirs and prunes stale ones.

    With keep=2, each call registers cleanup callbacks via the `register`
    hook; running them removes all but the newest `keep` directories.
    """
    from robocorp_code.rcc import make_numbered_in_temp
    from pathlib import Path
    import time
    registered = []
    from functools import partial

    def register(func, *args, **kwargs):
        # Capture cleanup callbacks instead of running them at process exit.
        registered.append(partial(func, *args, **kwargs))

    n = make_numbered_in_temp(
        keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
    )
    # Sleep so that it'll be scheduled for removal at the next creation.
    time.sleep(0.02)
    assert n.name.endswith("-0")
    assert n.is_dir()
    n = make_numbered_in_temp(
        keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
    )
    assert n.name.endswith("-1")
    assert n.is_dir()
    n = make_numbered_in_temp(
        keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
    )
    assert n.name.endswith("-2")
    assert n.is_dir()
    # Removed dir 0.
    # NOTE(review): 3 entries here presumably includes a lock file alongside
    # dirs -1 and -2 — confirm against make_numbered_in_temp's layout.
    assert len(list(n.parent.iterdir())) == 3
    for r in registered:
        r()
    assert len(list(n.parent.iterdir())) == 2
|
nilq/baby-python
|
python
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    # Class-level memo shared across instances: N -> list of roots of every
    # full binary tree with N nodes. Seeded with the single-node tree.
    lookup = {1: [TreeNode(0)]}

    def allPossibleFBT(self, N):
        """Return the roots of all possible full binary trees with N nodes.

        A full binary tree has nodes with 0 or 2 children, so solutions only
        exist for odd N. Subtrees are shared between results (standard for
        this problem).
        """
        if N % 2 == 0:
            # No full binary tree has an even node count. Return an empty
            # list (the original returned None, which crashed any caller —
            # or the recursion below — that iterated the result).
            return []
        if N not in Solution.lookup:
            ans = []
            # Split the N-1 non-root nodes into odd-sized left/right parts.
            for x in range(1, N, 2):
                y = N - 1 - x
                for left in self.allPossibleFBT(x):
                    for right in self.allPossibleFBT(y):
                        root = TreeNode(0)
                        root.left = left
                        root.right = right
                        ans.append(root)
            Solution.lookup[N] = ans
        return Solution.lookup[N]
|
nilq/baby-python
|
python
|
import unittest
from preset import ActionTypes, Step, Preset
class TestStep(unittest.TestCase):
    """Unit tests for the Step value object."""

    def test_01_constructor_and_properties(self):
        # Build a Step from known inputs and check every property
        # round-trips unchanged.
        name = "name"
        value = "value"
        executable = lambda x: x
        action_type = ActionTypes.filter
        step = Step(name, value, executable, action_type)
        self.assertEqual(name, step.name)
        self.assertEqual(value, step.value)
        self.assertEqual(executable, step.executable)
        self.assertEqual(action_type, step.action_type)
class TestPreset(unittest.TestCase):
    """Unit tests for the Preset container."""

    def test_01_constructor_and_properties(self):
        # Construct a Preset from three Steps and verify the name,
        # description and step sequence are all stored unchanged.
        temp_names = ["name1", "name2", "name3"]
        temp_values = ["value1", "value2", "value3"]
        temp_executables = [lambda x: x, lambda y: y + 2, lambda z: z + 3]
        temp_action_types = [ActionTypes.filter, ActionTypes.enhanceAction, ActionTypes.custom]
        steps = [Step(temp_names[i], temp_values[i], temp_executables[i], temp_action_types[i]) for i in range(len(temp_names))]
        instance_name = "foo"
        instance_description = "description"
        temp_instance = Preset(instance_name, instance_description, steps)
        self.assertEqual(instance_name, temp_instance.name)
        self.assertEqual(instance_description, temp_instance.description)
        # Steps must come back in their original order.
        for i, step in enumerate(temp_instance.steps):
            self.assertEqual(steps[i], step)
# Allow running this test module directly (python test_preset.py).
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import logging
import os
import pickle
from typing import Generator, List, Tuple, Dict
from gensim.models import Word2Vec
from gensim.test.utils import common_texts
from wbtools.db.dbmanager import WBDBManager
from wbtools.lib.nlp.common import PaperSections
from wbtools.lib.nlp.text_preprocessing import preprocess
from wbtools.lib.nlp.text_similarity import get_softcosine_index, get_similar_documents, SimilarityResult
from wbtools.literature.paper import WBPaper
logger = logging.getLogger(__name__)
class CorpusManager(object):
    """manage a list of WBPaper objects by populating their data from database or local directory"""

    def __init__(self):
        # Map of paper_id -> WBPaper.
        self.corpus = {}

    def add_or_update_wb_paper(self, wb_paper: WBPaper):
        """add a paper

        Args:
            wb_paper (WBPaper): the paper to add to the corpus
        """
        self.corpus[wb_paper.paper_id] = wb_paper

    def remove_wb_paper(self, wb_paper: WBPaper):
        """remove a paper

        Args:
            wb_paper (WBPaper): the paper to remove from the corpus
        """
        del self.corpus[wb_paper.paper_id]

    def load_from_dir_with_txt_files(self, dir_path: str):
        """
        load papers from a directory containing text files with file name in the following format:
        <WBPaperID>_<Author><Year>_<additional_options>.txt

        Only files with .txt extension are loaded. Paper ID is derived from the file name and additional options are
        used to understand the type of file (e.g., main article, ocr scanned article, supplementary material etc.)

        Args:
            dir_path (str): path to the input directory containing text files
        """
        paper = WBPaper()
        for f in sorted(os.listdir(dir_path)):
            if os.path.isfile(os.path.join(dir_path, f)) and f.endswith(".txt"):
                # Files are sorted, so a filename with a new paper id means
                # the previous paper's files are complete: flush it.
                if paper.paper_id and not paper.has_same_wbpaper_id_as_filename(f):
                    self.add_or_update_wb_paper(paper)
                    paper = WBPaper()
                paper.add_file(dir_path=dir_path, filename=f, remote_file=False, pdf=False)
        # Bug fix: flush the trailing paper as well — previously the files of
        # the last paper in the directory were read but never added to the
        # corpus.
        if paper.paper_id:
            self.add_or_update_wb_paper(paper)

    def load_from_wb_database(self, db_name: str, db_user: str, db_password: str, db_host: str,
                              ssh_host: str = 'tazendra.caltech.edu', ssh_user: str = None, ssh_passwd: str = None,
                              paper_ids: list = None,
                              from_date: str = None, load_pdf_files: bool = True, load_bib_info: bool = True,
                              load_curation_info: bool = True, load_afp_info: bool = False, max_num_papers: int = None,
                              exclude_ids: List[str] = None, must_be_autclass_flagged: bool = False,
                              exclude_temp_pdf: bool = False, exclude_pap_types: List[str] = None,
                              pap_types: List[str] = None,
                              exclude_afp_processed: bool = False, exclude_afp_not_curatable: bool = False,
                              exclude_no_main_text: bool = False, exclude_no_author_email: bool = False) -> None:
        """load papers from WormBase database

        Args:
            db_name (str): database name
            db_user (str): database user
            db_password (str): database password
            db_host (str): database host
            ssh_host (str): host where to fetch the files via ssh
            ssh_user (str): ssh user to fetch pdf files
            ssh_passwd (str): ssh password to fetch pdf files
            paper_ids (list): optional list of paper ids to be fetched
            from_date (str): load papers added or modified from the specified date (only if paper_ids is not provided)
            load_pdf_files (bool): load pdf files using ssh credentials
            load_bib_info (bool): load bibliographic info of the papers
            load_curation_info (bool): load curation info of the papers
            load_afp_info (bool): load author first pass info of the papers
            max_num_papers (int): limit number of papers to be loaded
            exclude_ids (List[str]): list of paper ids to exclude
            must_be_autclass_flagged (bool): whether to exclude papers that have not been flagged by WB classifiers
            exclude_temp_pdf (bool): whether to exclude papers with temp pdfs only
            exclude_pap_types (List[str]): list of pap_types (string value, not numeric) to exclude
            pap_types (List[str]): list of paper types to load
            exclude_afp_processed (bool): whether to exclude papers already processed by AFP
            exclude_afp_not_curatable (bool): whether to exclude papers that are not relevant for AFP curation
            exclude_no_main_text (bool): whether to exclude papers without a fulltext that can be converted to txt
            exclude_no_author_email (bool): whether to exclude papers without any contact email in WB
        """
        main_db_manager = WBDBManager(db_name, db_user, db_password, db_host)
        with main_db_manager:
            if not paper_ids:
                paper_ids = main_db_manager.generic.get_all_paper_ids(added_or_modified_after=from_date,
                                                                      exclude_ids=exclude_ids)
            if pap_types:
                ids_to_include = set(main_db_manager.generic.get_paper_ids_with_pap_types(pap_types))
                paper_ids = [paper_id for paper_id in paper_ids if paper_id in ids_to_include]
            if exclude_pap_types:
                ids_to_exclude = set(main_db_manager.generic.get_paper_ids_with_pap_types(exclude_pap_types))
                paper_ids = [paper_id for paper_id in paper_ids if paper_id not in ids_to_exclude]
            # AFP submission state is only needed for AFP loading/filtering.
            if load_afp_info or exclude_afp_processed:
                afp_no_submission_ids = main_db_manager.afp.get_paper_ids_afp_no_submission()
                afp_full_submission_ids = main_db_manager.afp.get_paper_ids_afp_full_submission()
                afp_partial_submission_ids = main_db_manager.afp.get_paper_ids_afp_partial_submission()
            else:
                afp_no_submission_ids = []
                afp_full_submission_ids = []
                afp_partial_submission_ids = []
            afp_processed_ids = set(afp_no_submission_ids) | set(afp_partial_submission_ids) | set(afp_full_submission_ids)
            afp_curatable = set(main_db_manager.afp.get_afp_curatable_paper_ids() if exclude_afp_not_curatable else [])
            blacklisted_email_addresses = main_db_manager.generic.get_blacklisted_email_addresses() if \
                exclude_no_author_email else []
            for paper_id in paper_ids:
                paper = WBPaper(paper_id=paper_id, ssh_host=ssh_host, ssh_user=ssh_user,
                                ssh_passwd=ssh_passwd, db_manager=main_db_manager.paper)
                if exclude_afp_processed and paper_id in afp_processed_ids:
                    logger.info("Skipping paper already processed by AFP")
                    continue
                if exclude_afp_not_curatable and paper_id not in afp_curatable:
                    logger.info("Skipping paper not AFP curatable")
                    continue
                if load_pdf_files:
                    logger.info("Loading text from PDF files for paper")
                    paper.load_text_from_pdf_files_in_db()
                    if exclude_temp_pdf and paper.is_temp():
                        logger.info("Skipping proof paper")
                        continue
                    if exclude_no_main_text and not paper.has_main_text():
                        logger.info("Skipping paper without main text")
                        continue
                # functions with db access
                with paper.db_manager:
                    if load_curation_info:
                        logger.info("Loading curation info for paper")
                        paper.load_curation_info_from_db()
                        if must_be_autclass_flagged and not paper.aut_class_values:
                            logger.info("Skipping paper without automated classification")
                            continue
                    if load_bib_info:
                        logger.info("Loading bib info for paper")
                        paper.load_bib_info_from_db()
                        if exclude_no_author_email and not paper.get_authors_with_email_address_in_wb(
                                blacklisted_email_addresses=blacklisted_email_addresses):
                            logger.info("Skipping paper without any email address in text with records in WB")
                            continue
                    if load_afp_info:
                        logger.info("Loading AFP info for paper")
                        paper.load_afp_info_from_db(paper_ids_no_submission=afp_no_submission_ids,
                                                    paper_ids_full_submission=afp_full_submission_ids,
                                                    paper_ids_partial_submission=afp_partial_submission_ids)
                self.add_or_update_wb_paper(paper)
                logger.info("Paper " + paper_id + " added to corpus. Corpus size: " + str(self.size()))
                if max_num_papers and self.size() >= max_num_papers:
                    break

    def size(self) -> int:
        """number of papers in the corpus manager

        Returns:
            int: the number of papers
        """
        return len(self.corpus)

    def get_flat_corpus_list_and_idx_paperid_map(self, split_sentences: bool = False,
                                                 remove_sections: List[PaperSections] = None,
                                                 must_be_present: List[PaperSections] = None,
                                                 lowercase: bool = False, tokenize: bool = False,
                                                 remove_stopwords: bool = False,
                                                 remove_alpha: bool = False) -> Tuple[List[str], Dict[int, str]]:
        """get a flat list of text documents from the papers in the corpus and a map to link the index in the resulting
        list and the id of the related paper

        Args:
            split_sentences (bool): split sentences into separate documents
            remove_sections (List[PaperSections]): list of sections to remove
            must_be_present (List[PaperSections]): list of sections that must be present
            lowercase (bool): transform text to lowercase
            tokenize (bool): tokenize text into words
            remove_stopwords (bool): remove common stopwords from text
            remove_alpha (bool): remove special characters and punctuation from text

        Returns:
            Tuple[List[str], Dict[int, str]]: the flat list and the related index to paper id map
        """
        flat_list_with_ids = [(doc, paper.paper_id) for paper in self.corpus.values() for doc in paper.get_text_docs(
            include_supplemental=True, remove_sections=remove_sections, must_be_present=must_be_present,
            split_sentences=split_sentences, lowercase=lowercase, tokenize=tokenize, remove_stopwords=remove_stopwords,
            remove_alpha=remove_alpha)]
        return [d[0] for d in flat_list_with_ids], {idx: d[1] for idx, d in enumerate(flat_list_with_ids)}

    def get_paper(self, paper_id) -> WBPaper:
        """get a paper from the corpus by paper id

        Args:
            paper_id (str): paper id to retrieve

        Returns:
            WBPaper: the paper
        """
        return self.corpus[paper_id]

    def get_all_papers(self) -> Generator[WBPaper, None, None]:
        """get all the papers in the corpus

        Returns:
            Generator[WBPaper, None, None]: a generator to the papers in the corpus
        """
        for paper in self.corpus.values():
            yield paper

    def save(self, file_path: str) -> None:
        """save corpus to file

        Args:
            file_path (str): path to file to save
        """
        with open(file_path, 'wb') as out_file:
            pickle.dump(self, out_file)

    def load(self, file_path: str) -> None:
        """load corpus from previously saved file

        Args:
            file_path (str): path to file to load
        """
        # NOTE: pickle.load on untrusted files can execute arbitrary code;
        # only load corpus files produced by a trusted save().
        with open(file_path, 'rb') as in_file:
            tmp_self = pickle.load(in_file)
            self.__dict__ = tmp_self.__dict__

    def query_papers_by_doc_similarity(self, query_docs: List[str], sentence_search: bool = False,
                                       remove_sections: List[PaperSections] = None,
                                       must_be_present: List[PaperSections] = None, path_to_model: str = None,
                                       average_match: bool = True, num_best: int = 10) -> List[SimilarityResult]:
        """query papers in the corpus by similarity with the provided query documents, which can be fulltext documents
        or sentences

        Args:
            query_docs (List[str]): list of query documents
            sentence_search (bool): perform sentence level similarity search
            remove_sections (List[PaperSections]): sections to be ignored from corpus papers
            must_be_present (List[PaperSections]): sections that must be present in corpus papers before removing
                                                   sections
            path_to_model (str): path to word2vec model
            average_match (bool): merge query documents and calculate average similarity to them
            num_best (int): limit to the first n results by similarity score

        Returns:
            List[SimilarityResult]: list of papers most similar to the provided query documents
        """
        # With no model path given, fall back to a toy Word2Vec trained on
        # gensim's common_texts (low quality — mainly useful for testing).
        model = Word2Vec(common_texts, min_count=1) if not path_to_model else None
        corpus_list_token, idx_paperid_map = self.get_flat_corpus_list_and_idx_paperid_map(
            split_sentences=sentence_search, remove_sections=remove_sections, must_be_present=must_be_present,
            lowercase=True, tokenize=True, remove_stopwords=True, remove_alpha=True)
        # Untokenized copy kept to report the matched text verbatim below.
        corpus_list_token_orig, _ = self.get_flat_corpus_list_and_idx_paperid_map(
            split_sentences=sentence_search, remove_sections=remove_sections, must_be_present=must_be_present,
            lowercase=False, tokenize=False, remove_stopwords=False, remove_alpha=False)
        docsim_index, dictionary = get_softcosine_index(model=model, model_path=path_to_model,
                                                        corpus_list_token=corpus_list_token, num_best=num_best)
        query_docs_preprocessed = [preprocess(doc=sentence, lower=True, tokenize=True, remove_stopwords=True,
                                              remove_alpha=True) for sentence in query_docs]
        sims = get_similar_documents(docsim_index, dictionary, query_docs_preprocessed, idx_paperid_map,
                                     average_match=average_match)
        results = [SimilarityResult(score=sim.score, paper_id=sim.paper_id, match_idx=sim.match_idx,
                                    query_idx=sim.query_idx, match="\"" + corpus_list_token_orig[sim.match_idx] + "\"",
                                    query="\"" + (" ".join(query_docs) if average_match else query_docs[sim.query_idx]
                                                  ) + "\"") for sim in sims]
        return results[0:num_best] if len(results) > num_best else results
|
nilq/baby-python
|
python
|
# game.py (c) 2017 D.J.Whale 22/01/2017
# Star-Wars 'Use the Force, Luke' game
# Using many moving parts provided by Martin O'Hanlon
#----- CONFIGURATION ----------------------------------------------------------
DEATHSTAR_CENTRE_POS = (100,100,10)
TARGET_POS = (100,100,10)
IN_RANGE = ((100,100,10), (100,100,10))
XWING_START_POS = (46,10,-61)
PLAY_TIME_SECS = 5 #(2*60)
NUMBER_OF_TRIES = 3
FRAMES_PER_SEC = 10
#TODO: Mart's code animates the trench separately from deathstar
#so do we need to switch over to that animation at the right position?
#also is there a visual clue to where the trench is, in the deathstar model?
#TODO: xwing can turn or shift
#might make it turn if you tilt it left or right a long way
#in which case we need l,L and r,R for two ranges of left and right tilt
#----- LOAD ALL THE DEPENDENT PARTS -------------------------------------------
import sys
if sys.version_info[0] != 2:
print("Please run this game with Python version 2")
sys.exit()
import time
import controller # auto-connects to the controller
import starwars # auto-connects to Minecraft
#----- GAME STATE -------------------------------------------------------------
deathstar = None
xwing = None
missile = None
xwing_crashed = False
missile_missed = False
missile_hit = False
game_stop_time = 0
#----- BUILD THE GAME WORLD ---------------------------------------------------
def clear_space():
    """Clear the build area in the Minecraft world (not yet implemented)."""
    print("will clear_space")
    #TODO:

def build_deathstar():
    """Build the deathstar model (not yet implemented)."""
    print("will build_deathstar")
    #TODO: build at DEATHSTAR_CENTRE_POS
def create_xwing():
    """(Re)create the x-wing model at its start position."""
    global xwing
    if xwing is not None:
        # kill off old x-wing
        xwing.clear()
        xwing = None
    xwing = starwars.MCObject(starwars.XWING_BLOCKS, XWING_START_POS)
    xwing.draw()
def setup_game():
    """Reset the world and game state ready for a new round."""
    clear_space()
    build_deathstar()
    create_xwing()
    clear_flags()

def wait_for_start():
    """Block until the player asks to start (console RETURN for now)."""
    print("will wait_for_start")
    # raw_input is Python 2 only; the version check at the top enforces 2.x.
    raw_input("press RETURN to start")
    #TODO: wait for A button press on micro:bit
    #loop, read from micro:bit, until see 'A'
#----- GAME ACTIONS -----------------------------------------------------------
def fly_xwing():
    """Poll the controller and advance the x-wing by one animation frame."""
    buttons = controller.get_command_flags()
    if buttons is not None:
        # Decode single-letter command flags reported by the controller.
        up = 'U' in buttons
        down = 'D' in buttons
        left = 'L' in buttons
        right = 'R' in buttons
        fire = 'A' in buttons
        eject = 'B' in buttons
        # change xwing position based on u/d/l/r
        if left:
            xwing.rotate_by(yaw=-10)
            print("left")
        if right:
            xwing.rotate_by(yaw=+10)
            print("right")
        if up:
            xwing.move_by(y=+1)
            print("up")
        if down:
            xwing.move_by(y=-1)
            print("down")
        if fire: print("boom!!")
        if eject: print("yeehar!!")
    # always move xwing forward by one block
    xwing.fly()
    # if xwing crashes into any block
    #   set_xwing_crashed()
    #if fire: start_missile()
    #if eject: ejector_seat()
def start_missile():
    """Launch a missile from the x-wing (not yet implemented)."""
    print("will start_missile")
    #TODO:
    # create missile object in front of xwing
    # note we need to know what direction the xwing is flying in
    # we also need to know a range of positions to succeed from

def move_missile():
    """Advance the in-flight missile by one step (not yet implemented)."""
    print("will move_missile")
    #TODO:
    # if missile now out of range:
    #   set_missile_missed()
    # elif missile not yet hit target:
    #   move missile forward by 1
    # else must have hit
    #   set_missile_hit()

def ejector_seat():
    """Eject the pilot and crash the x-wing, ending the round."""
    print("will ejector_seat")
    animate_eject()
    animate_xwing_crashed()
    set_xwing_crashed()
#------ GAME CONDITIONS -------------------------------------------------------
#
# Set various game conditions in the game state.
# The main loop will detect and action these appropriately.
# This prevents passing lots of variables around,
# but contains the global variables a bit more into a controlled space (here)
def clear_flags():
    """Reset all end-of-round condition flags."""
    global xwing_crashed, missile_missed, missile_hit
    xwing_crashed = False
    missile_missed = False
    missile_hit = False

def set_xwing_crashed():
    """Flag that the x-wing has crashed (ends the round)."""
    global xwing_crashed
    xwing_crashed = True

def set_missile_missed():
    """Flag that the current missile missed the target."""
    global missile_missed
    missile_missed = True

def set_missile_hit():
    """Flag that the missile hit the target (wins the round)."""
    global missile_hit
    missile_hit = True
#----- ANIMATIONS -------------------------------------------------------------
def animate_missile_missed():
    """Show the missile-missed animation (not yet implemented)."""
    print("will animate_missile_missed")
    #TODO:

def animate_missile_hit():
    """Show the missile-hit animation (not yet implemented)."""
    print("will animate_missile_hit")
    #TODO:

def animate_eject():
    """Show the pilot-eject animation (not yet implemented)."""
    print("will animate_eject")
    #TODO:

def animate_xwing_crashed():
    """Show the x-wing crash animation (not yet implemented)."""
    print("will xwing_crashed")
    #TODO:

def animate_blow_up_deathstar():
    """Show the deathstar explosion animation (not yet implemented)."""
    print("will blow_up_deathstar")
    #TODO:
    # auto pilot the ship to a safe location
    # animate the deathstar blowing up
    # return when deathstar gone
#----- SPLASH SCREENS ---------------------------------------------------------
def splash_screen():
    """Show the game splash screen (not yet implemented)."""
    print("will splash_screen")
    #TODO:

def game_over_failed():
    """Show the losing game-over screen (not yet implemented)."""
    print("will game_over_failed")
    #TODO:

def game_over_succeeded():
    """Show the winning game-over screen (not yet implemented)."""
    print("will game_over_succeeded")
    #TODO:
#----- GAME LOOP --------------------------------------------------------------
def start_game():
    """Start the round timer."""
    global game_stop_time
    print("will start_game")
    #TODO: move player to position on start (hides splash screen)
    game_stop_time = time.time() + PLAY_TIME_SECS

def run_out_of_time():
    """Return True once the round timer has expired."""
    return time.time() >= game_stop_time
def play_game():
    """Run one round; return True if the deathstar was destroyed."""
    missiles_left = NUMBER_OF_TRIES
    while not run_out_of_time() and not xwing_crashed and not missile_hit and missiles_left > 0:
        # float() guards against Python 2 integer division truncating to 0.
        time.sleep(1/float(FRAMES_PER_SEC))
        fly_xwing()
        if missile is not None:
            # NOTE(review): nothing currently assigns `missile`
            # (start_missile is never called), so this branch looks dead
            # until missiles are implemented — confirm.
            move_missile()
            if missile_missed:
                animate_missile_missed()
                missiles_left -= 1
            elif missile_hit:
                animate_missile_hit()
                animate_blow_up_deathstar()
    return missile_hit
def whereami():
    """Debug helper: print the player's current tile position."""
    import starwars.mcpi.minecraft as minecraft
    mc = minecraft.Minecraft.create()
    x,y,z = mc.player.getTilePos()
    # On Python 2 this prints a tuple, e.g. (x, y, z).
    print(x,y,z)
#----- MAIN PROGRAM -----------------------------------------------------------
#if __name__ == "__main__":
# while True:
# setup_game()
# splash_screen()
# wait_for_start()
# start_game()
#
# success = play_game()
#
# if success:
# game_over_succeeded()
# else:
# game_over_failed()
#whereami()
# Temporary test harness: the full game loop above is commented out while the
# moving parts are developed; this just flies the x-wing forever using
# controller input (stop with Ctrl-C).
create_xwing()
while True:
    print("fly")
    fly_xwing()
    time.sleep(0.1)
# END
|
nilq/baby-python
|
python
|
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_11/models/lawbotHQ/LB_Zone7a',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10013: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(57.0219, 5.15024, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(0.660517, 0.660517, 0.660517),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_C.bam'},
10015: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-25.74, 58.3575, 9.73551),
'hpr': Vec3(95.4403, 0, 0),
'scale': Vec3(1.5379, 1.5379, 1.5379),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
10016: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(33.3395, -18.3643, 0),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(0.66, 0.66, 0.66),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D1.bam'},
10017: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10018,
'pos': Point3(0, 0, 0),
'hpr': Point3(169.7, 0, 0),
'scale': Vec3(0.90247, 0.90247, 0.90247),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D4.bam'},
10020: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 10018,
'pos': Point3(-12.0714, 0, 0),
'hpr': Vec3(288.435, 0, 0),
'scale': Vec3(0.90247, 0.90247, 0.90247),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D4.bam'},
10022: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10021,
'pos': Point3(-5.97179, -60.3134, 0),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(0.869391, 0.869391, 0.869391),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_C.bam'},
100015: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-1.31696, 0, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.6, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100016: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-12.7478, -11.9991, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA'},
100017: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-17.0503, 0, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100019: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(0.897832, -12.2053, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA'},
100020: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(6.3491, -6.57612, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Point3(1, 1, 0.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA'},
100021: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-20.9336, -5.07158, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_paper_twist_stacks'},
100022: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(4.96172, -5.07158, 0.05),
'hpr': Vec3(272.49, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_paper_twist_stacks'},
100023: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-20.5363, -8.42755, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_chairA'},
100024: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-4.9392, -12.3495, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Vec3(3.79099, 3.79099, 3.79099),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100026: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100025,
'pos': Point3(16.7866, 12.9562, 0.1),
'hpr': Vec3(185.194, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100027: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-21.2469, 12.8535, 0.0929851),
'hpr': Vec3(187.125, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100028: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(5.20127, 12.8535, 0.0929851),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100029: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-8.51009, 13.1118, 0.0929851),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100030: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-15.7803, 1.79844, 0.0929851),
'hpr': Vec3(188.13, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100031: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(14.449, 2.90238, 0.0929851),
'hpr': Vec3(184.764, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100032: {'type': 'model', 'name': 'copy of <unnamed> (4)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(12.2409, -22.0432, 0.0929851),
'hpr': Vec3(184.764, 0, 0),
'scale': Point3(1.4, 1.4, 1.4),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100033: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-1.1837, 1.79844, 0.0929851),
'hpr': Vec3(170.538, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
10000: {'type': 'nodepath', 'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, -2),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10018: {'type': 'nodepath', 'name': 'rightVertPipes',
'comment': '',
'parentEntId': 10021,
'pos': Point3(-16.4537, -45.3982, -8.4),
'hpr': Vec3(0, 0, 0),
'scale': Point3(0.65, 0.65, 1.56)},
10021: {'type': 'nodepath', 'name': 'rightPipes',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100001: {'type': 'nodepath', 'name': 'cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(10, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100002: {'type': 'nodepath', 'name': 'copy of cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(30, -10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100003: {'type': 'nodepath', 'name': 'copy of cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(40, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100005: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-30, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100006: {'type': 'nodepath', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-60, 15, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100007: {'type': 'nodepath', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-60, -15, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100009: {'type': 'nodepath', 'name': 'camera3 target',
'comment': '',
'parentEntId': 0,
'pos': Point3(25, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100010: {'type': 'nodepath', 'name': 'copy of camera3 target',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100011: {'type': 'nodepath', 'name': 'copy of camera3 target (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-50, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100013: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, 60, 10),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100014: {'type': 'nodepath', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, 40, 10),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100018: {'type': 'nodepath', 'name': 'wall1parent',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.53236, 20.7488, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100025: {'type': 'nodepath', 'name': 'wall2',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.36698, -23.6933, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100035: {'type': 'nodepath', 'name': 'targ1',
'comment': '',
'parentEntId': 0,
'pos': Point3(-27.7132, -17.0199, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100036: {'type': 'nodepath', 'name': 'copy of targ1',
'comment': '',
'parentEntId': 0,
'pos': Point3(9.37401, -17.0199, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100038: {'type': 'nodepath', 'name': 'tegrat',
'comment': '',
'parentEntId': 0,
'pos': Point3(8.31643, -40.4532, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100039: {'type': 'nodepath', 'name': 'copy of tegrat',
'comment': '',
'parentEntId': 0,
'pos': Point3(-27.6613, -37.0841, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100040: {'type': 'nodepath', 'name': 'copy of tegrat (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-6.48412, -29.8115, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100042: {'type': 'nodepath', 'name': 'gettar',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.92397, 14.3026, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100043: {'type': 'nodepath', 'name': 'copy of gettar',
'comment': '',
'parentEntId': 0,
'pos': Point3(-23.1978, 15.1905, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100000: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(33.4453, -2.27555, 0),
'hpr': Point3(-3, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 6.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 10.0,
'modelPath': 0,
'projector': Point3(-3, -3, 25),
'radius': 6.0,
'switchId': 0,
'trackTarget1': 100001,
'trackTarget2': 100002,
'trackTarget3': 100003},
100004: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-58.4773, 4.03197, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 4.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100005,
'trackTarget2': 100006,
'trackTarget3': 100007},
100008: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-22.5923, -33.41, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 17.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 20.0,
'modelPath': 0,
'projector': Point3(12, 16, 32),
'radius': 7.0,
'switchId': 0,
'trackTarget1': 100009,
'trackTarget2': 100010,
'trackTarget3': 100011},
100012: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-9.20073, 65.6563, 8.45),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 7.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(0, 0, 17),
'radius': 5,
'switchId': 0,
'trackTarget1': 100014,
'trackTarget2': 100013,
'trackTarget3': 0},
100034: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, -10.5537, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 40.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 20.0,
'modelPath': 0,
'projector': Point3(10, 0, 25),
'radius': 4.0,
'switchId': 0,
'trackTarget1': 100035,
'trackTarget2': 100036,
'trackTarget3': 0},
100037: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-28.9964, -30.2849, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 5.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 12.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100039,
'trackTarget2': 100038,
'trackTarget3': 100040},
100041: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-32.9569, 19.6137, 0.0470875),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 12.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 5,
'modelPath': 0,
'projector': Point3(12, 0, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100042,
'trackTarget2': 100043,
'trackTarget3': 0}}
# No scenario-specific entity overrides for this level.
Scenario0 = {}
# Top-level spec consumed by the Cog HQ level loader: shared entities
# plus the (single, empty) scenario.
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
    Scenario0]}
|
nilq/baby-python
|
python
|
__author__ = "The One & Only Javi"
__version__ = "1.0.0"
__start_date__ = "25th July 2020"
__end_date__ = "5th August 2020"
__maintainer__ = "me"
__email__ = "little_kh@hotmail.com"
__requirements__ = "SQL-Alchemy, MySQL," \
" Flask-SQLAlchemy, database.py, " \
"models.py, video_ops.py"
__status__ = "Production"
__description__ = """
This is the main background operations script.
It is meant to be used with app.py, which should call the methods
"""
import mysql.connector
import json
import os
import subprocess
import sys
import requests
import random
from flask import *
from database import *
from video_ops import *
from models import *
from main_ops import *
from sqlalchemy import *
from sqlalchemy.sql import *
from typing import List, Dict
from datetime import datetime
class Update_DB:
    """Status updates for `uploaded_videos` rows as a video moves through
    the pipeline: fragment -> encrypt -> MPEG-DASH.

    The methods hold no instance state and are intended to be called as
    ``Update_DB.method(con, ...)`` with an open SQLAlchemy connection, so
    they are declared as static methods.
    """

    @staticmethod
    def update_after_fragment(con, input_content_id, output_file_path,
                              video_key, kid):
        """Mark a video as 'Fragmented' and assign its packaged_content_id.

        :param con: open SQLAlchemy connection.
        :param input_content_id: id of the uploaded-video row to update.
        :param output_file_path: path of the fragmented output file.
        :param video_key: encryption key stored with the row.
        :param kid: key id associated with video_key.
        :return: the randomly generated packaged_content_id.
        """
        # NOTE(review): randint(0, 100) can collide between videos —
        # confirm whether packaged_content_id must be unique.
        packaged_content_id = random.randint(0, 100)
        con.execute(
            uploaded_videos.update().where(
                uploaded_videos.c.input_content_id
                == input_content_id).values(
                status='Fragmented', output_file_path=output_file_path,
                video_key=video_key, kid=kid,
                packaged_content_id=packaged_content_id))
        output_string = ("\n\n" + datetime.now().strftime(
            "%d/%m/%Y %H:%M:%S") +
            " - Starting video encryption with" +
            " the following packaged_content_id:")
        print(output_string, file=sys.stdout)
        print(packaged_content_id, file=sys.stdout)
        return packaged_content_id

    @staticmethod
    def update_after_encrypt(con, input_content_id, output_file_path):
        """Mark a video as 'Encrypted' and store the encrypted file path."""
        output_string = (
            "\n\n" +
            datetime.now().strftime("%d/%m/%Y %H:%M:%S") +
            " - Starting MPEG-DASH transcoding")
        print(output_string, file=sys.stdout)
        con.execute(
            uploaded_videos.update().where(
                uploaded_videos.c.input_content_id
                == input_content_id).values(
                status='Encrypted', output_file_path=output_file_path))

    @staticmethod
    def update_after_dash(con, input_content_id,
                          dash_output, packaged_content_id):
        """Mark a video as 'Ready' and store its MPEG-DASH URL.

        :return: tuple ``(1, dash_output, packaged_content_id)`` — 1
            signals success, plus the URL and packaged_content_id.
        """
        output_string = ("\n\n" + datetime.now().strftime(
            "%d/%m/%Y %H:%M:%S") +
            " - Everything went successfully. Returning JSON")
        print(output_string, file=sys.stdout)
        con.execute(
            uploaded_videos.update().where(
                uploaded_videos.c.input_content_id
                == input_content_id).values(
                status='Ready', url=dash_output))
        return (1, dash_output, packaged_content_id)
|
nilq/baby-python
|
python
|
from .PZT import PZTMountedGrounded
|
nilq/baby-python
|
python
|
from robofab.world import CurrentGlyph
from robofab.pens.filterPen import thresholdGlyph
# Simplify the glyph currently open in the font editor: contour points
# closer together than `d` font units are merged by thresholdGlyph.
d = 10
thresholdGlyph(CurrentGlyph(), d)
|
nilq/baby-python
|
python
|
# Two-sided notes
create_table_notes_ab = """
create table if not exists notes_ab
(
id text,
front text,
back text,
PRIMARY KEY(id)
)
"""
# One-sided notes
create_table_notes_qa = """
create table if not exists notes_qa
(
id text,
front text,
back text,
PRIMARY KEY(id)
)
"""
# The join table for notes/tags n:m relation
create_table_tags = """
create table if not exists tags
(
note_id text,
tag text,
PRIMARY KEY(note_id, tag)
)
"""
|
nilq/baby-python
|
python
|
import pysftp
# Public Wing FTP demo server; these are its published demo credentials.
server_host = "demo.wftpserver.com"
username = "demo"
password = "demo"
# Connect over SFTP (non-standard port 2222) and print the remote
# working directory.
with pysftp.Connection(server_host, username=username, password=password,port=2222)as sftp:
    print(sftp.pwd)
    # Examples of further operations:
    # with sftp.cd('public'):         # temporarily chdir to public
    #     sftp.put('/my/local/filename')  # upload file to public/ on remote
    #     sftp.get('remote_file')         # get a remote file
# added a new file
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
netvisor.responses.products
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by Fast Monkeys Oy.
:license: MIT, see LICENSE for more details.
"""
from ..schemas import GetProductSchema, ProductListSchema
from .base import Response
class ProductListResponse(Response):
    """Response wrapper for Netvisor product-list requests."""

    # Schema used to deserialize the response payload.
    schema_cls = ProductListSchema
    # Tag name identifying the payload in the response document.
    tag_name = 'product_list'
class GetProductResponse(Response):
    """Response wrapper for fetching a single Netvisor product."""

    # Schema used to deserialize the response payload.
    schema_cls = GetProductSchema
    # Tag name identifying the payload in the response document.
    tag_name = 'product'
|
nilq/baby-python
|
python
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import requests
import urllib
import logging
import boundary.util as util
class ApiCall(object):
    """Base class wrapping a single REST call to the TrueSight Pulse API.

    Subclasses set path/method/data (and may override the get_api_parameters
    / handle_key_word_args / handle_api_results hooks), then invoke
    api_call() to perform the request via the `requests` library.
    """

    def __init__(self, api_host="api.truesight.bmc.com", email=None, api_token=None):
        """
        :param api_host: api end point host
        :param email: TrueSight Pulse account e-mail
        :param api_token: TrueSight Pulse api token
        :return: returns nothing

        :Example:
        from boundary import API
        api = API(email="foo@bary.com", api_token="api.xxxxxxxxxx-yyyy")
        """
        self._kwargs = None
        # Dispatch table: HTTP verb name -> bound request helper.
        self._methods = {"DELETE": self._do_delete,
                         "GET": self._do_get,
                         "POST": self._do_post,
                         "PUT": self._do_put}
        # Legacy default host; overwritten below by _get_environment()
        # and/or the api_host argument.
        self._api_host = "premium-api.boundary.com"
        self._email = None
        self._api_token = None
        self._curl = False
        # All member variables related to REST CALL
        self._scheme = "https"
        self._method = "GET"
        self._headers = None
        self._data = None
        self._url = None
        self._path = None
        self._url_parameters = None
        self._api_result = None
        self.logLevel = None
        # Set the api_host, email, api token set by environment
        # variables then override with those passed in
        self._get_environment()
        if api_host is not None:
            self._api_host = api_host
        if email is not None:
            self._email = email
        if api_token is not None:
            self._api_token = api_token
    #
    # data
    #
    @property
    def data(self):
        """
        Value of the HTTP payload
        :return:
        """
        return self._data
    @data.setter
    def data(self, data):
        self._data = data
    #
    # headers
    #
    @property
    def headers(self):
        return self._headers
    @headers.setter
    def headers(self, headers):
        self._headers = headers
    #
    # method
    #
    @property
    def method(self):
        """
        HTTP method (verb) used for the call.
        """
        return self._method
    @method.setter
    def method(self, value):
        """
        Before assigning the value validate that is in one of the
        HTTP methods we implement
        """
        keys = self._methods.keys()
        if value not in keys:
            raise AttributeError("Method value not in " + str(keys))
        else:
            self._method = value
    #
    # path
    #
    @property
    def path(self):
        return self._path
    @path.setter
    def path(self, value):
        self._path = value
    #
    # url_parameters
    #
    @property
    def url_parameters(self):
        return self._url_parameters
    @url_parameters.setter
    def url_parameters(self, url_parameters):
        self._url_parameters = url_parameters
    def _get_environment(self):
        """
        Gets the configuration stored in environment variables
        """
        if 'TSP_EMAIL' in os.environ:
            self._email = os.environ['TSP_EMAIL']
        if 'TSP_API_TOKEN' in os.environ:
            self._api_token = os.environ['TSP_API_TOKEN']
        if 'TSP_API_HOST' in os.environ:
            self._api_host = os.environ['TSP_API_HOST']
        else:
            self._api_host = 'api.truesight.bmc.com'
    def _get_url_parameters(self):
        """
        Encode URL parameters
        """
        url_parameters = ''
        if self._url_parameters is not None:
            # NOTE(review): urllib.urlencode is Python 2 only; Python 3
            # moved it to urllib.parse.urlencode — confirm target version.
            url_parameters = '?' + urllib.urlencode(self._url_parameters)
        return url_parameters
    def metric_get(self, enabled=False, custom=False):
        """
        Returns a metric definition identified by name
        :param enabled: Return only enabled metrics
        :param custom: Return only custom metrics
        :return Metrics:
        """
        self.path = 'v1/metrics?enabled={0}&{1}'.format(enabled, custom)
        self._call_api()
        # NOTE(review): _handle_results and self.metrics are not defined
        # anywhere in this class — presumably supplied by a subclass;
        # calling metric_get on a bare ApiCall would raise AttributeError.
        self._handle_results()
        return self.metrics
    def get_api_parameters(self):
        # Hook for subclasses: translate kwargs into path/method/data.
        pass
    def handle_api_results(self):
        # Hook for subclasses: post-process the API result.
        pass
    def _do_get(self):
        """
        HTTP Get Request
        """
        return requests.get(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
    def _do_delete(self):
        """
        HTTP Delete Request
        """
        return requests.delete(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
    def _do_post(self):
        """
        HTTP Post Request
        """
        return requests.post(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
    def _do_put(self):
        """
        HTTP Put Request
        """
        return requests.put(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
    def good_response(self, status_code):
        """
        Determines what status codes represent a good response from an API call.
        """
        return status_code == requests.codes.ok
    def form_url(self):
        """Assemble the full request URL from scheme, host, path and query."""
        return "{0}://{1}/{2}{3}".format(self._scheme, self._api_host, self._path, self._get_url_parameters())
    def _curl_output(self):
        """Print an equivalent curl command line for debugging."""
        headers = ""
        if self._headers is not None:
            for key in self._headers:
                headers = headers + ' -H "{0}: {1}"'.format(key, self._headers[key])
        data = None
        if self._data is not None:
            data = " -d '{0}'".format(self._data)
        else:
            data = ''
        url = ' "{0}"'.format(self.form_url())
        print('curl -X {0} -u "{1}:{2}"{3}{4}{5}'.format(self._method,
                                                         self._email,
                                                         self._api_token,
                                                         headers,
                                                         data,
                                                         url))
    def _call_api(self):
        """
        Make an API call to get the metric definition
        """
        self._url = self.form_url()
        if self._headers is not None:
            logging.debug(self._headers)
        if self._data is not None:
            logging.debug(self._data)
        if len(self._get_url_parameters()) > 0:
            logging.debug(self._get_url_parameters())
        # Dispatch to the helper matching the configured HTTP verb.
        result = self._methods[self._method]()
        if not self.good_response(result.status_code):
            logging.error(self._url)
            logging.error(self._method)
            if self._data is not None:
                logging.error(self._data)
            logging.error(result)
        self._api_result = result
    def handle_key_word_args(self):
        # Hook for subclasses: unpack self._kwargs before the call.
        pass
    def api_call(self):
        """Template method: configure, execute, and parse one API call."""
        self._get_environment()
        self.handle_key_word_args()
        self.get_api_parameters()
        self._call_api()
        return self._handle_api_results()
    def _handle_api_results(self):
        """Return the parsed JSON body, or None on a non-200 response."""
        result = None
        # Only process if we get HTTP result of 200
        if self._api_result.status_code == requests.codes.ok:
            result = json.loads(self._api_result.text)
        return result
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from venus.stock_base import StockEventBase
class EventStockFlag(StockEventBase):
    """Maintains the one-character `flag` column on formStockManager rows.

    Flags written here: 'i' index, 't' tradeable stock, 'b' B-share,
    'h' Hong Kong stock.  Also detects stocks that appear delisted
    (no trade date within ~150 days).
    """

    def flag_quit_stock(self, stock_code):
        """Return True if `stock_code` has not traded for more than 150 days.

        An empty trade-date history is treated as *not* quit (False),
        matching the original behaviour.
        """
        import datetime
        result = self.mysql.select_values(stock_code, 'trade_date')
        if result.empty:
            return False
        trade_dates = result[0].tolist()
        gap = datetime.date.today() - trade_dates[-1]
        return gap.days > 150

    def _set_flag(self, stock_code, flag_value):
        """Write `flag_value` into the manager row for `stock_code`.

        Shared implementation behind the public flag_* methods; commits
        the session and returns 1 when the query object is truthy.
        """
        from venus.form import formStockManager
        result = self.mysql.session.query(
            formStockManager.stock_code,
            formStockManager.flag
        ).filter_by(stock_code=stock_code)
        if result:
            result.update({"flag": flag_value})
            self.mysql.session.commit()
            return 1

    def flag_index(self, stock_code):
        """Flag `stock_code` as an index ('i')."""
        return self._set_flag(stock_code, 'i')

    def flag_stock(self, stock_code):
        """Flag `stock_code` as a regular tradeable stock ('t')."""
        return self._set_flag(stock_code, 't')

    def flag_b_stock(self, stock_code):
        """Flag `stock_code` as a B-share stock ('b')."""
        return self._set_flag(stock_code, 'b')

    def flag_hk_stock(self, stock_code):
        """Flag `stock_code` as a Hong Kong stock ('h')."""
        return self._set_flag(stock_code, 'h')
if __name__ == "__main__":
    # Scan every known security and flag the index instruments
    # (Shanghai SH000*/SH950* and Shenzhen SZ399* codes).
    import re
    from dev_global.env import GLOBAL_HEADER
    from venus.stock_flag import EventStockFlag
    event = EventStockFlag(GLOBAL_HEADER)
    stock_list = event.get_all_security_list()
    for stock_code in stock_list:
        if re.match(r'^SH000|^SH950|^SZ399', stock_code):
            event.flag_index(stock_code)
|
nilq/baby-python
|
python
|
class Solution:
    def makesquare(self, matchsticks: List[int]) -> bool:
        """Return True if all matchsticks can be partitioned into the four
        equal sides of a square, using every stick exactly once.

        Backtracking: sticks are placed longest-first onto whichever side
        still has room, undoing a placement when it leads to a dead end.
        """
        if len(matchsticks) < 4:
            return False
        side, remainder = divmod(sum(matchsticks), 4)
        if remainder:
            return False
        # Longest-first ordering prunes the search dramatically.
        sticks = sorted(matchsticks, reverse=True)
        sides = [side] * 4

        def place(idx: int) -> bool:
            """Try to place sticks[idx:] onto the four sides."""
            if idx == len(sticks):
                return all(s == sides[0] for s in sides)
            stick = sticks[idx]
            for j in range(4):
                if stick <= sides[j]:
                    sides[j] -= stick
                    if place(idx + 1):
                        return True
                    sides[j] += stick
            return False

        return place(0)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# NOTE(review): Python 2 syntax (print statements) — will not run on
# Python 3.  Despite the names, these are lists, not tuples, and
# `tuple` shadows the builtin of the same name.
tuple= ["Juan", 5, 20.80, "HOLA"]
tuple2= [1,2,3]
lista3= [[1,2,3], [1, 3, 6], "HOLA"]
print tuple[0]      # first element
print tuple[1:2]    # slice: elements 1..1
print tuple[2:]     # slice: from index 2 to the end
print tuple *2      # list repetition
print tuple+tuple2  # list concatenation
print lista3
|
nilq/baby-python
|
python
|
# Copyright 2016-2017 Curtis Sand <curtissand@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks core library."""
import pkgutil
import inspect
import os.path
from escadrille.verbosity import dprint
from escadrille.verbosity import vprint
def find_tasks(module, prefix):
    """Return a dict mapping config-file task names to Task subclasses.

    :param module: filesystem path searched for task modules (passed to
        pkgutil.walk_packages as its path list).
    :param prefix: module-name prefix applied by walk_packages.
    :return: dict of {Task.config_name: Task subclass}.
    """
    dprint('lib.tasks.core: finding tasks in %s (prefix: %s)' %
           (module, prefix))
    task_map = {}
    for importer, modname, ispkg in pkgutil.walk_packages([module], prefix):
        if ispkg:
            continue
        # Bind the loaded module to its own name instead of clobbering
        # the `module` search-path argument (the original shadowed it).
        # NOTE(review): find_module/load_module are deprecated in favour
        # of importlib.util APIs — confirm minimum Python version before
        # migrating.
        loaded = importer.find_module(modname).load_module(modname)
        for _, cls in inspect.getmembers(loaded, inspect.isclass):
            if issubclass(cls, Task) and cls != Task:
                task_map[cls.config_name] = cls
    dprint('task_map: %s' % task_map)
    return task_map
class TaskCore(object):
    """An internal class to be shared by option mixins and Task objects."""
    # Name used to reference this task from the config file.
    config_name = 'noop'
    # constant for easy output formatting
    indent = '    '
    msg_template = '%s%s=%s\n'
    def __init__(self, config_file=None, tag=None, shared_state=None):
        """Set up instance variables for an escadrille task object."""
        self.config_file = config_file
        self.tag = tag
        self.shared_state = shared_state
        self.warnings, self.errors, self.status = None, None, None
        self._clear_status()
        self.loaded = False
    def load_config(self):
        """Load info from the config file, at most once per instance."""
        if not self.loaded:
            self.dprint('Loading the config for %s.' % self.tag)
            self._load_config()
            self.loaded = True
    def _load_config(self):
        """An internal method for subclasses to load their config values."""
        pass
    def _clear_status(self):
        """Reset the warnings and errors lists and the status code."""
        self.warnings = []
        self.errors = []
        self.status = None
    def _set_status(self):
        """Set the status code to the number of recorded errors.

        Note: warnings deliberately do not affect the status code.
        """
        self.status = len(self.errors)
    def dprint(self, msg):
        """Call the conditional debug print method."""
        dprint(msg)
    def vprint(self, msg):
        """Call the conditional verbose print method."""
        vprint(msg)
    @staticmethod
    def sanitize_path(path):
        """Expand '~' and normalize the given path to an absolute path."""
        return os.path.abspath(os.path.expanduser(path))
    @property
    def config_snippet_name(self):
        """Create a config string for the name of the current task."""
        return "%stask=%s\n" % (self.indent, self.config_name)
class Task(TaskCore):
    """Base Task object for Escadrille.

    The config_name attribute is used to reference the task class from the
    config file.

    The __init__ and __call__ methods should be implemented by the subclasses.
    The constructor should configure the task with everything needed to perform
    the task. A well designed task does not have state and can therefore be
    repeated. The task subclass needs to implement any checks or validation
    required to operation in this way.

    The call method clears the "warnings", "errors" and "status" attributes
    before starting the task and then can use the "_set_status" method to
    update the status appropriately at the end of the task.
    """
    def __call__(self, *args, **kwargs):
        """Execute the core task behaviour."""
        self._clear_status()
        self.load_config()
        self.dprint(self.debug_msg())
    def debug_msg(self):
        """If supported, generate and return a debug string."""
        self.load_config()
        return "%s Debug" % self.__class__.__name__
    @property
    def default_config(self):
        """Return a string of default example section for config file."""
        self.load_config()
        return ""
|
nilq/baby-python
|
python
|
from plenum.common.constants import ALIAS, SERVICES, VALIDATOR
from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
checkNodeDataForInequality
from plenum.test.pool_transactions.helper import \
updateNodeData
from stp_core.common.log import getlogger
from plenum.test.node_catchup.conftest import whitelist
logger = getlogger()
def test_catch_up_after_demoted(
        txnPoolNodeSet, nodeSetWithNodeAddedAfterSomeTxns):
    """Demote a freshly added node, let it fall behind, re-promote it,
    and verify it catches up and participates again."""
    looper, newNode, client, wallet, newStewardClient, \
        newStewardWallet = nodeSetWithNodeAddedAfterSomeTxns

    def _set_services(services):
        # Send a NODE txn flipping the node's validator role on/off.
        updateNodeData(looper, newStewardClient,
                       newStewardWallet, newNode,
                       {ALIAS: newNode.name, SERVICES: services})

    logger.info(
        "1. add a new node after sending some txns and check that catch-up "
        "is done (the new node is up to date)")
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:4])

    logger.info("2. turn the new node off (demote)")
    _set_services([])

    logger.info("3. send more requests, "
                "so that the new node's state is outdated")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    checkNodeDataForInequality(newNode, *txnPoolNodeSet[:-1])

    logger.info("4. turn the new node on")
    _set_services([VALIDATOR])

    logger.info("5. make sure catch-up is done "
                "(the new node is up to date again)")
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.info("6. send more requests and make sure "
                "that the new node participates in processing them")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
Commander.py - Python Backend for the WiFi Pineapple Commander module.
Version 2 Codename: Electric Boogaloo
Thanks to: sebkinne & tesla
Foxtrot (C) 2016 <foxtrotnull@gmail.com>
"""
import os
import ConfigParser
import sys
import socket
import time
import string
import select
import errno
class Commander(object):
print "[*] WiFi Pineapple Commander Module"
def run(self):
while True:
self.fillBuffer()
self.parseCommands()
def parseConfig(self):
if os.path.exists('commander.conf'):
self.config = ConfigParser.RawConfigParser()
self.config.read('commander.conf')
if self.config.has_section('Network') and self.config.has_section('Security') and self.config.has_section('Commands') and self.config.has_section('Other'):
print "[*] Valid configuration file found!"
print ""
else:
print "[!] No valid configuration file found... Exiting!"
sys.exit(1)
self.server = self.config.get('Network', 'Server')
self.port = self.config.getint('Network', 'Port')
self.nick = self.config.get('Network', 'Nickname')
self.channel = self.config.get('Network', 'Channel')
self.master = self.config.get('Security', 'Master')
self.trigger = self.config.get('Security', 'Trigger')
self.commands = self.config.options('Commands')
self.debugmode = self.config.get('Other', 'Debug')
def printConfig(self):
print "[*] Using the following connection settings:"
print " %s" % self.server
print " %d" % self.port
print " %s" % self.nick
print " %s" % self.channel
print ""
print "[*] Using the following security settings:"
print " Master: %s" % self.master
print " Trigger: %s\n" % self.trigger
print "[*] Listing commands:"
for command in self.commands:
print " %s%s" % (self.trigger, command)
print ""
def connect(self):
self.sock = socket.socket()
print "[*] Connecting!"
self.sock.connect((self.server, self.port))
print "[*] Sending nick and user information"
self.sock.send('NICK %s\r\n' % self.nick)
self.sock.send('USER %s 8 * :%s\r\n' % (self.nick, self.nick))
time.sleep(10)
self.sock.send('JOIN %s\r\n' % self.channel)
self.sock.send('PRIVMSG %s :Connected.\r\n' % self.channel)
print "[*] Connected!\n"
def fillBuffer(self):
self.buff = ""
self.sock.setblocking(0)
readable, _, _ = select.select([self.sock], [], [])
if self.sock in readable:
self.buff = ""
cont = True
while cont:
try:
self.buff += self.sock.recv(1024)
except socket.error,e:
if e.errno != errno.EWOULDBLOCK:
sys.exit(1)
cont = False
def parseCommands(self):
for line in self.buff.split('\r\n'):
if self.debugmode.lower() == "on":
print line
line = line.split()
if 'PING' in line:
print "[*] Replying to ping\n"
self.sock.send('PONG ' + line.split()[1] + '\r\n')
for command in self.commands:
if line and line[0].lower().startswith(":" + self.master.lower() + "!"):
if ":" + self.trigger + command in line:
print "[*] Found command %s%s\n" % (self.trigger, command)
self.sock.send('PRIVMSG %s :Executing command %s\r\n' % (self.channel, command))
cmd = self.config.get('Commands', command)
os.system(cmd)
if __name__ == '__main__':
    # Wire-up: parse + display config, connect to IRC, then loop forever.
    commander = Commander()
    commander.parseConfig()
    commander.printConfig()
    commander.connect()
    commander.run()
|
nilq/baby-python
|
python
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from freeswitch import consoleLog
from core.apps import sms_credit_transfer
def chat(message, args):
    """FreeSWITCH chat handler: route an incoming credit-transfer SMS.

    *args* is expected as "<sender>|<request text>"; *message* is unused.
    """
    consoleLog('info', "Credit transfer: %s\n" % args)
    sender, request = args.split("|", 1)
    sms_credit_transfer.handle_incoming(sender, request)
def fsapi(session, stream, env, args):
    """FreeSWITCH API entry point; delegates to chat() with no message."""
    # chat doesn't use msg anyway
    chat(None, args)
|
nilq/baby-python
|
python
|
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.url import urljoin_rfc
from sitegraph.items import SitegraphItem
class GraphspiderSpider(CrawlSpider):
    """Crawl sastra.edu and emit one item per page listing its outbound links."""
    name = 'graphspider'
    allowed_domains = ['sastra.edu']
    start_urls = ['https://www.sastra.edu/']
    rules = (
        Rule(LinkExtractor(allow=r'/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Build a SitegraphItem for *response* with all absolute hrefs.

        Skips ``javascript:`` pseudo-links; relative hrefs are resolved
        against the page URL.
        """
        item = SitegraphItem()
        item['url'] = response.url
        item['http_status'] = response.status
        links = []
        for anchor in response.xpath('//a[@href]'):
            href = anchor.xpath('@href').extract()[0]
            if not href.lower().startswith("javascript"):
                # BUG FIX: urljoin_rfc is deprecated and removed in modern
                # Scrapy; Response.urljoin is the supported replacement.
                links.append(response.urljoin(href))
        item['linkedurls'] = links
        return item
|
nilq/baby-python
|
python
|
from ocean_spark.hooks import OceanSparkHook
from unittest.mock import MagicMock
def test_get_app(successful_get_app: None, get_connection_mock: None) -> None:
    """The hook returns the mocked app payload for a known app name."""
    hook = OceanSparkHook()
    app = hook.get_app("test-app-name")
    assert app is not None
    assert app["displayName"] == "test app name"
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 26 15:13:29 2022
@author: jasy9
"""
from .topsis import TOPSIS
|
nilq/baby-python
|
python
|
# Author: Ackerley Cheng
# file encoding: utf-8
def sameList(listA, listB):
    """Return True when listA and listB hold the same elements, order ignored.

    BUG FIX: the previous len()+set() comparison treated lists with the
    same length and same distinct elements as equal even when duplicate
    counts differed (e.g. [1, 1, 2] vs [1, 2, 2]). Comparing as
    multisets fixes that while preserving all duplicate-free behaviour.
    Elements must be hashable, as before.
    """
    from collections import Counter  # local import keeps module deps unchanged
    return Counter(listA) == Counter(listB)
def listInListSet(list, listSet):
    """Return the index of the first entry of *listSet* that matches
    *list* (order-insensitive, via sameList), or -1 when absent."""
    return next(
        (idx for idx, candidate in enumerate(listSet) if sameList(list, candidate)),
        -1)
# check if listA is a subset of listB
def isSubSet(listA, listB):
    """Return True when every element of *listA* also appears in *listB*."""
    return all(element in listB for element in listA)
# dump rule with beautified format
def beautifiedRuleSet(ruleSet):
    """Format a rule set as a tab-separated table, sorted by descending lift.

    Returns 'found nothing.' for an empty rule set.
    """
    ordered = sorted(ruleSet, key=lambda rule: rule.lift, reverse=True)
    if not ordered:
        return 'found nothing.'
    pieces = ['Association Rules:\n',
              '[support,\tconfidence,\tlift,\t\trule\n']
    for rule in ordered:
        pieces.append('[' + str(rule.sup) + ',\t\t' + str(rule.conf))
        pieces.append(',\t\t' + str(rule.lift) + ',\t\t')
        pieces.append('{IF ' + str(rule.IF) + ' THEN ' + str(rule.THEN) + '} ]')
        pieces.append('\n')
    return ''.join(pieces)
|
nilq/baby-python
|
python
|
from django import forms
from consent.models import Privilege
class PrivilegeForm(forms.ModelForm):
    """ModelForm exposing every field of the Privilege model."""

    class Meta:
        model = Privilege
        # BUG FIX: Django >= 1.8 raises ImproperlyConfigured when a
        # ModelForm declares neither 'fields' nor 'exclude'; '__all__'
        # preserves the historical implicit behaviour.
        fields = '__all__'
class ConsentForm(forms.Form):
    """Plain form listing every Privilege as an optional checkbox.

    An empty selection is valid (required=False), so a user can revoke
    all consents at once.
    """
    consents = forms.ModelMultipleChoiceField(
        Privilege.objects,
        widget=forms.CheckboxSelectMultiple,
        required=False)
|
nilq/baby-python
|
python
|
import numpy as np
from qm_2019_sss_6.NobleGasModel import NobleGasModel
from qm_2019_sss_6.scf import scf
from qm_2019_sss_6.mp2 import MP2
# Two-atom test geometry.  NOTE(review): coordinates are presumably in
# atomic units (Bohr) — confirm against the NobleGasModel docs.
atomic_coordinates = np.array([[0.0, 0.0, 0.0], [3.0, 4.0, 5.0]])
# Derived from user input
number_of_atoms = len(atomic_coordinates)
# Argon parameters - these would change for other noble gases.
model_parameters = {
    'r_hop' : 3.1810226927827516,
    't_ss' : 0.03365982238611262,
    't_sp' : -0.029154833035109226,
    't_pp1' : -0.0804163845390335,
    't_pp2' : -0.01393611496959445,
    'r_pseudo' : 2.60342991362958,
    'v_pseudo' : 0.022972992186364977,
    'dipole' : 2.781629275106456,
    'energy_s' : 3.1659446174413004,
    'energy_p' : -2.3926873325346554,
    'coulomb_s' : 0.3603533286088998,
    'coulomb_p' : -0.003267991835806299
    }
# Net core charge per atom after removing the frozen core electrons.
ionic_charge = 6
# Valence basis: one s and three p orbitals per atom.
orbital_types = ['s', 'px', 'py', 'pz']
orbitals_per_atom = len(orbital_types)
p_orbitals = orbital_types[1:]
# Cartesian unit vectors associated with each p orbital.
vec = {'px': [1, 0, 0], 'py': [0, 1, 0], 'pz': [0, 0, 1]}
orbital_occupation = { 's':0, 'px':1, 'py':1, 'pz':1 }
# Build the model, then run SCF followed by an MP2 correction on top.
my_model = NobleGasModel(atomic_coordinates,model_parameters,ionic_charge,orbital_types,orbitals_per_atom,vec,orbital_occupation)
interaction_matrix,chi_tensor,hamiltonian_matrix,density_matrix,energy_ion = my_model.kernel()
my_scf = scf(hamiltonian_matrix,interaction_matrix,density_matrix,chi_tensor,energy_ion,ionic_charge,orbitals_per_atom)
print("SCF Energy: " + str(my_scf.kernel()))
my_mp2= MP2(my_scf)
print("MP2 Energy" + str(my_mp2.kernel()))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import random
from collections import namedtuple, defaultdict
dir_path = os.path.dirname(os.path.realpath(__file__))
# Read the puzzle input; 'with' guarantees the handle is closed (the
# original left the file open for the life of the process).
with open(dir_path + "/input.txt", "r") as file:
    lines = [l.strip() for l in file.readlines()]
"""
acc increases or decreases a single global value called the accumulator by the value given in the argument. For example, acc +7 would increase the accumulator by 7. The accumulator starts at 0. After an acc instruction, the instruction immediately below it is executed next.
jmp jumps to a new instruction relative to itself. The next instruction to execute is found using the argument as an offset from the jmp instruction; for example, jmp +2 would skip the next instruction, jmp +1 would continue to the instruction immediately below it, and jmp -20 would cause the instruction 20 lines above to be executed next.
nop stands for No OPeration - it does nothing. The instruction immediately below it is executed next.
"""
# lines = [
# 'nop +0',
# 'acc +1',
# 'jmp +4',
# 'acc +3',
# 'jmp -3',
# 'acc -99',
# 'acc +1',
# 'jmp -4',
# 'acc +6',
# ]
# Mutable interpreter state for Part 1; the handlers below mutate these
# module-level globals directly.
accumulator = 0                         # the single 'acc' register
instruction_counts = defaultdict(int)   # times each instruction index ran
instruction_history = []                # executed instructions, in order
instruction_idx = 0                     # program counter
def acc(arg):
    """Add *arg* to the global accumulator (control falls through)."""
    global accumulator
    accumulator += arg
def jmp(arg):
    """Move the global program counter by the relative offset *arg*."""
    global instruction_idx
    instruction_idx += arg
def nop(arg):
    """No operation; *arg* is ignored (control falls through)."""
    return None
# Dispatch table: opcode -> handler that mutates the global interpreter state.
instructions = {
    'acc': acc,
    'jmp': jmp,
    'nop': nop,
}
# An instruction: an opcode string and a signed integer argument.
I = namedtuple('I', ['op', 'arg'])

def parse(line):
    """Parse a line like 'acc +7' into an I(op, arg) with an int arg."""
    opcode, offset = line.split()
    return I(opcode, int(offset))
program = [parse(line) for line in lines]
# print(program)
# 'terminated' is never set True; the loop only exits via the break below,
# which fires once some instruction has executed twice (a loop was entered).
terminated = False
while not terminated:
    if 2 in instruction_counts.values():
        break
    instruction = program[instruction_idx]
    instruction_counts[instruction_idx] += 1
    instructions[instruction.op](instruction.arg)
    instruction_history.append(instruction)
    if instruction.op != 'jmp':
        instruction_idx += 1
# The repeat is detected *after* the offending instruction ran a second
# time, so its last effect is subtracted here; presumably this assumes the
# repeated instruction was an 'acc' — TODO confirm for arbitrary inputs.
print('Part 1:', accumulator - instruction_history[-1].arg)
# lines = [
# 'nop +0',
# 'acc +1',
# 'jmp +4',
# 'acc +3',
# 'jmp -3',
# 'acc -99',
# 'acc +1',
# 'nop -4',
# 'acc +6',
# ]
def run_program(program):
    """Execute *program*; return (terminated_normally, accumulator).

    Normal termination means the program counter ran off the end of the
    program. An infinite loop is reported once any instruction has been
    executed twice.
    """
    acc_value = 0
    run_counts = defaultdict(int)
    trace = []
    pc = 0
    while True:
        try:
            current = program[pc]
        except IndexError:
            # Stepped past the last instruction: normal termination.
            return True, acc_value
        if 2 in run_counts.values():
            # Some instruction already executed twice -> infinite loop.
            return False, acc_value
        # print(current)
        run_counts[pc] += 1
        trace.append(current)
        if current.op == 'acc':
            acc_value += current.arg
            pc += 1
        elif current.op == 'jmp':
            pc += current.arg
        elif current.op == 'nop':
            pc += 1
accumulator = 0
success = False
# Randomised repair search: flip one random jmp<->nop per attempt and rerun
# until the program terminates normally. May retry the same index many
# times; a systematic sweep over indices would be deterministic and faster.
while not success:
    program = [parse(line) for line in lines]
    idx = random.choice(range(0, len(program)))
    if program[idx].op == 'jmp':
        arg = program[idx].arg
        program[idx] = I('nop', arg)
    elif program[idx].op == 'nop':
        arg = program[idx].arg
        program[idx] = I('jmp', arg)
    # print(program)
    success, accumulator = run_program(program)
print('Part 2:', accumulator)
|
nilq/baby-python
|
python
|
import os
import pytz
from tweepy import OAuthHandler, API, TweepError
from . import Data
from abc import ABC, abstractmethod
import datetime
def check_if_datetime_offset_aware(date):
    """Return True when *date* is timezone-aware.

    datetime.utcoffset() returns None exactly when tzinfo is None or its
    utcoffset() yields None, matching the standard aware/naive definition.
    """
    return date.utcoffset() is not None
class SocialMediaDataFetch(ABC):
    """Abstract base for fetching a user's posts within a time window."""
    # NOTE(review): these class-level defaults are evaluated once at import
    # time, and _data_lst is a mutable attribute shared by all instances
    # until __init__ rebinds it per instance — consider removing them.
    _start_date = datetime.datetime.now() - datetime.timedelta(days=7)
    _end_date = datetime.datetime.now()
    _data_lst = []
    def __init__(self, start_date, end_date):
        """Store the [start_date, end_date] window, converting any
        timezone-aware datetime to naive UTC for uniform comparison."""
        assert(start_date < end_date)
        self._start_date = start_date
        self._end_date = end_date
        if (check_if_datetime_offset_aware(start_date)):
            self._start_date = start_date.astimezone(pytz.utc).replace(tzinfo=None)
        if (check_if_datetime_offset_aware(end_date)):
            self._end_date = end_date.astimezone(pytz.utc).replace(tzinfo=None)
        self._data_lst = []
    @abstractmethod
    def __get_api_access__(self):
        # Subclasses authenticate against their platform's API here.
        pass
    @abstractmethod
    def __format_data__(self, data):
        # Subclasses convert raw API items into Data objects in _data_lst.
        pass
    @abstractmethod
    def __filter_data_by_time__(self, data):
        # Subclasses keep only items inside the configured time window.
        pass
    @abstractmethod
    def fetch_user_posts(self, user_id):
        # Subclasses pull a user's posts and populate _data_lst.
        pass
    def get_data_lst(self):
        """Return the list of Data objects collected by the last fetch."""
        return self._data_lst
class TwitterDataFetch(SocialMediaDataFetch):
    """Fetch a Twitter user's timeline within the configured time window."""
    _api = None
    def __init__(self, start_date = datetime.datetime.now() - datetime.timedelta(days=7), end_date = datetime.datetime.now()):
        # NOTE(review): default arguments are evaluated once at import time,
        # so the "last 7 days" window is frozen at process start.
        assert(start_date < end_date)
        super().__init__(start_date, end_date)
    def __get_user_timeline__(self, user_id, last_id = -1):
        """Fetch up to 200 tweets, paging backwards when last_id is given."""
        if last_id == -1:
            new_tweets = self._api.user_timeline(screen_name=user_id, count=200, include_rts = False, tweet_mode = 'extended')
        else:
            # max_id is exclusive via the str(last_id - 1) trick.
            new_tweets = self._api.user_timeline(screen_name=user_id, count=200, include_rts = False, max_id = str(last_id - 1), tweet_mode = 'extended')
        return new_tweets
    def __get_api_access__(self):
        # TODO: move to env file once setup
        # SECURITY: hardcoded API credentials committed to source; these
        # should be revoked and loaded from the environment instead.
        consumer_key = "Ghsx0hbmNz5UKMrLaJX8Whlmv"
        consumer_secret = "gBwqcEvRjJ4BVtvV3knQHgxnEXNzkikndtJsRpYkcz7rQ7eXkV"
        access_token = "976956847138791424-oWw9Q00D5zMRpCMwcjiwUiFb7BelZb9"
        access_token_secret = "iCDUsytTIjtpPtg3QopBmJifKIiw0Srbc06ROiOp0ZupF"
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self._api = API(auth)
    def __format_data__(self, searched_tweets):
        """Append each tweet's text and timestamp to _data_lst."""
        for tweet in searched_tweets:
            new_data = Data(tweet.full_text, tweet.created_at)
            self._data_lst.append(new_data)
    def __filter_data_by_time__(self, tweets):
        """Collect tweets inside the window; return whether to keep paging."""
        # tweets are sorted by time from the latest to the earliest
        if (self._start_date > tweets[0].created_at):
            # means the latest tweet is earlier than the start date, search finished
            return False
        if (self._end_date < tweets[-1].created_at):
            # means the earliest tweet is later than the end date, need to keep extracting
            return True
        earliest = (len(tweets) - 1)
        latest = 0
        if (self._start_date > tweets[-1].created_at):
            earliest = self.__binary_search_get_time_index(tweets, self._start_date)
        if (self._end_date < tweets[0].created_at):
            latest = self.__binary_search_get_time_index(tweets[:earliest], self._end_date)
        assert(latest >= 0) #prevent bug
        self.__format_data__(tweets[latest: earliest])
        # Keep paging only when this page still reaches back to the window start.
        return earliest == (len(tweets) - 1)
    def __binary_search_get_time_index(self, tweets, time):
        """Binary search over the newest-to-oldest tweet list.

        NOTE(review): returns the boundary index for *time*; presumably the
        first index whose created_at is not later than *time* — confirm.
        """
        start_point = (len(tweets) - 1)
        end_point = 0
        pivot = (start_point + end_point)//2
        while (start_point - end_point > 1):
            if (time >= tweets[pivot].created_at):
                start_point = pivot
            else:
                end_point = pivot
            pivot = (start_point + end_point)//2
        return start_point
    def fetch_user_posts(self, user_id):
        """Page backwards through *user_id*'s timeline into _data_lst."""
        self._data_lst = []
        self.__get_api_access__()
        last_id = -1
        keep_requesting = True
        while (keep_requesting):
            try:
                new_tweets = self.__get_user_timeline__(user_id, last_id)
                if not new_tweets:
                    break
                last_id = new_tweets[-1].id
                keep_requesting = self.__filter_data_by_time__(new_tweets)
            except TweepError as e:
                # Best-effort: log the API error and stop paging.
                print(e)
                break
if __name__ == '__main__':
    # Ad-hoc manual test: fetch the same account over 1-day and 2-day
    # windows and print the counts (network access required).
    dataFetch7 = TwitterDataFetch(start_date = datetime.datetime.now() - datetime.timedelta(days=1))
    dataFetch7.fetch_user_posts("panettonepapi")
    lst_7 = dataFetch7.get_data_lst()
    print(len(lst_7))
    dataFetch8 = TwitterDataFetch(start_date = datetime.datetime.now() - datetime.timedelta(days=2))
    dataFetch8.fetch_user_posts("panettonepapi")
    lst_8 = dataFetch8.get_data_lst()
    print(len(lst_8))
    # Re-fetching with the same object should reset and repopulate the list.
    dataFetch7.fetch_user_posts("panettonepapi")
    lst_7 = dataFetch7.get_data_lst()
    print(len(lst_7))
|
nilq/baby-python
|
python
|
import sys
import traceback
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from cripts.config.config import CRIPTsConfig
from cripts.core.mongo_tools import mongo_find_one
from cripts.events.event import Event
from prep import prep_database
class Command(BaseCommand):
"""
Script Class.
"""
option_list = BaseCommand.option_list + (
make_option("-a", "--migrate_all", action="store_true", dest="mall",
default=False,
help="Migrate all collections."),
make_option("-E", "--migrate_events", action="store_true",
dest="events",
default=False,
help="Migrate events."),
)
help = 'Upgrades MongoDB to latest version using mass-migration.'
def handle(self, *args, **options):
"""
Script Execution.
"""
lv = settings.CRIPTS_VERSION
mall = options.get('mall')
events = options.get('events')
if (not mall and
not events and):
print "You must select something to upgrade. See '-h' for options."
sys.exit(1)
else:
upgrade(lv, options)
def migrate_collection(class_obj, sort_ids):
    """
    Migrate a collection by opening each document. This will, by nature of the
    core functionality in `cripts.core.cripts_mongoengine` check the
    schema_version and migrate it if it is not the latest version.

    :param class_obj: The class to migrate documents for.
    :type class_obj: class that inherits from
    :class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
    :param sort_ids: If we should sort by ids ascending.
    :type sort_ids: boolean
    """

    # find all documents that don't have the latest schema version
    # and migrate those.
    version = class_obj._meta['latest_schema_version']
    print "\nMigrating %ss" % class_obj._meta['cripts_type']
    # timeout(False) keeps the long-running cursor from expiring server-side.
    if sort_ids:
        docs = (
            class_obj.objects(schema_version__lt=version)
            .order_by('+id')
            .timeout(False)
        )
    else:
        docs = class_obj.objects(schema_version__lt=version).timeout(False)
    total = docs.count()
    if not total:
        print "\tNo %ss to migrate!" % class_obj._meta['cripts_type']
        return
    # Trailing commas keep the progress output on one line (Python 2 print).
    print "\tMigrated 0 of %d" % total,
    count = 0
    doc = None
    try:
        # Iterating the queryset triggers the implicit per-document migration.
        for doc in docs:
            if 'migrated' in doc._meta and doc._meta['migrated']:
                count += 1
            print "\r\tMigrated %d of %d" % (count, total),
        print ""
    except Exception as e:
        # Provide some basic info so admin can query their db and figure out
        # what bad data is blowing up the migration.
        print "\n\n\tAn error occurred during migration!"
        print "\tMigrated: %d" % count
        formatted_lines = traceback.format_exc().splitlines()
        print "\tError: %s" % formatted_lines[-1]
        if hasattr(e, 'tlo'):
            print "\tDocument ID: %s" % e.tlo
        else:
            doc_id = mongo_find_one(class_obj._meta.get('collection'),
                                    {'schema_version': {'$lt': version}}, '_id')
            print "\tDocument ID: %s" % doc_id.get('_id')
        if doc:
            print "\tLast ID: %s" % doc.id
        sys.exit(1)
def upgrade(lv, options):
"""
Perform the upgrade.
:param lv: The CRIPTs version we are running.
:type lv: str
:param options: The options passed in for what to upgrade.
:type options: dict
"""
# eventually we will do something to check to see what the current version
# of the CRIPTs DB is so we can upgrade through several versions at once.
# this is important if prep scripts need to be run for certain upgrades
# to work properly.
mall = options.get('mall')
events = options.get('events')
skip = options.get('skip')
# run prep migrations
if not skip:
prep_database()
# run full migrations
if mall or events:
migrate_collection(Event, sort_ids)
# Always bump the version to the latest in settings.py
config = CRIPTsConfig.objects()
if len(config) > 1:
print "You have more than one config object. This is really bad."
else:
config = config[0]
config.cripts_version = settings.CRIPTS_VERSION
config.save()
|
nilq/baby-python
|
python
|
from d20.Manual.Facts import (Fact,
registerFact)
from d20.Manual.Facts.Fields import StringField
@registerFact('hash')
class MD5HashFact(Fact):
    # MD5 digest string; registered under the 'hash' fact group.
    _type_ = 'md5'
    value = StringField()
@registerFact('hash')
class SHA1HashFact(Fact):
    # SHA-1 digest string; registered under the 'hash' fact group.
    _type_ = 'sha1'
    value = StringField()
@registerFact('hash')
class SHA256HashFact(Fact):
    # SHA-256 digest string; registered under the 'hash' fact group.
    _type_ = 'sha256'
    value = StringField()
@registerFact('hash')
class SSDeepHashFact(Fact):
    # ssdeep fuzzy-hash string; registered under the 'hash' fact group.
    _type_ = 'ssdeep'
    value = StringField()
@registerFact()
class MimeTypeFact(Fact):
    # MIME type and file-type description strings; no fact group.
    _type_ = 'mimetype'
    mimetype = StringField()
    filetype = StringField()
|
nilq/baby-python
|
python
|
"""Module contains the pydvdid package definition.
"""
from setuptools import setup
# The long description shown on PyPI comes straight from the README.
with open('README.rst') as readme_file:
    README = readme_file.read()
setup(
    name="pydvdid",
    version="1.1",
    description="A pure Python implementation of the Windows API IDvdInfo2::GetDiscID method, as used by Windows Media Center to compute a 'practically unique' 64-bit CRC for metadata retrieval.", # pylint: disable=locally-disabled, line-too-long
    long_description=README,
    author="Steve Wood",
    author_email="octocat@nym.hush.com",
    url="https://github.com/sjwood/pydvdid",
    packages=[
        "pydvdid"
    ],
    # Command-line entry point installed alongside the package.
    scripts=[
        "bin/pydvdid"
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Home Automation",
        "Topic :: Multimedia :: Video",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities"
    ],
    license="Apache License 2.0",
)
|
nilq/baby-python
|
python
|
import pygame
from .math_helpers import *
def draw(surface, p1,p2, shading_function, section_length,section_offset):
    """Draw a hard-edged line from p1 to p2, colouring each pixel via
    shading_function(t) where t in [0, 1) is the pixel's position within
    a repeating section of length section_length (shifted by
    section_offset).  Endpoints must be integer pixel coordinates.
    """
    #Adapted Bresenham's line algorithm from
    #http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
    x0,y0 = p1
    x1,y1 = p2
    dx = abs(x1 - x0)
    dy = abs(y1 - y0)
    # Step direction along each axis.
    if x0 < x1: sx = 1
    else: sx = -1
    if y0 < y1: sy = 1
    else: sy = -1
    err = dx - dy
    while True:
        # Distance travelled from p1 determines the shading phase.
        displacement = vec_length(vec_sub([x0,y0],p1)) + section_offset
        surface.set_at((x0,y0),shading_function( (displacement%section_length)/section_length ))
        if x0 == x1 and y0 == y1: break
        e2 = 2 * err
        if e2 > -dy:
            err = err - dy
            x0 = x0 + sx
        if e2 < dx:
            err = err + dx
            y0 = y0 + sy
def aadraw(surface, p1,p2, shading_function, section_length,section_offset, blend):
    """Anti-aliased variant of draw(); when *blend* is true each pixel is
    alpha-blended with the surface's existing colour, otherwise the shaded
    colour is scaled by coverage and written directly.
    """
    #Adapted Xiaolin Wu's line algorithm from
    #http://en.wikipedia.org/wiki/Xiaolin_Wu%27s_line_algorithm
    x0,y0 = p1
    x1,y1 = p2
    def plot(x,y, c):
        # c is the pixel's coverage in [0, 1].
        # NOTE(review): for steep lines (x, y) are in swapped axes here while
        # p1 is not swapped, so the shading displacement looks inconsistent
        # with the non-steep case — confirm intended behaviour.
        displacement = vec_length(vec_sub([x,y],p1)) + section_offset
        color2 = shading_function( (displacement%section_length)/float(section_length) )
        if blend:
            color1 = surface.get_at((x,y))
            color = [rndint(color1[i]*(1.0-c) + c*color2[i]) for i in [0,1,2]]
        else:
            color = [rndint(c*color2[i]) for i in [0,1,2]]
        surface.set_at((x,y),color)
    def fpart(x): return x - int(x)
    def rfpart(x): return 1.0 - fpart(x)
    # Work in the axis where the line advances fastest.
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        x0,y0 = y0,x0
        x1,y1 = y1,x1
    if x0 > x1:
        x0,x1 = x1,x0
        y0,y1 = y1,y0
    dx = x1 - x0
    dy = y1 - y0
    gradient = float(dy) / float(dx)
    #handle first endpoint
    xend = round(x0)
    yend = y0 + gradient * (xend - x0)
    xgap = rfpart(x0 + 0.5)
    xpxl1 = int(xend) #this will be used in the main loop
    ypxl1 = int(yend)
    if steep:
        plot(ypxl1, xpxl1, rfpart(yend) * xgap)
        plot(ypxl1+1, xpxl1, fpart(yend) * xgap)
    else:
        plot(xpxl1, ypxl1, rfpart(yend) * xgap)
        plot(xpxl1, ypxl1+1, fpart(yend) * xgap)
    intery = yend + gradient # first y-intersection for the main loop
    #handle second endpoint
    xend = round(x1)
    yend = y1 + gradient * (xend - x1)
    xgap = fpart(x1 + 0.5)
    xpxl2 = int(xend) #this will be used in the main loop
    ypxl2 = int(yend)
    if steep:
        plot(ypxl2 , xpxl2, rfpart(yend) * xgap)
        plot(ypxl2+1, xpxl2, fpart(yend) * xgap)
    else:
        plot(xpxl2, ypxl2, rfpart(yend) * xgap)
        plot(xpxl2, ypxl2+1, fpart(yend) * xgap)
    #main loop
    for x in range(xpxl1+1, xpxl2, 1):
        #for x from xpxl1 + 1 to [through] xpxl2 - 1 do
        # Coverage is split between the two pixels straddling the ideal line.
        if steep:
            plot(int(intery), x, rfpart(intery))
            plot(int(intery)+1, x, fpart(intery))
        else:
            plot(x, int(intery), rfpart(intery))
            plot(x, int(intery)+1, fpart(intery))
        intery = intery + gradient
|
nilq/baby-python
|
python
|
from flask_pyoidc.flask_pyoidc import OIDCAuthentication
from tenacity import retry
@retry
def get_auth(app):
    """Build the OIDC authentication extension for *app*.

    Decorated with a bare ``@retry``, so construction is retried
    indefinitely on any exception.
    """
    return OIDCAuthentication(
        app,
        issuer=app.config['OIDC_ISSUER'],
        client_registration_info=app.config['OIDC_CLIENT_CONFIG'],
    )
|
nilq/baby-python
|
python
|
"""Functions to calculate signal-to-noise ratio in four different cases"""
import numpy as np
from legwork import strain, psd, utils, evol
import astropy.units as u
__all__ = ['snr_circ_stationary', 'snr_ecc_stationary',
'snr_circ_evolving', 'snr_ecc_evolving']
def snr_circ_stationary(m_c, f_orb, dist, t_obs, position=None, polarisation=None, inclination=None,
                        interpolated_g=None, interpolated_sc=None, instrument="LISA", custom_psd=None):
    """Computes SNR for circular and stationary sources

    Parameters
    ----------
    m_c : `float/array`
        Chirp mass

    f_orb : `float/array`
        Orbital frequency

    dist : `float/array`
        Distance to the source

    t_obs : `float`
        Total duration of the observation

    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.

    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.

    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.

    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.

    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.

    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.

    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.

    Returns
    -------
    snr : `float/array`
        SNR for each binary
    """
    # only need to compute n=2 harmonic for circular
    h_0_circ_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=np.zeros_like(f_orb).value, n=2, dist=dist,
                              position=position, polarisation=polarisation, inclination=inclination,
                              interpolated_g=interpolated_g).flatten()**2
    # Accumulated signal power over the observation time.
    h_f_src_circ_2 = h_0_circ_2 * t_obs
    # Noise is evaluated at the GW frequency (twice the orbital frequency).
    if interpolated_sc is not None:
        h_f_lisa_2 = interpolated_sc(2 * f_orb)
    else:
        h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb, t_obs=t_obs, instrument=instrument,
                                                custom_psd=custom_psd)
    snr = (h_f_src_circ_2 / h_f_lisa_2)**0.5
    # decompose() reduces the astropy quantity to a dimensionless value.
    return snr.decompose()
def snr_ecc_stationary(m_c, f_orb, ecc, dist, t_obs, harmonics_required,
                       position=None, polarisation=None, inclination=None,
                       interpolated_g=None, interpolated_sc=None,
                       ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
                       instrument="LISA", custom_psd=None):
    """Computes SNR for eccentric and stationary sources

    Parameters
    ----------
    m_c : `float/array`
        Chirp mass

    f_orb : `float/array`
        Orbital frequency

    ecc : `float/array`
        Eccentricity

    dist : `float/array`
        Distance to the source

    t_obs : `float`
        Total duration of the observation

    harmonics_required : `integer`
        Maximum integer harmonic to compute

    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.

    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.

    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.

    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.

    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.

    ret_max_snr_harmonic : `boolean`
        Whether to return (in addition to the snr), the harmonic with the maximum SNR

    ret_snr2_by_harmonic : `boolean`
        Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieving by summing and then taking the square root.

    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.

    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.

    Returns
    -------
    snr : `float/array`
        SNR for each binary

    max_snr_harmonic : `int/array`
        harmonic with maximum SNR for each binary (only returned if ``ret_max_snr_harmonic=True``)
    """
    # define range of harmonics
    n_range = np.arange(1, harmonics_required + 1).astype(int)

    # calculate source signal
    h_0_ecc_n_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=ecc, n=n_range, dist=dist,
                               position=position, polarisation=polarisation,
                               inclination=inclination, interpolated_g=interpolated_g)**2

    # reshape the output since only one timestep
    h_0_ecc_n_2 = h_0_ecc_n_2.reshape(len(m_c), harmonics_required)

    # Accumulated signal power over the observation time.
    h_f_src_ecc_2 = h_0_ecc_n_2 * t_obs

    # calculate harmonic frequencies and noise
    # f_n has shape (binary, harmonic): the n-th harmonic of each f_orb.
    f_n = n_range[np.newaxis, :] * f_orb[:, np.newaxis]
    if interpolated_sc is not None:
        h_f_lisa_n_2 = interpolated_sc(f_n.flatten())
        h_f_lisa_n_2 = h_f_lisa_n_2.reshape(f_n.shape)
    else:
        h_f_lisa_n_2 = psd.power_spectral_density(f=f_n, t_obs=t_obs,
                                                  instrument=instrument, custom_psd=custom_psd)
    snr_n_2 = (h_f_src_ecc_2 / h_f_lisa_n_2).decompose()

    if ret_snr2_by_harmonic:
        return snr_n_2

    # calculate the signal-to-noise ratio
    snr = (np.sum(snr_n_2, axis=1))**0.5
    if ret_max_snr_harmonic:
        # +1 converts the 0-based argmax index into a harmonic number.
        max_snr_harmonic = np.argmax(snr_n_2, axis=1) + 1
        return snr, max_snr_harmonic
    else:
        return snr
def snr_circ_evolving(m_1, m_2, f_orb_i, dist, t_obs, n_step,
                      position=None, polarisation=None, inclination=None, t_merge=None,
                      interpolated_g=None, interpolated_sc=None,
                      instrument="LISA", custom_psd=None):
    """Computes SNR for circular and evolving sources.
    (The summary previously said "stationary"; the function evolves the orbital
    frequency with ``evol.evol_circ`` and integrates over it, so the sources
    are circular but frequency-evolving.)
    Parameters
    ----------
    m_1 : `float/array`
        Primary mass
    m_2 : `float/array`
        Secondary mass
    f_orb_i : `float/array`
        Initial orbital frequency
    dist : `float/array`
        Distance to the source
    t_obs : `float`
        Total duration of the observation
    n_step : `int`
        Number of time steps during observation duration
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
        NOTE(review): accepted but not forwarded to ``strain.h_c_n`` below
        (unlike in ``snr_ecc_evolving``) -- confirm whether this is intentional.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
        NOTE(review): accepted but currently unused in the body -- see ``position``.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
        NOTE(review): accepted but currently unused in the body -- see ``position``.
    t_merge : `float/array`
        Time until merger
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.
    Returns
    -------
    snr : `float/array`
        SNR for each binary
    """
    m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
    # calculate minimum of observation time and merger time
    if t_merge is None:
        t_merge = evol.get_t_merge_circ(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
    # back off one second from merger so the evolution never hits it exactly
    t_evol = np.minimum(t_merge - (1 * u.s), t_obs)
    # get f_orb evolution
    f_orb_evol = evol.evol_circ(t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
    # presumably 1e2 Hz is a post-merger sentinel from evol_circ -- TODO confirm;
    # replace such entries with the source's last real frequency so the
    # trapezoidal integration below gains no spurious area from them
    maxes = np.where(f_orb_evol == 1e2 * u.Hz, -1 * u.Hz, f_orb_evol).max(axis=1)
    for source in range(len(f_orb_evol)):
        f_orb_evol[source][f_orb_evol[source] == 1e2 * u.Hz] = maxes[source]
    # calculate the characteristic power (n=2 harmonic only, since circular)
    h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=np.zeros_like(f_orb_evol).value, n=2, dist=dist,
                           interpolated_g=interpolated_g)**2
    h_c_n_2 = h_c_n_2.reshape(len(m_c), n_step)
    # calculate the characteristic noise power at the GW frequency 2 * f_orb
    if interpolated_sc is not None:
        h_f_lisa_2 = interpolated_sc(2 * f_orb_evol.flatten())
        h_f_lisa_2 = h_f_lisa_2.reshape(f_orb_evol.shape)
    else:
        h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb_evol, t_obs=t_obs,
                                                instrument=instrument, custom_psd=custom_psd)
    h_c_lisa_2 = (2 * f_orb_evol)**2 * h_f_lisa_2
    # integrate the strain-to-noise ratio over frequency and take the root
    snr = np.trapz(y=h_c_n_2 / h_c_lisa_2, x=2 * f_orb_evol, axis=1)**0.5
    return snr.decompose()
def snr_ecc_evolving(m_1, m_2, f_orb_i, dist, ecc, harmonics_required, t_obs, n_step,
                     position=None, polarisation=None, inclination=None, t_merge=None,
                     interpolated_g=None, interpolated_sc=None, n_proc=1,
                     ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
                     instrument="LISA", custom_psd=None):
    """Computes SNR for eccentric and evolving sources.
    Note that this function will not work for exactly circular (ecc = 0.0)
    binaries.
    Parameters
    ----------
    m_1 : `float/array`
        Primary mass
    m_2 : `float/array`
        Secondary mass
    f_orb_i : `float/array`
        Initial orbital frequency
    dist : `float/array`
        Distance to the source
    ecc : `float/array`
        Eccentricity
    harmonics_required : `int`
        Maximum integer harmonic to compute
    t_obs : `float`
        Total duration of the observation
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
    n_step : `int`
        Number of time steps during observation duration
    t_merge : `float/array`
        Time until merger
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    n_proc : `int`
        Number of processors to split eccentricity evolution over, where
        the default is n_proc=1
    ret_max_snr_harmonic : `boolean`
        Whether to return (in addition to the snr), the harmonic with the maximum SNR
    ret_snr2_by_harmonic : `boolean`
        Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieved by summing and then taking the square root.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.
    Returns
    -------
    snr : `float/array`
        SNR for each binary
    max_snr_harmonic : `int/array`
        harmonic with maximum SNR for each binary (only returned if
        ``ret_max_snr_harmonic=True``)
    """
    m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
    # calculate minimum of observation time and merger time
    if t_merge is None:
        t_merge = evol.get_t_merge_ecc(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i, ecc_i=ecc)
    # stop the evolution slightly before merger (eccentric evolution diverges there)
    t_before = 0.1 * u.yr
    t_evol = np.minimum(t_merge - t_before, t_obs).to(u.s)
    # get eccentricity and f_orb evolutions
    e_evol, f_orb_evol = evol.evol_ecc(ecc_i=ecc, t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2,
                                       f_orb_i=f_orb_i, n_proc=n_proc, t_before=t_before, t_merge=t_merge)
    # presumably (ecc == 0, f_orb == 1e2 Hz) marks post-merger timesteps from
    # evol_ecc -- TODO confirm sentinel; replace those with each source's last
    # real frequency so they add no spurious area to the integral below
    maxes = np.where(np.logical_and(e_evol == 0.0, f_orb_evol == 1e2 * u.Hz),
                     -1 * u.Hz, f_orb_evol).max(axis=1)
    for source in range(len(f_orb_evol)):
        f_orb_evol[source][f_orb_evol[source] == 1e2 * u.Hz] = maxes[source]
    # create harmonics list and multiply for nth frequency evolution
    harms = np.arange(1, harmonics_required + 1).astype(int)
    # f_n_evol has shape (n_source, n_step, harmonics_required)
    f_n_evol = harms[np.newaxis, np.newaxis, :] * f_orb_evol[..., np.newaxis]
    # calculate the characteristic strain
    h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=e_evol, n=harms, dist=dist,
                           position=position, polarisation=polarisation, inclination=inclination,
                           interpolated_g=interpolated_g)**2
    # calculate the characteristic noise power at every harmonic frequency
    if interpolated_sc is not None:
        h_f_lisa = interpolated_sc(f_n_evol.flatten())
    else:
        h_f_lisa = psd.power_spectral_density(f=f_n_evol.flatten(), t_obs=t_obs,
                                              instrument=instrument, custom_psd=custom_psd)
    h_f_lisa = h_f_lisa.reshape(f_n_evol.shape)
    h_c_lisa_2 = f_n_evol**2 * h_f_lisa
    snr_evol = h_c_n_2 / h_c_lisa_2
    # integrate each harmonic over its frequency evolution (axis=1 is time)
    snr_n_2 = np.trapz(y=snr_evol, x=f_n_evol, axis=1)
    if ret_snr2_by_harmonic:
        return snr_n_2
    # sum over harmonics and square root to get the total SNR
    snr_2 = snr_n_2.sum(axis=1)
    snr = np.sqrt(snr_2)
    if ret_max_snr_harmonic:
        # harmonics are 1-indexed, argmax is 0-indexed
        max_snr_harmonic = np.argmax(snr_n_2, axis=1) + 1
        return snr, max_snr_harmonic
    else:
        return snr
|
nilq/baby-python
|
python
|
"""
QUESTION:
This is an interview question asked by Amazon.
There exists a staircase with N steps, and you can climb up either 1 or 2 steps at a time.
Given N, write a function that returns the number of unique ways you can climb the staircase. The order of the steps matters.
For example, if N is 4, then there are 5 unique ways:
1, 1, 1, 1
2, 1, 1
1, 2, 1
1, 1, 2
2, 2
"""
def staircase(s):
    """Return the number of distinct ways to climb ``s`` steps taking 1 or 2 at a time.

    Base cases: 1 way for 1 step, 2 for 2 steps, 3 for 3 steps; otherwise the
    Fibonacci-style recurrence ways(s) = ways(s-1) + ways(s-2).
    """
    # BUG FIX: the original returned the undefined name ``n`` for s <= 3,
    # raising NameError on every base case; the intended value is ``s`` itself.
    return s if s <= 3 else (staircase(s - 1) + staircase(s - 2))
staircase(4)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup
# Package metadata collected in one mapping, then splatted into setup() so the
# configuration can be read (or reused) at a glance before the call happens.
PACKAGE_INFO = dict(
    name='dvaclient',
    version='1.0',
    description='Deep Video Analytics Client',
    author='Akshay Bhat',
    author_email='dvaclient@deepvideoanalytics.com',
    url='https://www.deepvideoanalytics.com/',
    packages=['dvaclient'],
    package_data={'dvaclient': ['schema.json']},
    include_package_data=True,
    install_requires=['jsonschema', 'requests'],
)
setup(**PACKAGE_INFO)
|
nilq/baby-python
|
python
|
from chainer.backends import cuda
import numpy as np
def mask_to_bbox(mask):
    """Compute the bounding boxes around the masked regions.

    Works with both :obj:`numpy.ndarray` and :obj:`cupy.ndarray` inputs.

    Args:
        mask (array): Boolean array of shape :math:`(R, H, W)`, one mask per
            instance.

    Returns:
        array: Bounding boxes of shape :math:`(R, 4)` as
        ``(y_min, x_min, y_max, x_max)`` in :obj:`numpy.float32`. Rows for
        empty masks stay all-zero.
    """
    n_masks = mask.shape[0]
    # dispatch to numpy or cupy depending on where the mask lives
    xp = cuda.get_array_module(mask)
    instance_ids, row_ids, col_ids = xp.nonzero(mask)
    bbox = xp.zeros((n_masks, 4), dtype=np.float32)
    for idx in range(n_masks):
        selected = instance_ids == idx
        rows_i = row_ids[selected]
        cols_i = col_ids[selected]
        if len(rows_i) == 0:
            # empty mask: leave the all-zero row in place
            continue
        # max + 1 makes the box half-open on the bottom/right edge
        corners = [rows_i.min(), cols_i.min(), rows_i.max() + 1, cols_i.max() + 1]
        bbox[idx] = xp.array(corners, dtype=np.float32)
    return bbox
|
nilq/baby-python
|
python
|
from draco.programs import constraints, definitions, generate, hard, helpers
def test_has_definitions():
    """The definitions program is non-empty and split into named blocks."""
    assert len(definitions.program)
    assert len(definitions.blocks)
def test_definitions_has_marktype():
    """definitions must contain a 'mark_type' block."""
    assert "mark_type" in definitions.blocks
def test_has_constraints():
    """The constraints program is non-empty and split into named blocks."""
    assert len(constraints.program)
    assert len(constraints.blocks)
def test_constraints_has_invalid_domain():
    """constraints must contain an 'invalid_domain' block."""
    assert "invalid_domain" in constraints.blocks
def test_has_generate():
    """The generate program is non-empty and split into named blocks."""
    assert len(generate.program)
    assert len(generate.blocks)
def test_generate_has_marktype():
    """generate must contain a 'mark_type' block."""
    assert "mark_type" in generate.blocks
def test_has_hard():
    """The hard-constraints program is non-empty and split into named blocks."""
    assert len(hard.program)
    assert len(hard.blocks)
def test_has_helpers():
    """The helpers program is non-empty and split into named blocks."""
    assert len(helpers.program)
    assert len(helpers.blocks)
|
nilq/baby-python
|
python
|
from wpilib import DigitalInput
import robotmap
# DigitalInput handle for the gear mechanism limit switch; stays None until
# init() is called, so callers must not read it before initialization.
gear_mech_switch = None
def init():
    """
    Initialize switch objects.
    Creates the gear mechanism :class:`DigitalInput` on the channel configured
    in ``robotmap.switches.gear_switch_channel`` and stores it in the
    module-level ``gear_mech_switch``.
    """
    global gear_mech_switch
    gear_mech_switch = DigitalInput(robotmap.switches.gear_switch_channel)
|
nilq/baby-python
|
python
|
class Solution:
    """Tribonacci numbers via 3x3 matrix exponentiation.

    ``power`` squares the transition matrix, so ``tribonacci`` runs in
    O(log n) matrix multiplications instead of O(n) additions.
    """

    def multiply(self, T, M):
        """In-place 3x3 matrix product: T <- T @ M (returns None)."""
        product = [[sum(T[row][k] * M[k][col] for k in range(3))
                    for col in range(3)]
                   for row in range(3)]
        # copy the result back so the caller's matrix object is updated
        for row in range(3):
            for col in range(3):
                T[row][col] = product[row][col]

    def power(self, T, n):
        """In-place exponentiation by squaring: T <- T**n.

        T must start out as the tribonacci transition matrix, since the
        odd-exponent step multiplies by that constant base matrix.
        """
        if n in (0, 1):
            return
        self.power(T, n // 2)
        self.multiply(T, T)
        if n % 2:
            self.multiply(T, [[1, 1, 1], [1, 0, 0], [0, 1, 0]])

    def tribonacci(self, n: int) -> int:
        """Return the n-th tribonacci number (T0=0, T1=T2=1)."""
        if n == 0:
            return 0
        if n <= 2:
            return 1
        # transition matrix: [T(k+1), T(k), T(k-1)] = M @ [T(k), T(k-1), T(k-2)]
        T = [[1, 1, 1], [1, 0, 0], [0, 1, 0]]
        self.power(T, n - 1)
        return T[0][0]
|
nilq/baby-python
|
python
|
# The [START x]/[END x] comments are documentation region tags: everything
# between a matching pair is excerpted into docs, so _bar appears in both the
# method_one and method_two excerpts. Do not rename or reorder these markers.
# [START method_one]
# [START method_two]
def _bar():
    """Shared helper included in both doc regions (underscore = non-public)."""
    return 'Underscores in method names denote helper methods.'
# [END method_one]
# [END method_two]
# [START method_one]
def return_one():
    """Return the constant 1 (sample body for the method_one region)."""
    return 1
# [END method_one]
# [START method_two]
def return_two():
    """Return the constant 2 (sample body for the method_two region)."""
    return 2
# [END method_two]
|
nilq/baby-python
|
python
|
class Song:
    """A single song.

    Attributes:
        title (str): The title of the song.
        artist (Artist): Artist object representing the song's creator.
        duration (int): Length of the song in seconds; may be zero.
    """

    def __init__(self, title, artist, duration=0):
        self.duration = duration
        self.artist = artist
        self.title = title
class Album:
    """An album represented by its track list.

    Attributes:
        name (str): The name of the album.
        year (int): The year the album was released.
        artist (Artist): The artist responsible for the album; defaults to an
            Artist named "Various Artists" when none is given.
        tracks (list[Song]): The songs on the album, in order.

    Methods:
        add_song: Append or insert a song into the track list.
    """

    def __init__(self, name, year, artist=None):
        self.name = name
        self.year = year
        # fall back to a generic compilation artist when none is supplied
        self.artist = Artist("Various Artists") if artist is None else artist
        self.tracks = []

    def add_song(self, song, position=None):
        """Add a song to the track list.

        Args:
            song (Song): The song to add.
            position (Optional[int]): When given, insert the song at that
                index (shifting later tracks); otherwise append to the end.
        """
        if position is None:
            self.tracks.append(song)
            return
        self.tracks.insert(position, song)
# help(Song)
# help(Song.__init__)
|
nilq/baby-python
|
python
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.BarSelectionMenu
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
from pirates.piratesgui import GuiPanel
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.reputation import ReputationGlobals
from pirates.battle import WeaponGlobals
from pirates.economy import EconomyGlobals
from pirates.economy.EconomyGlobals import *
from pirates.piratesbase import Freebooter
from pirates.inventory import ItemGlobals
class BarSelectionMenu(GuiPanel.GuiPanel):
    # Decompiled GUI panel: a horizontal weapon-selection bar. Each item gets
    # an icon, an "F<n>" hotkey label and a reputation meter; a cursor frame
    # highlights the current choice, which is auto-confirmed after
    # SelectionDelay seconds of no further navigation.
    # NOTE(review): relies on Panda3D's builtin globals (loader, base, taskMgr,
    # localAvatar) being installed -- this class is not importable standalone.
    __module__ = __name__
    notify = DirectNotifyGlobal.directNotify.newCategory('BarSelectionMenu')
    # layout constants (aspect2d units)
    ICON_WIDTH = 0.13
    HEIGHT = 0.15
    # seconds of inactivity before the highlighted choice is confirmed
    SelectionDelay = 0.6
    def __init__(self, items, command=None):
        # items: sequence of (weaponId, hotkeyNumber) pairs, possibly with None
        # placeholders (loadWeaponButtons skips those); command: callable
        # invoked on confirmation as command(weaponId, hotkeyNumber, fromWheel=1)
        GuiPanel.GuiPanel.__init__(self, None, 1.0, self.HEIGHT, showClose=0)
        self.items = items
        self.icons = []
        self.hotkeys = []
        self.repMeters = []
        self.choice = 0
        self.command = command
        self.hideTask = None
        card = loader.loadModel('models/textureCards/selectionGui')
        texCard = card.find('**/main_gui_general_box_over')
        # cursor frame that is moved over the currently selected icon
        self.cursor = DirectFrame(parent=self, state=DGG.DISABLED, relief=None,
                                  frameSize=(0, 0.08, 0, 0.08),
                                  pos=(0.08, 0, 0.07), geom=texCard, geom_scale=0.12)
        self.cursor.setTransparency(1)
        self.cursor.resetFrameSize()
        card.removeNode()
        self.initialiseoptions(BarSelectionMenu)
        self.card = loader.loadModel('models/gui/gui_icons_weapon')
        self.accept('escape', self.__handleCancel)
        self.loadWeaponButtons()
        self.hide()
        return
    def loadWeaponButtons(self):
        # Rebuild all icons, hotkey labels and reputation meters from
        # self.items, destroying whatever widgets were there before.
        for hotkey in self.hotkeys:
            hotkey.destroy()
        self.hotkeys = []
        for icon in self.icons:
            icon.destroy()
        self.icons = []
        for repMeter in self.repMeters:
            repMeter.destroy()
        self.repMeters = []
        # size the panel to one ICON_WIDTH slot per item and centre it on x
        self['frameSize'] = (
            0, self.ICON_WIDTH * len(self.items) + 0.04, 0, self.HEIGHT)
        self.setX(-((self.ICON_WIDTH * len(self.items) + 0.04) / 2.0))
        topGui = loader.loadModel('models/gui/toplevel_gui')
        kbButton = topGui.find('**/keyboard_button')
        for i in range(len(self.items)):
            if self.items[i]:
                category = WeaponGlobals.getRepId(self.items[i][0])
                icon = DirectFrame(parent=self, state=DGG.DISABLED, relief=None,
                                   frameSize=(0, 0.08, 0, 0.08),
                                   pos=(self.ICON_WIDTH * i + 0.08, 0, 0.082))
                icon.setTransparency(1)
                # "F<n>" hotkey label drawn above the icon
                hotkeyText = 'F%s' % self.items[i][1]
                hotkey = DirectFrame(parent=icon, state=DGG.DISABLED, relief=None,
                                     text=hotkeyText, text_align=TextNode.ACenter,
                                     text_scale=0.045, text_pos=(0, 0),
                                     text_fg=PiratesGuiGlobals.TextFG2,
                                     text_shadow=PiratesGuiGlobals.TextShadow,
                                     image=kbButton, image_scale=0.06,
                                     image_pos=(0, 0, 0.01),
                                     image_color=(0.5, 0.5, 0.35, 1),
                                     pos=(0, 0, 0.08))
                self.hotkeys.append(hotkey)
                category = WeaponGlobals.getRepId(self.items[i][0])
                if Freebooter.getPaidStatus(base.localAvatar.getDoId()) or Freebooter.allowedFreebooterWeapon(category):
                    # paid player (or free-allowed weapon): show the weapon icon
                    asset = ItemGlobals.getIcon(self.items[i][0])
                    if asset:
                        texCard = self.card.find('**/%s' % asset)
                        icon['geom'] = texCard
                        icon['geom_scale'] = 0.08
                        icon.resetFrameSize()
                        self.icons.append(icon)
                else:
                    # locked for freebooters: show the subscriber-key icon instead
                    texCard = topGui.find('**/pir_t_gui_gen_key_subscriber*')
                    icon['geom'] = texCard
                    icon['geom_scale'] = 0.2
                    icon.resetFrameSize()
                    self.icons.append(icon)
                # small reputation progress bar under the icon
                repMeter = DirectWaitBar(parent=icon, relief=DGG.SUNKEN, state=DGG.DISABLED,
                                         borderWidth=(0.002, 0.002), range=0, value=0,
                                         frameColor=(0.24, 0.24, 0.21, 1),
                                         barColor=(0.8, 0.8, 0.7, 1),
                                         pos=(-0.05, 0, -0.0525), hpr=(0, 0, 0),
                                         frameSize=(0.005, 0.095, 0, 0.0125))
                self.repMeters.append(repMeter)
                inv = base.localAvatar.getInventory()
                if inv:
                    repValue = inv.getReputation(category)
                    level, leftoverValue = ReputationGlobals.getLevelFromTotalReputation(category, repValue)
                    # NOTE(review): "max" shadows the builtin; harmless locally
                    max = ReputationGlobals.getReputationNeededToLevel(category, level)
                    repMeter['range'] = max
                    repMeter['value'] = leftoverValue
        return
    def selectPrev(self):
        # Move the cursor one slot left (wrapping around), skipping weapons an
        # unpaid player cannot use, then schedule the auto-confirm task.
        if len(self.items) < 1:
            return
        self.show()
        if len(self.items) > 1:
            keepTrying = True
        else:
            keepTrying = False
        while keepTrying:
            keepTrying = False
            self.choice = self.choice - 1
            if self.choice < 0 or self.choice > len(self.items) - 1:
                self.choice = len(self.items) - 1
            if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
                if self.items[self.choice]:
                    category = WeaponGlobals.getRepId(self.items[self.choice][0])
                    if not Freebooter.allowedFreebooterWeapon(category):
                        keepTrying = True
                else:
                    keepTrying = True
        self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
        # restart the delayed confirm; the task name is parent-specific
        taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
        self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
    def selectNext(self):
        # Move the cursor one slot right (wrapping around), skipping weapons an
        # unpaid player cannot use, then schedule the auto-confirm task.
        # NOTE(review): unlike selectPrev this does not guard against None
        # entries in self.items before indexing -- confirm items cannot be
        # None when this path runs.
        if len(self.items) < 1:
            return
        self.show()
        if len(self.items) > 1:
            keepTrying = True
        else:
            keepTrying = False
        while keepTrying:
            keepTrying = False
            self.choice = self.choice + 1
            if self.choice > len(self.items) - 1:
                self.choice = 0
            if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
                category = WeaponGlobals.getRepId(self.items[self.choice][0])
                if not Freebooter.allowedFreebooterWeapon(category):
                    keepTrying = True
        self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
        taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
        self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
    def selectChoice(self, weaponId):
        # Jump the cursor directly to weaponId (if present), then hide after a
        # longer delay without invoking the confirm command.
        if len(self.items) < 1:
            return
        if weaponId not in self.items:
            return
        self.show()
        self.choice = self.items.index(weaponId)
        self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
        taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
        self.hideTask = taskMgr.doMethodLater(self.SelectionDelay * 2, self.hide, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
    def confirmSelection(self):
        # Auto-confirm callback: hide the bar and fire the command with the
        # selected (weaponId, hotkeyNumber) pair.
        self.hide()
        if self.command and self.choice < len(self.items):
            self.command(self.items[self.choice][0], self.items[self.choice][1], fromWheel=1)
    def update(self, items):
        # Replace the item list and rebuild the widgets only when it changed.
        if self.items != items:
            self.items = items
            self.loadWeaponButtons()
    def updateRep(self, category, value):
        # Refresh the reputation meter(s) of every item whose rep category
        # matches the one that changed.
        for i in range(len(self.items)):
            repId = WeaponGlobals.getRepId(self.items[i][0])
            if repId == category:
                level, leftoverValue = ReputationGlobals.getLevelFromTotalReputation(category, value)
                max = ReputationGlobals.getReputationNeededToLevel(category, level)
                if len(self.repMeters) - 1 >= i:
                    self.repMeters[i]['range'] = max
                    self.repMeters[i]['value'] = leftoverValue
    def destroy(self):
        # Idempotent teardown: the 'destroyed' flag guards against double calls.
        if hasattr(self, 'destroyed'):
            return
        self.destroyed = 1
        taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
        self.ignore('escape')
        for icon in self.icons:
            icon.destroy()
            icon = None
        self.icons = []
        if self.card:
            self.card.removeNode()
            self.card = None
        GuiPanel.GuiPanel.destroy(self)
        return
    def __handleCancel(self):
        # Escape pressed: cancel the pending confirm and restore the choice to
        # the weapon the avatar currently holds.
        taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
        self.hide()
        for item in self.items:
            if item:
                # NOTE(review): decompiled one-liner -- sets index to False when
                # the ids differ, and assigns self.choice on EVERY iteration;
                # confirm against original bytecode before touching this.
                index = localAvatar.currentWeaponId == item[0] and self.items.index(item)
                self.choice = index
        return
    def hide(self):
        # Re-show the combat skill tray (if present) whenever this bar hides.
        if hasattr(base, 'localAvatar'):
            if hasattr(localAvatar.guiMgr.combatTray, 'skillTray'):
                localAvatar.guiMgr.combatTray.skillTray.show()
        GuiPanel.GuiPanel.hide(self)
|
nilq/baby-python
|
python
|
import argparse
from paz.pipelines import SSD300FAT, SSD300VOC, SSD512COCO, SSD512YCBVideo
from paz.backend.camera import VideoPlayer, Camera
import tensorflow as tf
# Enable memory growth on the first GPU so TF does not grab all GPU memory.
# BUG FIX: the original indexed gpus[0] unconditionally, which raises
# IndexError on CPU-only machines; only configure a GPU when one exists.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)
parser = argparse.ArgumentParser(description='SSD object detection demo')
parser.add_argument('-c', '--camera_id', type=int, default=0,
                    help='Camera device ID')
parser.add_argument('-s', '--score_thresh', type=float, default=0.6,
                    help='Box/class score threshold')
parser.add_argument('-n', '--nms_thresh', type=float, default=0.45,
                    help='non-maximum suppression threshold')
parser.add_argument('-d', '--dataset', type=str, default='VOC',
                    choices=['VOC', 'COCO', 'YCBVideo', 'FAT'],
                    help='Dataset name')
args = parser.parse_args()
# map the dataset name to the matching pretrained SSD pipeline class
name_to_model = {'VOC': SSD300VOC,
                 'FAT': SSD300FAT,
                 'COCO': SSD512COCO,
                 'YCBVideo': SSD512YCBVideo}
pipeline = name_to_model[args.dataset]
detect = pipeline(args.score_thresh, args.nms_thresh)
# stream webcam frames through the detector and display the result
camera = Camera(args.camera_id)
player = VideoPlayer((1280, 960), detect, camera)
player.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, SRI International
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of SRI International nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Acorn Pooley, Mike Lautman
# 7/11/18: Updated for use by Robust Autonomy and Decisions Group by Samantha Kim
# Action code based on ROS wiki tutorial "Writing a Simple Action Server using the
# Execute Callback (Python)".
import sys
import copy
import rospy
import tf
import actionlib
import object_experiments.msg
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
class MoveGroupPythonInterface(object):
    """Thin MoveIt commander wrapper for the UR10 'manipulator' planning group.

    Python 2 code (uses print statements). Initializes moveit_commander,
    caches the robot/scene/group handles and starts with conservative
    velocity/acceleration scaling (0.1); individual motions can override the
    scaling via go_to_pose_goal / go_to_joint_state.
    """
    def __init__(self):
        super(MoveGroupPythonInterface, self).__init__()
        ## Initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        ## Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
        ## the robot:
        robot = moveit_commander.RobotCommander()
        ## Instantiate a `PlanningSceneInterface`_ object. This object is an interface
        ## to the world surrounding the robot:
        scene = moveit_commander.PlanningSceneInterface()
        ## Instantiate a `MoveGroupCommander`_ object. This object is an interface
        ## to one group of joints. In this case the group is the joints in the Panda
        ## arm so we set ``group_name = panda_arm``. If you are using a different robot,
        ## you should change this value to the name of your robot arm planning group.
        ## This interface can be used to plan and execute motions on the Panda:
        group_name = "manipulator"
        group = moveit_commander.MoveGroupCommander(group_name)
        # Initialize velocity and acceleration scaling factors to prevent
        # overly fast movements. Can be changed later using the go_to_pose_goal
        # and go_to_joint_state functions.
        group.set_max_acceleration_scaling_factor(0.1)
        group.set_max_velocity_scaling_factor(0.1)
        ## Create a `DisplayTrajectory`_ publisher which may be used to publish
        ## trajectories for RViz to visualize:
        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                       moveit_msgs.msg.DisplayTrajectory,
                                                       queue_size=20)
        # Print name of the reference frame for this robot:
        planning_frame = group.get_planning_frame()
        print "============ Reference frame: %s" % planning_frame
        # Print the name of the end-effector link for this group:
        eef_link = group.get_end_effector_link()
        print "============ End effector: %s" % eef_link
        # List of all the groups in the robot:
        group_names = robot.get_group_names()
        print "============ Robot Groups:", robot.get_group_names()
        # Print the state of the robot:
        print "============ Printing robot state"
        print robot.get_current_state()
        print ""
        # Misc variables cached for the other methods
        self.box_name = ''
        self.robot = robot
        self.scene = scene
        self.group = group
        self.display_trajectory_publisher = display_trajectory_publisher
        self.planning_frame = planning_frame
        self.eef_link = eef_link
        self.group_names = group_names
    def knock_blocks(self):
        """
        Function: knock_blocks
        ----------------------------------
        UR10 will go to a pre-collision pose, bend at the elbow joint,
        colliding with the block setup, and return to a neutral pose.
        Poses were found using the teaching pendant.
        """
        # Init pose (neutral pose the arm returns to at the end)
        init_pose = geometry_msgs.msg.Pose()
        init_pose.position.x = -0.488798937651
        init_pose.position.y = 0.104866858129
        init_pose.position.z = -1.0074033753
        init_pose.orientation.x = 0.495021656851
        init_pose.orientation.y = 0.516180354965
        init_pose.orientation.z = -0.48224425657
        init_pose.orientation.w = 0.505916868074
        # Pre-collision pose
        pre_coll_pose = geometry_msgs.msg.Pose()
        pre_coll_pose.position.x = -0.558980015093
        pre_coll_pose.position.y = 0.290542710322
        pre_coll_pose.position.z = -1.04752385597
        pre_coll_pose.orientation.x = 0.485564471115
        pre_coll_pose.orientation.y = 0.524631938133
        pre_coll_pose.orientation.z = -0.503994513944
        pre_coll_pose.orientation.w = 0.484745297859
        # Post-collision pose
        # NOTE(review): post_coll_pose is built but never commanded below --
        # the post-collision motion is done via a joint-space elbow bend
        # instead; confirm the unused pose is intentional.
        post_coll_pose = geometry_msgs.msg.Pose()
        post_coll_pose.position.x = -0.397289172218
        post_coll_pose.position.y = 0.290860833622
        post_coll_pose.position.z = -1.07770150547
        post_coll_pose.orientation.x = 0.485054814234
        post_coll_pose.orientation.y = 0.524070568573
        post_coll_pose.orientation.z = -0.504404664407
        post_coll_pose.orientation.w = 0.485448275813
        self.go_to_pose_goal(pre_coll_pose, .1, .1)
        # Post-collision joint state: bend the elbow (joint 3) by 30 degrees
        post_coll_joint_goal = self.group.get_current_joint_values()
        post_coll_joint_goal[3] -= pi / 6
        self.go_to_joint_state(post_coll_joint_goal, .1, .1)
        self.go_to_pose_goal(init_pose, .1, .1)
    def go_to_joint_state(self, joint_goal, velocity, acceleration):
        """
        Function: go_to_joint_state
        ------------------------------------
        Moves the robot to the specified joint state with the
        specified velocity and acceleration. Velocity
        and acceleration are values between [0,1], corresponding
        to the scaling factor for the reduction of the maximum
        joint velocity and acceleration.
        """
        # Set velocity and acceleration scaling factors.
        self.group.set_max_velocity_scaling_factor(velocity)
        self.group.set_max_acceleration_scaling_factor(acceleration)
        self.group.go(joint_goal, wait=True)
        # Calling ``stop()`` ensures that there is no residual movement
        self.group.stop()
    def go_to_pose_goal(self, pose_goal, velocity, acceleration):
        """
        Function: go_to_pose_goal
        ------------------------------------
        Plans a pose goal and executes the path. This method is preferable
        to cartesian path planning and execution because velocity and
        acceleration limitations can be set.
        """
        # Set velocity and acceleration scaling factors.
        self.group.set_max_velocity_scaling_factor(velocity)
        self.group.set_max_acceleration_scaling_factor(acceleration)
        # Add pose goals and execute path
        self.group.set_pose_target(pose_goal)
        plan = self.group.go(wait=True)
        # Calling `stop()` ensures that there is no residual movement
        self.group.stop()
        # It is always good to clear your targets after planning with poses.
        # Note: there is no equivalent function for clear_joint_value_targets()
        self.group.clear_pose_targets()
    def get_formatted_current_pose(self, pose_name):
        """
        Function: get_formatted_current_pose
        ------------------------------------
        Prints to screen the current pose of the robot in a format that allows
        for easy hardcoding of a particular pose.
        """
        current_pose = self.group.get_current_pose()
        print pose_name + " = geometry_msgs.msg.Pose()"
        print pose_name + ".position.x = " + str(current_pose.pose.position.x)
        print pose_name + ".position.y = " + str(current_pose.pose.position.y)
        print pose_name + ".position.z = " + str(current_pose.pose.position.z)
        print pose_name + ".orientation.x = " + str(current_pose.pose.orientation.x)
        print pose_name + ".orientation.y = " + str(current_pose.pose.orientation.y)
        print pose_name + ".orientation.z = " + str(current_pose.pose.orientation.z)
        print pose_name + ".orientation.w = " + str(current_pose.pose.orientation.w)
class Choreography(object):
    """
    Set up an Action Server that expects ChoreographyAction messages.
    When a ChoreographyAction goal is received, it executes the type of
    choreography specified by the message.
    """
    # class-level defaults; ``success`` is re-assigned per instance in execute()
    feedback = object_experiments.msg.ChoreographyFeedback()
    result = object_experiments.msg.ChoreographyResult()
    success = True
    def __init__(self, name):
        # name: action namespace for the server (the node name in this file)
        self.action_name = name
        self.server = actionlib.SimpleActionServer(self.action_name,
                                                   object_experiments.msg.ChoreographyAction,
                                                   self.execute,
                                                   auto_start = False)
        self.server.start()
        print("ActionServer initialized.")
    def execute(self,goal):
        # Goal callback: run the requested choreography, then report success.
        # NOTE(review): nothing in execute_choreography calls check_preempt(),
        # so self.success can only remain True here -- confirm preemption is
        # actually wired into the choreography routines.
        self.success = True
        rospy.loginfo('Starting choreography: %s' % (goal))
        execute_choreography(goal)
        if self.success:
            rospy.loginfo('%s: Succeeded' % self.action_name)
            self.server.set_succeeded(self.result)
    def check_preempt(self):
        # Return False (and mark the goal preempted/failed) when the client
        # has requested preemption; True means it is safe to continue.
        if self.server.is_preempt_requested():
            rospy.loginfo('%s: Preemepted' % self.action_name)
            self.server.set_preempted()
            self.success = False
            return False
        return True
def execute_choreography(goal):
    """
    Function: execute_choreography
    ------------------------------------
    Executes the choreography specified as a goal.
    Additional choreography options should be included here
    as if/elif cases.
    Builds a fresh MoveGroupPythonInterface per goal and dispatches on
    ``goal.choreography.data``; ROS/keyboard interrupts abort silently.
    """
    try:
        # Initialize MoveIt commander (connects to the running move_group node)
        robot_commander = MoveGroupPythonInterface()
        # Execute choreography
        print("Executing: " + goal.choreography.data)
        if goal.choreography.data == "knock_blocks":
            robot_commander.knock_blocks()
        elif goal.choreography.data == "get_formatted_current_pose":
            robot_commander.get_formatted_current_pose("your_pose_name")
        # NOTE(review): unrecognized choreography names fall through silently
        print "============ Choreography complete!"
    except rospy.ROSInterruptException:
        return
    except KeyboardInterrupt:
        return
if __name__ == '__main__':
    # Start the choreography action server under this node's resolved name
    # and spin until shutdown.
    rospy.init_node('choreography')
    server = Choreography(rospy.get_name())
    rospy.spin()
|
nilq/baby-python
|
python
|
import os
import shutil
import urllib.request
import zipfile
from math import floor
from random import shuffle
def download_dataset():
    """Fetch the Tiny ImageNet 200 zip archive into the working directory."""
    print('Beginning dataset download with urllib2')
    archive_url = "http://cs231n.stanford.edu/tiny-imagenet-200.zip"
    target_path = "{}/tiny-imagenet-200.zip".format(os.getcwd())
    urllib.request.urlretrieve(archive_url, target_path)
    print("Dataset downloaded")
def unzip_data():
    """Extract the downloaded Tiny ImageNet archive into the working directory."""
    archive = "{}/tiny-imagenet-200.zip".format(os.getcwd())
    destination = os.getcwd()
    print("Extracting zip file: %s" % archive)
    with zipfile.ZipFile(archive, 'r') as archive_handle:
        archive_handle.extractall(destination)
    print("Extracted at: %s" % destination)
def format_val():
    """Reorganize the flat val/ split into per-class (wnind) directories.

    Reads val_annotations.txt (img, wnind, 4 box coords per line), then for
    each of the expected 200 classes creates val/<wnind>/images/, copies the
    class's images there, and writes a <wnind>_boxes.txt with the box
    coordinates. Finally removes the original flat val/images directory.
    """
    import shutil  # local import: portable file copy / tree removal

    val_dir = "%s/tiny-imagenet-200/val" % os.getcwd()
    print("Formatting: %s" % val_dir)
    val_annotations = "%s/val_annotations.txt" % val_dir
    val_dict = {}
    with open(val_annotations, 'r') as f:
        for line in f:
            fields = line.strip().split()
            assert(len(fields) == 6)
            img_name = fields[0]
            wnind = fields[1]
            boxes = '\t'.join(fields[2:])
            val_dict.setdefault(wnind, []).append((img_name, boxes))
    assert(len(val_dict) == 200)
    val_images_dir = "%s/images" % val_dir
    for wnind, entries in val_dict.items():
        val_wnind_dir = "%s/%s" % (val_dir, wnind)
        val_wnind_images_dir = "%s/images" % val_wnind_dir
        os.mkdir(val_wnind_dir)
        os.mkdir(val_wnind_images_dir)
        wnind_boxes = "%s/%s_boxes.txt" % (val_wnind_dir, wnind)
        # BUG FIX: the boxes file was opened without `with`, leaking the
        # handle on any error; `cp` via os.system was unchecked and
        # non-portable. shutil.copy raises on failure instead.
        with open(wnind_boxes, "w") as f:
            for img_name, box in entries:
                source = "%s/%s" % (val_images_dir, img_name)
                dst = "%s/%s" % (val_wnind_images_dir, img_name)
                shutil.copy(source, dst)
                f.write("%s\t%s\n" % (img_name, box))
    # Remove the now-redundant flat images directory (was `rm -rf` via shell).
    shutil.rmtree(val_images_dir)
    print("Cleaning up: %s" % val_images_dir)
    print("Formatting val done")
def split_train_test():
    """Merge train/ and val/ and re-split each class ~70/30 into new_train/new_test.

    For every wnind listed in wnids.txt, shuffles the images of each source
    split (train, then val) and sends floor(70%) of each to new_train/ and the
    rest to new_test/, rewriting the per-class boxes files. The original
    train/ and val/ trees are removed afterwards.
    """
    import shutil  # local import: portable file copy / tree removal

    split_quota = 0.7
    print("Splitting Train+Val into %s-%s" % (split_quota*100, (1 - split_quota)*100))
    base_dir = "%s/tiny-imagenet-200" % os.getcwd()
    train_dir = "%s/train" % base_dir
    val_dir = "%s/val" % base_dir
    fwnind = "%s/wnids.txt" % base_dir
    wninds = set()
    with open(fwnind, "r") as f:
        for wnind in f:
            wninds.add(wnind.strip())
    assert(len(wninds) == 200)
    new_train_dir = "%s/new_train" % base_dir
    new_test_dir = "%s/new_test" % base_dir
    os.mkdir(new_train_dir)
    os.mkdir(new_test_dir)
    total_ntrain = 0
    total_ntest = 0
    for wnind in wninds:
        wnind_ntrain = 0
        wnind_ntest = 0
        new_train_wnind_dir = "%s/%s" % (new_train_dir, wnind)
        new_test_wnind_dir = "%s/%s" % (new_test_dir, wnind)
        os.mkdir(new_train_wnind_dir)
        os.mkdir(new_test_wnind_dir)
        os.mkdir(new_train_wnind_dir + "/images")
        os.mkdir(new_test_wnind_dir + "/images")
        new_train_wnind_boxes = "%s/%s_boxes.txt" % (new_train_wnind_dir, wnind)
        new_test_wnind_boxes = "%s/%s_boxes.txt" % (new_test_wnind_dir, wnind)
        # BUG FIX: the boxes files were opened without `with` and only closed
        # on the happy path; now they are closed even if a copy fails.
        with open(new_train_wnind_boxes, "w") as f_ntrain, \
                open(new_test_wnind_boxes, "w") as f_ntest:
            for wdir in [train_dir, val_dir]:
                wnind_dir = "%s/%s" % (wdir, wnind)
                wnind_boxes = "%s/%s_boxes.txt" % (wnind_dir, wnind)
                imgs = []
                with open(wnind_boxes, "r") as f:
                    for line in f:
                        fields = line.strip().split()
                        img_name = fields[0]
                        boxes = '\t'.join(fields[1:])
                        imgs.append((img_name, boxes))
                print("[Old] wind: %s - #: %s" % (wnind, len(imgs)))
                shuffle(imgs)
                # BUG FIX: the split ratio was hard-coded as 0.7 here,
                # silently ignoring split_quota defined above.
                split_n = floor(len(imgs) * split_quota)
                train_imgs = imgs[:split_n]
                test_imgs = imgs[split_n:]
                for img_name, box in train_imgs:
                    source = "%s/images/%s" % (wnind_dir, img_name)
                    dst = "%s/images/%s" % (new_train_wnind_dir, img_name)
                    shutil.copy(source, dst)  # was unchecked `cp` via os.system
                    f_ntrain.write("%s\t%s\n" % (img_name, box))
                    wnind_ntrain += 1
                for img_name, box in test_imgs:
                    source = "%s/images/%s" % (wnind_dir, img_name)
                    dst = "%s/images/%s" % (new_test_wnind_dir, img_name)
                    shutil.copy(source, dst)
                    f_ntest.write("%s\t%s\n" % (img_name, box))
                    wnind_ntest += 1
        print("[New] wnind: %s - #train: %s - #test: %s" % (wnind, wnind_ntrain,
                                                            wnind_ntest))
        total_ntrain += wnind_ntrain
        total_ntest += wnind_ntest
    print("[New] #train: %s - #test: %s" % (total_ntrain, total_ntest))
    # Remove the merged source trees (was `rm -rf` via shell).
    shutil.rmtree(train_dir)
    shutil.rmtree(val_dir)
    print("Cleaning up: %s" % train_dir)
    print("Cleaning up: %s" % val_dir)
    print("Created new train data at: %s" % new_train_dir)
    print("Cleaning new test data at: %s" % new_test_dir)
    print("Splitting dataset done")
def main():
    """Run the enabled dataset-preparation steps in order."""
    # download_dataset()  # disabled: assumes the zip archive is already present
    unzip_data()
    format_val()
    # split_train_test()  # optional: re-split merged train+val 70/30


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from behaviors.button import ButtonBehavior, ToggleButtonBehavior
from behaviors.touch_effecs import EffectBehavior
from kivy.uix.image import Image
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import (
ListProperty, ObjectProperty,
)
from kivy.graphics import Color, Rectangle
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.clock import Clock
# kv rules for the icon widgets defined below; loaded once at import time.
Builder.load_string('''
<KVAnchorIcon>:
    size_hint_x:None
    width:'70dp'
    anchor_x:'center'
    anchor_y:'center'
<KVButtonIcon>:
    icon_color:[1, 1, 1, 1]
    size_hint:None, None
    size:'40dp', '40dp'
    mipmap:True
    allow_stretch:True
    keep_ratio:False
    canvas:
        Clear
    canvas.before:
        Clear
        Color:
            rgba:self.icon_color
        Rectangle:
            texture:self.texture
            pos:self.pos
            size:self.size
<KVToggleButtonIcon>:
    size:'30dp', '30dp'
''', filename="KVIcon.kv")
# BUG FIX above: the rule previously set `allow_strech` (typo), which kv
# silently creates as a new, unused property; the Image property that
# actually controls stretching is `allow_stretch`.
class KVAnchorIcon(AnchorLayout):
    """Anchor layout that can paint a solid background rectangle behind itself."""
    # RGBA background; the rectangle is (re)created whenever this changes.
    background_color = ListProperty([0, 0, 0, 0])
    # Handle to the drawn background Rectangle instruction.
    back = ObjectProperty(None)

    def on_background_color(self, *args):
        """(Re)draw the background and keep it tracking size/pos changes."""
        # Re-register the geometry listeners so they are bound exactly once.
        self.unbind(size=self.update_background)
        self.unbind(pos=self.update_background)
        self.bind(size=self.update_background)
        self.bind(pos=self.update_background)
        with self.canvas.before:
            Color(rgba=self.background_color)
            self.back = Rectangle(size=self.size, pos=self.pos)

    def update_background(self, *args):
        """Keep the background rectangle glued to the widget's geometry."""
        rect = self.back
        rect.size = self.size
        rect.pos = self.pos
class KVButtonIcon(EffectBehavior, ButtonBehavior, Image):
    """Clickable image icon with a touch ripple and mouse-hover swapping.

    Optional configuration lists (each must hold exactly 2 entries to apply):
      * pos_sources:   [resting, hovered] image sources swapped on mouse-over.
      * state_sources: [normal, down] image sources swapped on press state.
      * pos_color:     tint applied while hovered (when not transparent).
      * defaut_color:  resting tint (name misspelled upstream; kept because
        it is a public property used from kv/callers).
    Dispatches custom `on_mouse_inside` / `on_mouse_outside` events.
    """
    # RGBA of the ripple effect (consumed by EffectBehavior).
    effect_color = ListProperty([0, 0, 0, 0])
    defaut_color = ListProperty([1, 1, 1, 1])
    pos_color = ListProperty([0, 0, 0, 0])
    pos_sources = ListProperty([])
    state_sources = ListProperty([])
    # True while the mouse cursor is over this widget.
    enter_pos = False

    def __init__(self, **kwargs):
        # Register the custom hover events before any binding can fire them.
        self.register_event_type('on_mouse_inside')
        self.register_event_type('on_mouse_outside')
        # Re-apply resting visuals whenever these are assigned after init.
        self.bind(
            pos_sources=self.config,
            state_sources=self.config,
            defaut_color=self.config,
        )
        super(KVButtonIcon, self).__init__(**kwargs)
        # Ripple shape hint; presumably consumed by EffectBehavior — TODO confirm.
        self.type_button = 'Rounded'
        Window.bind(mouse_pos=self.on_mouse_pos)
        # Defer one frame so properties applied from kv are already in place.
        Clock.schedule_once(self.config)

    def config(self, *args):
        """Apply the resting image source and tint from the configured lists."""
        if len(self.pos_sources) == 2:
            self.source = self.pos_sources[0]
        if len(self.state_sources) == 2:
            self.source = self.state_sources[0]
        if self.defaut_color != [1, 1, 1, 1]:
            self.icon_color = self.defaut_color

    def on_state(self, widget, state):
        """Swap the image when the button press state changes."""
        if len(self.state_sources) == 2:
            if state == 'normal':
                self.source = self.state_sources[0]
            elif state == 'down':
                self.source = self.state_sources[1]

    def on_mouse_pos(self, window, mouse_pos):
        """Track the window cursor and toggle the hover visuals/events."""
        # mouse_pos is in window coordinates; convert before hit-testing.
        if self.collide_point(*self.to_widget(*mouse_pos)):
            self.enter_pos = True
            self.dispatch('on_mouse_inside')
            if len(self.pos_sources) == 2:
                self.source = self.pos_sources[1]
            if self.pos_color != [0, 0, 0, 0]:
                self.icon_color = self.pos_color
            return None
        # Cursor is outside: restore the resting visuals.
        if len(self.pos_sources) == 2:
            self.source = self.pos_sources[0]
        if self.defaut_color != [1, 1, 1, 1]:
            self.icon_color = self.defaut_color
        if self.enter_pos:
            # Only dispatch the leave event on the inside -> outside transition.
            self.enter_pos = False
            self.dispatch('on_mouse_outside')

    def on_touch_down(self, touch):
        """Start the ripple on a direct (non-scroll, non-duplicate) touch."""
        if touch.is_mouse_scrolling:
            return False
        elif self in touch.ud:
            return False
        if self.collide_point(*touch.pos):
            touch.grab(self)
            self.ripple_show(touch)
            return super(KVButtonIcon, self).on_touch_down(touch)
        return False

    def on_touch_up(self, touch):
        """Fade the ripple when the touch this widget grabbed is released."""
        if touch.grab_current is self:
            touch.ungrab(self)
            self.ripple_fade()
            return super(KVButtonIcon, self).on_touch_up(touch)

    def on_mouse_inside(self):
        # Default handler for the custom hover-enter event.
        pass

    def on_mouse_outside(self):
        # Default handler for the custom hover-leave event.
        pass
class KVToggleButtonIcon(ToggleButtonBehavior, KVButtonIcon):
    """Icon button with toggle (sticky normal/down) state semantics."""
    pass
|
nilq/baby-python
|
python
|
import os
from sys import argv

# Concatenate the last 10000 lines of every task's hb_list file into a single
# per-window data file; window 4 is intentionally skipped.
tobs = int(argv[1])
for window in range(6):
    if window == 4:
        continue
    for task in range(10):
        # First task truncates the output file; later tasks append to it.
        redirect = '>' if task == 0 else '>>'
        os.system('tail -10000 tobs%d/window%d/hb_list_tobs_%d_task_%d.dat %s tobs%d/window%d/window%d.dat'
                  % (tobs, window, tobs, task, redirect, tobs, window, window))
|
nilq/baby-python
|
python
|
from binaryninja import *
from binaryninja.binaryview import BinaryView
from binaryninja.platform import Platform
from .browser import ImagePickerDialog
# binja doesn't want to load mods in a plugin's dir
# so hopefully we can just hack that in manually
# We do this after importing binaryninja, because in my local workspace I embed a copy of
# the binaryninja API so my IDE can handle intellisense
# This wont interfere since binja wont see that dir properly
# Resolve this plugin's directory and push it (and its bundled ktool copy)
# onto the front of sys.path so the vendored modules below import correctly.
# NOTE(review): `os` and `sys` appear to come from the star-import of
# binaryninja above — confirm they are exported there.
this_script = os.path.realpath(__file__)
this_dir = os.path.dirname(this_script)
sys.path.insert(0, this_dir)
sys.path.insert(0, this_dir + os.path.sep + 'ktool')
from io import BytesIO
from DyldExtractor.extraction_context import ExtractionContext
from DyldExtractor.macho.macho_context import MachOContext
from DyldExtractor.dyld.dyld_context import DyldContext
from DyldExtractor.converter import (
slide_info,
macho_offset,
linkedit_optimizer,
stub_fixer,
objc_fixer
)
import ktool
def internal_print_rewrite(msg):
    """Forward *msg* to Binary Ninja's info log instead of stdout."""
    log.log(LogLevel.InfoLog, msg)


# Deliberately shadow the builtin print at module scope so any stray prints
# (including those from the vendored modules above) land in the binja log.
print = internal_print_rewrite
class DyldCacheHander:
    """Thin wrapper around an open dyld shared-cache file and its image list."""

    def __init__(self, filename):
        """Open `filename` for binary reading; parsing is deferred."""
        self.filename = filename
        self.images = []
        self.image_map = {}
        self.fp = open(filename, 'rb')
        self.dyld_context = None

    def populate_image_list(self):
        """Parse the cache header and record every embedded image path."""
        self.dyld_context = DyldContext(self.fp)
        for image_data in self.dyld_context.images:
            raw = self.dyld_context.readString(image_data.pathFileOffset)
            # Strip the trailing NUL byte, then decode to text.
            path = raw[0:-1].decode("utf-8")
            self.images.append(path)
            self.image_map[path] = image_data
# noinspection PyAbstractClass
class DyldSharedCacheView(BinaryView):
    """BinaryView that extracts a single image out of a dyld shared cache.

    Lets the user pick an image, runs the DyldExtractor fixup pipeline on it,
    and maps the rebuilt Mach-O's segments into the view.
    """
    name = "DyldSharedCache"
    long_name = "Dyld Shared Cache Loader"

    def __init__(self, data):
        BinaryView.__init__(self, parent_view=data, file_metadata=data.file)
        self.cache_handler = DyldCacheHander(data.file.filename)

    def init(self):
        """Pick an image, extract + fix it, and map its segments. Returns False on cancel."""
        # TODO: not hardcode
        self.platform = Platform["mac-aarch64"]  # was f-string with no placeholder
        self.cache_handler.populate_image_list()
        # Use the fancy image picker if the UI is enabled
        if core_ui_enabled():
            ipd = ImagePickerDialog(self.cache_handler.images)
            ipd.run()
            # Can happen if the dialog is rejected
            if ipd.chosen_image is None:
                return False
            image = self.cache_handler.image_map[ipd.chosen_image]
        else:
            mod_index = get_choice_input(f'Found {len(self.cache_handler.images)} Images', 'Select Image',
                                         self.cache_handler.images)
            mod = self.cache_handler.images[mod_index]
            image = self.cache_handler.image_map[mod]
        _macho_offset, context = self.cache_handler.dyld_context.convertAddr(image.address)
        macho_ctx = MachOContext(context.fileObject, _macho_offset, True)
        extraction_ctx = ExtractionContext(self.cache_handler.dyld_context, macho_ctx)
        # DyldExtractor pipeline: undo slide info, rebuild linkedit, fix stubs/ObjC.
        slide_info.processSlideInfo(extraction_ctx)
        linkedit_optimizer.optimizeLinkedit(extraction_ctx)
        stub_fixer.fixStubs(extraction_ctx)
        objc_fixer.fixObjC(extraction_ctx)
        write_procedures = macho_offset.optimizeOffsets(extraction_ctx)
        virt_macho = BytesIO()
        # Write the MachO file
        for procedure in write_procedures:
            # BUG FIX (cleanup): a redundant seek(0) before each
            # seek(writeOffset) was removed — it had no effect.
            virt_macho.seek(procedure.writeOffset)
            virt_macho.write(
                procedure.fileCtx.getBytes(procedure.readOffset, procedure.size)
            )
        virt_macho.seek(0)
        image = ktool.load_image(virt_macho)
        for segment in image.segments.values():
            segment: ktool.macho.Segment = segment
            seg_dat = image.get_bytes_at(segment.file_address, segment.size)
            # We can map all of these as RWX or ---, it makes no difference.
            # This view wont be analyzing, and MachO or ObjectiveNinja will properly map them.
            self.add_auto_segment(segment.vm_address, segment.size, segment.file_address, segment.size, SegmentFlag.SegmentReadable)
            self.write(segment.vm_address, bytes(seg_dat))
        self.abort_analysis()
        return True

    @classmethod
    def is_valid_for_data(cls, data):
        """Accept files whose first 16 bytes carry the dyld_v1 cache magic."""
        hdr = data.read(0, 16)
        if len(hdr) < 16:
            return False
        if b'dyld_v1' not in hdr:
            return False
        return True
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations # must import to defer parsing of annotations
import pytest
import tvm
from tvm import relax
from tvm.script import relax as R
import numpy as np
# TVMScript module exposing relax.unique in both unsorted and sorted forms.
# (Tensor/m/n are resolved by the TVMScript parser, not ordinary Python names.)
@tvm.script.ir_module
class InputModule:
    @R.function
    def foo(x: Tensor((m, n), "int64")):
        # unsorted: unique values in the op's non-sorted order
        y = relax.unique(x, sorted=False)
        # default sorted=True: unique values returned sorted
        y_sorted = relax.unique(x)
        return y, y_sorted
def test_unique():
    """Build InputModule for LLVM and check both unique variants on CPU."""
    mod = InputModule
    # TODO(prakalp): also add test for compiling and running on cuda device.
    target = tvm.target.Target("llvm")
    executable = relax.vm.build(mod, target)
    vm = relax.VirtualMachine(executable, tvm.cpu())
    data_numpy = np.random.randint(0, 16, (16, 16))
    data = tvm.nd.array(data_numpy)
    result, result_sorted = vm["foo"](data)
    # numpy yields the sorted uniques plus each value's first-occurrence index.
    expected_output_sorted, indices = np.unique(data_numpy, return_index=True)
    # The unsorted variant orders uniques by descending first-occurrence index.
    expected_output = data_numpy.flatten()[np.sort(indices)[::-1]]
    np.testing.assert_array_equal(expected_output_sorted, result_sorted.numpy())
    np.testing.assert_array_equal(expected_output, result.numpy())


if __name__ == "__main__":
    pytest.main([__file__])
|
nilq/baby-python
|
python
|
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dp_autoencoder import Autoencoder
import mimic_dataset
import dp_optimizer
import sampling
import analysis
# Deterministic output
# Fix both RNGs so repeated runs draw identical weights and noise.
torch.manual_seed(0)
np.random.seed(0)
class Generator(nn.Module):
    """Residual MLP generator: two residual blocks then an output projection.

    Each block is Linear (no bias) -> LayerNorm -> activation; the activation
    is Tanh for binary data, LeakyReLU(0.2) otherwise.
    """

    def __init__(self, input_dim, output_dim, binary=True, device='cpu'):
        super(Generator, self).__init__()
        activation = nn.Tanh if binary else (lambda: nn.LeakyReLU(0.2))

        def make_block(in_features, out_features):
            # One normalized feed-forward stage, moved to the target device.
            stage = nn.Sequential(
                nn.Linear(in_features, out_features, bias=False),
                nn.LayerNorm(out_features),
                activation(),
            )
            return stage.to(device)

        self.block_0 = make_block(input_dim, input_dim)
        self.block_1 = make_block(input_dim, input_dim)
        self.block_2 = make_block(input_dim, output_dim)

    def forward(self, x):
        """Apply the two residual stages, then the (non-residual) output stage."""
        hidden = self.block_0(x) + x
        hidden = self.block_1(hidden) + hidden
        return self.block_2(hidden)
class Discriminator(nn.Module):
    """MLP critic mapping a flat sample to a single unbounded score (WGAN-style)."""

    def __init__(self, input_dim, device='cpu'):
        super(Discriminator, self).__init__()
        # Hidden widths shrink to 2/3 and then 1/3 of the input size.
        hidden_a = (2 * input_dim) // 3
        hidden_b = input_dim // 3
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_a),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_a, hidden_b),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_b, 1),
        ).to(device)

    def forward(self, x):
        """Score a batch of shape (batch, input_dim); returns (batch, 1)."""
        return self.model(x)
def train(params):
    """Train a WGAN generator with a differentially-private critic.

    params: dict of hyperparameters (see the argparse setup in this module).
    Loads a pre-trained DP autoencoder from dp_autoencoder.dat and trains the
    generator to produce latent codes whose decoded samples fool the critic.
    Checkpoints the generator to dpwgans1/<iteration>.dat every 1000
    iterations and returns the trained Generator.
    """
    dataset = {
        'mimic': mimic_dataset,
    }[params['dataset']]
    _, train_dataset, _, _ = dataset.get_datasets()
    # Pre-trained autoencoder; its decoder maps latent codes back to data space.
    with open('dp_autoencoder.dat', 'rb') as f:
        autoencoder = torch.load(f)
    decoder = autoencoder.get_decoder()
    generator = Generator(
        input_dim=params['latent_dim'],
        output_dim=autoencoder.get_compression_dim(),
        binary=params['binary'],
        device=params['device'],
    )
    g_optimizer = torch.optim.RMSprop(
        params=generator.parameters(),
        lr=params['lr'],
        alpha=params['alpha'],
        weight_decay=params['l2_penalty'],
    )
    discriminator = Discriminator(
        input_dim=np.prod(train_dataset[0].shape),
        device=params['device'],
    )
    # DP optimizer: per-microbatch gradient clipping plus noise injection.
    d_optimizer = dp_optimizer.DPRMSprop(
        l2_norm_clip=params['l2_norm_clip'],
        noise_multiplier=params['noise_multiplier'],
        minibatch_size=params['minibatch_size'],
        microbatch_size=params['microbatch_size'],
        params=discriminator.parameters(),
        lr=params['lr'],
        alpha=params['alpha'],
        weight_decay=params['l2_penalty'],
    )
    # Report the (epsilon, delta) privacy budget implied by these settings.
    print('Achieves ({}, {})-DP'.format(
        analysis.epsilon(
            len(train_dataset),
            params['minibatch_size'],
            params['noise_multiplier'],
            params['iterations'],
            params['delta']
        ),
        params['delta'],
    ))
    minibatch_loader, microbatch_loader = sampling.get_data_loaders(
        params['minibatch_size'],
        params['microbatch_size'],
        params['iterations'],
    )
    iteration = 0
    for X_minibatch in minibatch_loader(train_dataset):
        # --- critic update: WGAN loss on real vs. decoded-fake microbatches ---
        d_optimizer.zero_grad()
        for real in microbatch_loader(X_minibatch):
            real = real.to(params['device'])
            z = torch.randn(real.size(0), params['latent_dim'], device=params['device'], requires_grad=False)
            # detach: no generator gradients during the critic step
            fake = decoder(generator(z)).detach()
            d_optimizer.zero_microbatch_grad()
            d_loss = -torch.mean(discriminator(real)) + torch.mean(discriminator(fake))
            d_loss.backward()
            d_optimizer.microbatch_step()
        d_optimizer.step()
        # WGAN weight clipping keeps the critic approximately 1-Lipschitz.
        for parameter in discriminator.parameters():
            parameter.data.clamp_(-params['clip_value'], params['clip_value'])
        # --- generator update, once every d_updates critic steps ---
        if iteration % params['d_updates'] == 0:
            z = torch.randn(X_minibatch.size(0), params['latent_dim'], device=params['device'], requires_grad=False)
            fake = decoder(generator(z))
            g_optimizer.zero_grad()
            g_loss = -torch.mean(discriminator(fake))
            g_loss.backward()
            g_optimizer.step()
        if iteration % 100 == 0:
            print('[Iteration %d/%d] [D loss: %f] [G loss: %f]' % (iteration, params['iterations'], d_loss.item(), g_loss.item()))
        iteration += 1
        # Periodic generator checkpoint (directory assumed to exist).
        if iteration % 1000 == 0:
            with open('dpwgans1/{}.dat'.format(iteration), 'wb') as f:
                torch.save(generator, f)
    return generator
if __name__ == '__main__':
    def _str2bool(value):
        """Parse a boolean CLI value.

        BUG FIX: the previous `type=bool` treated any non-empty string —
        including "False" — as True.
        """
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

    # Help strings below are corrected to state the actual defaults
    # (several previously described stale values).
    parser = argparse.ArgumentParser()
    parser.add_argument('--alpha', type=float, default=0.99, help='smoothing parameter for RMS prop (default: 0.99)')
    parser.add_argument('--binary', type=_str2bool, default=True, help='whether data type is binary (default: true)')
    parser.add_argument('--clip-value', type=float, default=0.01, help='upper bound on weights of the discriminator (default: 0.01)')
    parser.add_argument('--d-updates', type=int, default=10, help='number of iterations to update discriminator per generator update (default: 10)')
    parser.add_argument('--dataset', type=str, default='mimic', help='the dataset to be used for training (default: mimic)')
    parser.add_argument('--delta', type=float, default=1e-5, help='delta for epsilon calculation (default: 1e-5)')
    parser.add_argument('--device', type=str, default=('cuda' if torch.cuda.is_available() else 'cpu'), help='whether or not to use cuda (default: cuda if available)')
    parser.add_argument('--iterations', type=int, default=10000, help='number of iterations to train (default: 10000)')
    parser.add_argument('--l2-norm-clip', type=float, default=0.022, help='upper bound on the l2 norm of gradient updates (default: 0.022)')
    parser.add_argument('--l2-penalty', type=float, default=0., help='l2 penalty on model weights (default: 0)')
    parser.add_argument('--latent-dim', type=int, default=64, help='dimensionality of the latent space (default: 64)')
    parser.add_argument('--lr', type=float, default=0.005, help='learning rate (default: 0.005)')
    parser.add_argument('--microbatch-size', type=int, default=1, help='input microbatch size for training (default: 1)')
    parser.add_argument('--minibatch-size', type=int, default=128, help='input minibatch size for training (default: 128)')
    parser.add_argument('--noise-multiplier', type=float, default=3.5, help='ratio between clipping bound and std of noise applied to gradients (default: 3.5)')
    params = vars(parser.parse_args())
    generator = train(params)
    with open('dp_generator.dat', 'wb') as f:
        torch.save(generator, f)
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.1 on 2020-01-06 22:04
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Message.pending_read: the users who have not yet read each message."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('chat', '0008_message_front_key'),
    ]

    operations = [
        migrations.AddField(
            model_name='message',
            name='pending_read',
            # Reverse accessor user.unread_messages lists messages still unread.
            field=models.ManyToManyField(related_name='unread_messages', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
nilq/baby-python
|
python
|
# the first import can't be removed
import scripted_rest_sys_path
from dbx2.dbx_logger import logger
import splunk, os, json
from json import loads
from dbx2.simple_rest import SimpleRest
from dbx2.java_home_detector import JavaHomeDetector
from dbx2.splunk_client.splunk_service_factory import SplunkServiceFactory
import splunklib.client as client
import jvm_options
from dbx2.jre_validator import validateJRE, checkDependencies
import requests
class Settings(SimpleRest):
    """REST handler for DB Connect's Java settings (JAVA_HOME + JVM options).

    GET returns the java settings stanza enriched with JVM options and the
    task server port; POST/PUT/PATCH validate and persist new settings, then
    restart the task server so they take effect.
    """
    endpoint = "configs/conf-dbx_settings/java"
    defaultPort = 9998
    commands_endpoint = "configs/conf-commands/%s"
    java_commands = ["dbxquery", "dbxoutput", "dbxlookup"]
    customized_java_path = "customized.java.path"
    taskserverPortProperty = "dw.server.applicationConnectors[0].port"
    taskserverPortRegex = r'dw\.server\.applicationConnectors\[0\]\.port=(\d+)'
    restart_url = "https://localhost:%s/api/taskserver"

    def illegalAction(self, verb):
        """Reply 405 for HTTP verbs this handler does not support."""
        self.response.setStatus(405)
        self.addMessage('ERROR', 'HTTP %s not supported by the settings handler' % verb, 405)

    def handle_DELETE(self):
        self.illegalAction('DELETE')

    def handle_PUT(self):
        # BUG FIX: was self.handle_POST(self), which passed `self` twice and
        # raised TypeError on every PUT.
        self.handle_POST()

    def handle_PATCH(self):
        # BUG FIX: same double-self call as handle_PUT.
        self.handle_POST()

    def handle_GET(self):
        """Return the java settings stanza plus derived JVM/task-server info."""
        try:
            splunk_service = SplunkServiceFactory.create(self.sessionKey, app='splunk_app_db_connect',
                                                         owner=self.userName)
            content = client.Entity(splunk_service, self.endpoint).content
            self.check_java_home(content)
            self.read_vmopts(content)
            self.writeJson(content)
        except Exception as ex:
            self.response.setStatus(500)
            self.writeJson({
                "code": 500,
                # BUG FIX: Exception.message does not exist on Python 3.
                "message": str(ex),
                "detail": str(ex)
            })

    def handle_POST(self):
        """Validate and persist new java settings, then restart the task server."""
        try:
            # Remember the old port: the restart request must target the
            # server that is still running on it.
            pre_taskserverport = self.read_taskserver_port()
            payload = loads(self.request['payload'])
            self.check_java_home(payload)
            # check whether the javaHome is valid
            self.validate_java_home(payload["javaHome"])
            self.update_vmopts(payload)
            splunk_service = SplunkServiceFactory.create(self.sessionKey, app='splunk_app_db_connect',
                                                         owner=self.userName)
            entity = client.Entity(splunk_service, self.endpoint)
            entity.update(**payload).refresh()
            logger.debug('updated java settings')
            self.update_dbx_java_home(payload["javaHome"])
            self.reset_java_command_filename(splunk_service)
            self.read_vmopts(entity.content)
            self.restart_task_server(pre_taskserverport)
            self.writeJson(entity.content)
        except Exception as ex:
            self.response.setStatus(500)
            self.writeJson({
                "code": 500,
                # BUG FIX: Exception.message does not exist on Python 3.
                "message": str(ex),
                "detail": str(ex)
            })

    def check_java_home(self, content):
        """Fill content["javaHome"] from $JAVA_HOME or auto-detection when absent."""
        if "javaHome" not in content:
            if "JAVA_HOME" in os.environ:
                java_home = os.environ["JAVA_HOME"].replace('"', '')
                content["javaHome"] = java_home
            else:
                try:
                    java_home = JavaHomeDetector.detect()
                    content["javaHome"] = java_home
                except Exception:
                    logger.warn("java home auto detection failed")
                    content["javaHome"] = ""

    # DBX-3248 write java home to specific file so that it can be used to start server and java search command.
    def update_dbx_java_home(self, javaHome):
        """Write the chosen java binary path into every platform's path file."""
        app_dir = os.path.join(os.path.dirname(__file__), '..')
        java_path_darwin = os.path.join(app_dir, "darwin_x86_64", "bin", self.customized_java_path)
        java_path_linux32 = os.path.join(app_dir, "linux_x86", "bin", self.customized_java_path)
        java_path_linux64 = os.path.join(app_dir, "linux_x86_64", "bin", self.customized_java_path)
        java_path_win32 = os.path.join(app_dir, "windows_x86", "bin", self.customized_java_path)
        java_path_win64 = os.path.join(app_dir, "windows_x86_64", "bin", self.customized_java_path)
        java_home_files = [
            {"filename": java_path_darwin, "suffix": "/bin/java"},
            {"filename": java_path_linux32, "suffix": "/bin/java"},
            {"filename": java_path_linux64, "suffix": "/bin/java"},
            {"filename": java_path_win32, "suffix": "\\bin\\java.exe"},
            {"filename": java_path_win64, "suffix": "\\bin\\java.exe"}
        ]
        for java_home_file in java_home_files:
            try:
                with open(java_home_file["filename"], "w") as file:
                    file.write(javaHome + java_home_file["suffix"])
                    logger.info('update java path file [%s]' % java_home_file["filename"])
            except IOError:
                logger.error('unable to update java path file [%s]' % java_home_file["filename"])
                raise

    def reset_java_command_filename(self, splunk_service):
        """Reset any java search command pointing at the customized path file."""
        for java_command in self.java_commands:
            entity = client.Entity(splunk_service, self.commands_endpoint % java_command)
            # If customer have set the filename to "customized.java.path", we need to reset it to "java.path"
            # Related issue: DBX-3746
            if entity["filename"] == self.customized_java_path:
                entity.update(filename="java.path").refresh()
                logger.debug("action=reset_java_command_filename command=%s" % java_command)

    def read_vmopts(self, content):
        """Populate content with the JVM options string and task server port."""
        content['jvmOptions'] = ''
        content['taskServerPort'] = self.defaultPort
        try:
            jvmopts = jvm_options.read()
            content['jvmOptions'] = jvmopts
            taskServerPort = jvm_options.get_property(jvmopts, self.taskserverPortProperty, self.taskserverPortRegex)
            if taskServerPort:
                content['taskServerPort'] = int(taskServerPort)
        except Exception as ex:
            logger.error('unable to read vmopts file [%s]' % ex)
            raise

    def update_vmopts(self, content):
        """Extract JVM options / port from the payload and persist them.

        Pops both keys from `content` so only conf-file fields remain for the
        subsequent entity.update().
        """
        try:
            jvmopts = content.pop('jvmOptions', '')  # jvmOptions may contain taskServerPort settings
            taskServerPort = content.pop('taskServerPort', self.defaultPort)
            logger.debug('action=get_vmopts_from_postdata, jvmOptions: [%s], taskServerPort: [%s]'
                         % (jvmopts, taskServerPort))
            if not isinstance(taskServerPort, int):
                raise Exception("task server port must be a int value")
            if taskServerPort < 1024 or taskServerPort > 65535:
                raise Exception('task server port must be a number in [1024, 65535]')
            jvmopts = jvm_options.set_property(jvmopts, self.taskserverPortProperty, self.taskserverPortRegex, str(taskServerPort))
            jvm_options.write(jvmopts)
        except Exception as ex:
            logger.error('unable to update vmopts file [%s]' % ex)
            raise

    def validate_java_home(self, java_home):
        """Raise with a reason when java_home does not hold a usable JRE."""
        if os.path.isdir(java_home):
            java_cmd = os.path.join(java_home, "bin", "java")
            is_valid, reason = validateJRE(java_cmd)
            if is_valid:
                is_valid, reason = checkDependencies(java_home)
            if not is_valid:
                raise Exception(reason)
        else:
            raise Exception("JAVA_HOME path not exist")

    @classmethod
    def read_taskserver_port(cls):
        """Return the configured task server port, or the default on any error."""
        try:
            jvmopts = jvm_options.read()
            taskServerPort = jvm_options.get_property(jvmopts, cls.taskserverPortProperty, cls.taskserverPortRegex)
            if taskServerPort:
                return taskServerPort
            else:
                return cls.defaultPort
        except Exception as ex:
            logger.error('unable to read vmopts file, use default port 8080, error info: [%s]' % ex)
            return cls.defaultPort

    def restart_task_server(self, taskserver_port):
        """Best-effort DELETE to the task server so new settings take effect."""
        try:
            # settings update successfully, then trigger restart server api to make change taking effect
            requests.delete(self.restart_url % taskserver_port, verify=False)
        except Exception as ex:
            # if task server is not running, this request will failed
            # BUG FIX: the extra positional arg was previously passed to
            # logger.warn without a placeholder.
            logger.warn("action=restart_task_server_request_failed [%s]" % ex)
|
nilq/baby-python
|
python
|
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Every element appears twice except one. XOR-folding cancels the
        pairs (a ^ a == 0, a ^ 0 == a), leaving the unique element.
        Runs in O(n) time, O(1) space.
        """
        result = 0
        for value in nums:
            result ^= value
        # BUG FIX: removed a stray debug print(a) that polluted stdout.
        return result
# Quick sanity checks: XOR is commutative, so both lines print 5.
print(15 ^ 10)
print(10 ^ 15)
# Exercise the solver on the classic example; the unique element is 4.
solver = Solution()
solver.singleNumber([4,1,2,1,2])
|
nilq/baby-python
|
python
|
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
"""
Utilities for handling color maps and color bars
"""
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani, Luke Van Roekel, Greg Streletz
from __future__ import absolute_import, division, print_function, \
unicode_literals
import matplotlib.pyplot as plt
import matplotlib.colors as cols
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
import xml.etree.ElementTree as ET
from six.moves import configparser
import cmocean
import pkg_resources
from six import string_types
import mpas_analysis.shared.plot.ScientificColourMaps5
def setup_colormap(config, configSectionName, suffix=''):
    '''
    Build the colormap bundle described by a config section.

    Parameters
    ----------
    config : instance of ConfigParser
        the configuration, containing a [plot] section with options that
        control plotting

    configSectionName : str
        name of config section

    suffix: str, optional
        suffix of colormap related options

    Returns
    -------
    colormapDict : dict
        A dictionary with keys 'colormap', 'norm', 'levels' (``None`` unless
        the map is indexed), 'ticks', and the optional 'contours',
        'lineWidth' and 'lineColor' entries (each ``None`` when the
        corresponding option is absent).
    '''
    # Authors
    # -------
    # Xylar Asay-Davis, Milena Veneziani, Greg Streletz

    register_custom_colormaps()

    def _optional(option_base, getter):
        # Fetch an optional per-suffix config value, or None when unset.
        option = '{}{}'.format(option_base, suffix)
        if config.has_option(configSectionName, option):
            return getter(option)
        return None

    colormapType = config.get(configSectionName,
                              'colormapType{}'.format(suffix))
    if colormapType == 'indexed':
        (colormap, norm, levels, ticks) = _setup_indexed_colormap(
            config, configSectionName, suffix=suffix)
    elif colormapType == 'continuous':
        (colormap, norm, ticks) = _setup_colormap_and_norm(
            config, configSectionName, suffix=suffix)
        levels = None
    else:
        raise ValueError('config section {} option colormapType{} is not '
                         '"indexed" or "continuous"'.format(
                             configSectionName, suffix))

    contours = _optional(
        'contourLevels',
        lambda opt: config.getExpression(configSectionName, opt,
                                         usenumpyfunc=True))
    # The literal string 'none' also means "no contours".
    if isinstance(contours, string_types) and contours == 'none':
        contours = None

    lineWidth = _optional(
        'contourThickness',
        lambda opt: config.getfloat(configSectionName, opt))

    lineColor = _optional(
        'contourColor',
        lambda opt: config.get(configSectionName, opt))

    return {'colormap': colormap, 'norm': norm, 'levels': levels,
            'ticks': ticks, 'contours': contours, 'lineWidth': lineWidth,
            'lineColor': lineColor}
def register_custom_colormaps():
name = 'ferret'
backgroundColor = (0.9, 0.9, 0.9)
red = np.array([[0, 0.6],
[0.15, 1],
[0.35, 1],
[0.65, 0],
[0.8, 0],
[1, 0.75]])
green = np.array([[0, 0],
[0.1, 0],
[0.35, 1],
[1, 0]])
blue = np.array([[0, 0],
[0.5, 0],
[0.9, 0.9],
[1, 0.9]])
colorCount = 21
colorList = np.ones((colorCount, 4), float)
colorList[:, 0] = np.interp(np.linspace(0, 1, colorCount),
red[:, 0], red[:, 1])
colorList[:, 1] = np.interp(np.linspace(0, 1, colorCount),
green[:, 0], green[:, 1])
colorList[:, 2] = np.interp(np.linspace(0, 1, colorCount),
blue[:, 0], blue[:, 1])
colorList = colorList[::-1, :]
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
colorMap.set_bad(backgroundColor)
_register_colormap_and_reverse(name, colorMap)
name = 'erdc_iceFire_H'
colorArray = np.array([
[-1, 4.05432e-07, 0, 5.90122e-06],
[-0.87451, 0, 0.120401, 0.302675],
[-0.74902, 0, 0.216583, 0.524574],
[-0.623529, 0.0552475, 0.345025, 0.6595],
[-0.498039, 0.128047, 0.492588, 0.720288],
[-0.372549, 0.188955, 0.641309, 0.792092],
[-0.247059, 0.327673, 0.784935, 0.873434],
[-0.121569, 0.60824, 0.892164, 0.935547],
[0.00392157, 0.881371, 0.912178, 0.818099],
[0.129412, 0.951407, 0.835621, 0.449279],
[0.254902, 0.904481, 0.690489, 0],
[0.380392, 0.85407, 0.510864, 0],
[0.505882, 0.777093, 0.33018, 0.00088199],
[0.631373, 0.672862, 0.139087, 0.00269398],
[0.756863, 0.508815, 0, 0],
[0.882353, 0.299417, 0.000366289, 0.000547829],
[1, 0.0157519, 0.00332021, 4.55569e-08]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
name = 'erdc_iceFire_L'
colorArray = np.array([
[-1, 0.870485, 0.913768, 0.832905],
[-0.87451, 0.586919, 0.887865, 0.934003],
[-0.74902, 0.31583, 0.776442, 0.867858],
[-0.623529, 0.18302, 0.632034, 0.787722],
[-0.498039, 0.117909, 0.484134, 0.713825],
[-0.372549, 0.0507239, 0.335979, 0.654741],
[-0.247059, 0, 0.209874, 0.511832],
[-0.121569, 0, 0.114689, 0.28935],
[0.00392157, 0.0157519, 0.00332021, 4.55569e-08],
[0.129412, 0.312914, 0, 0],
[0.254902, 0.520865, 0, 0],
[0.380392, 0.680105, 0.15255, 0.0025996],
[0.505882, 0.785109, 0.339479, 0.000797922],
[0.631373, 0.857354, 0.522494, 0],
[0.756863, 0.910974, 0.699774, 0],
[0.882353, 0.951921, 0.842817, 0.478545],
[1, 0.881371, 0.912178, 0.818099]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
name = 'BuOr'
colors1 = plt.cm.PuOr(np.linspace(0., 1, 256))
colors2 = plt.cm.RdBu(np.linspace(0, 1, 256))
# combine them and build a new colormap, just the orange from the first
# and the blue from the second
colorList = np.vstack((colors1[0:128, :], colors2[128:256, :]))
# reverse the order
colorList = colorList[::-1, :]
colorMap = cols.LinearSegmentedColormap.from_list(name, colorList)
_register_colormap_and_reverse(name, colorMap)
name = 'Maximenko'
colorArray = np.array([
[-1, 0., 0.45882352941, 0.76470588235],
[-0.666667, 0., 0.70196078431, 0.90588235294],
[-0.333333, 0.3294117647, 0.87058823529, 1.],
[0., 0.76470588235, 0.94509803921, 0.98039215686],
[0.333333, 1., 1., 0.],
[0.666667, 1., 0.29411764705, 0.],
[1, 1., 0., 0.]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
# add the cmocean color maps
mapNames = list(cmocean.cm.cmapnames)
# don't bother with gray (already exists, I think)
mapNames.pop(mapNames.index('gray'))
for mapName in mapNames:
_register_colormap_and_reverse(mapName, getattr(cmocean.cm, mapName))
# add SciVisColor colormaps from
# https://sciviscolor.org/home/colormaps/
for mapName in ['3wave-yellow-grey-blue', '3Wbgy5',
'4wave-grey-red-green-mgreen', '5wave-yellow-brown-blue',
'blue-1', 'blue-3', 'blue-6', 'blue-8', 'blue-orange-div',
'brown-2', 'brown-5', 'brown-8', 'green-1', 'green-4',
'green-7', 'green-8', 'orange-5', 'orange-6',
'orange-green-blue-gray', 'purple-7', 'purple-8', 'red-1',
'red-3', 'red-4', 'yellow-1', 'yellow-7']:
xmlFile = pkg_resources.resource_filename(
__name__, 'SciVisColorColormaps/{}.xml'.format(mapName))
_read_xml_colormap(xmlFile, mapName)
name = 'white_cmo_deep'
# modify cmo.deep to start at white
colors2 = plt.cm.get_cmap('cmo.deep')(np.linspace(0, 1, 224))
colorCount = 32
colors1 = np.ones((colorCount, 4), float)
x = np.linspace(0., 1., colorCount+1)[0:-1]
white = [1., 1., 1., 1.]
for cIndex in range(4):
colors1[:, cIndex] = np.interp(x, [0., 1.],
[white[cIndex], colors2[0, cIndex]])
colors = np.vstack((colors1, colors2))
# generating a smoothly-varying LinearSegmentedColormap
cmap = LinearSegmentedColormap.from_list(name, colors)
_register_colormap_and_reverse(name, cmap)
def _setup_colormap_and_norm(config, configSectionName, suffix=''):
'''
Set up a colormap from the registry
Parameters
----------
config : instance of ConfigParser
the configuration, containing a [plot] section with options that
control plotting
configSectionName : str
name of config section
suffix: str, optional
suffix of colormap related options
Returns
-------
colormap : srt
new colormap
norm : ``mapplotlib.colors.Normalize``
the norm used to normalize the colormap
ticks : array of float
the tick marks on the colormap
'''
# Authors
# -------
# Xylar Asay-Davis
register_custom_colormaps()
colormap = plt.get_cmap(config.get(configSectionName,
'colormapName{}'.format(suffix)))
normType = config.get(configSectionName, 'normType{}'.format(suffix))
kwargs = config.getExpression(configSectionName,
'normArgs{}'.format(suffix))
if normType == 'symLog':
norm = cols.SymLogNorm(**kwargs)
elif normType == 'log':
norm = cols.LogNorm(**kwargs)
elif normType == 'linear':
norm = cols.Normalize(**kwargs)
else:
raise ValueError('Unsupported norm type {} in section {}'.format(
normType, configSectionName))
try:
ticks = config.getExpression(
configSectionName, 'colorbarTicks{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
ticks = None
return (colormap, norm, ticks)
def _setup_indexed_colormap(config, configSectionName, suffix=''):
'''
Set up a colormap from the registry
Parameters
----------
config : instance of ConfigParser
the configuration, containing a [plot] section with options that
control plotting
configSectionName : str
name of config section
suffix: str, optional
suffix of colormap related options
colorMapType
Returns
-------
colormap : srt
new colormap
norm : ``mapplotlib.colors.Normalize``
the norm used to normalize the colormap
ticks : array of float
the tick marks on the colormap
'''
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani, Greg Streletz
colormap = plt.get_cmap(config.get(configSectionName,
'colormapName{}'.format(suffix)))
indices = config.getExpression(configSectionName,
'colormapIndices{}'.format(suffix),
usenumpyfunc=True)
try:
levels = config.getExpression(
configSectionName, 'colorbarLevels{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
levels = None
if levels is not None:
# set under/over values based on the first/last indices in the colormap
underColor = colormap(indices[0])
overColor = colormap(indices[-1])
if len(levels) + 1 == len(indices):
# we have 2 extra values for the under/over so make the colormap
# without these values
indices = indices[1:-1]
elif len(levels) - 1 != len(indices):
# indices list must be either one element shorter
# or one element longer than colorbarLevels list
raise ValueError('length mismatch between indices and '
'colorbarLevels')
colormap = cols.ListedColormap(colormap(indices),
'colormapName{}'.format(suffix))
colormap.set_under(underColor)
colormap.set_over(overColor)
norm = cols.BoundaryNorm(levels, colormap.N)
try:
ticks = config.getExpression(
configSectionName, 'colorbarTicks{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
ticks = levels
return (colormap, norm, levels, ticks)
def _read_xml_colormap(xmlFile, mapName):
'''Read in an XML colormap'''
xml = ET.parse(xmlFile)
root = xml.getroot()
colormap = root.findall('ColorMap')
if len(colormap) > 0:
colormap = colormap[0]
colorDict = {'red': [], 'green': [], 'blue': []}
for point in colormap.findall('Point'):
x = float(point.get('x'))
color = [float(point.get('r')), float(point.get('g')),
float(point.get('b'))]
colorDict['red'].append((x, color[0], color[0]))
colorDict['green'].append((x, color[1], color[1]))
colorDict['blue'].append((x, color[2], color[2]))
cmap = LinearSegmentedColormap(mapName, colorDict, 256)
_register_colormap_and_reverse(mapName, cmap)
def _register_colormap_and_reverse(mapName, cmap):
if mapName not in plt.colormaps():
plt.register_cmap(mapName, cmap)
plt.register_cmap('{}_r'.format(mapName), cmap.reversed())
def _plot_color_gradients():
'''from https://matplotlib.org/tutorials/colors/colormaps.html'''
cmap_list = [m for m in plt.colormaps() if not m.endswith("_r")]
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
nrows = len(cmap_list)
fig, axes = plt.subplots(figsize=(7.2, 0.25 * nrows), nrows=nrows)
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.35, right=0.99)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3] / 2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
plt.savefig('colormaps.png', dpi=100)
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
|
nilq/baby-python
|
python
|
from typing import Tuple, Type, List, Dict, Union
from pyfileconf.main import PipelineManager, SpecificClassConfigDict
def get_pipeline_dict_path_and_specific_class_config_dicts_from_manager(manager: PipelineManager
) -> Tuple[
str,
List[SpecificClassConfigDict]
]:
return manager.pipeline_dict_path, manager.specific_class_config_dicts
|
nilq/baby-python
|
python
|
"""
Task-Specific consistency training on downstream task (BreastPathQ)
"""
import argparse
import os
import time
import random
import numpy as np
from PIL import Image
import cv2
import copy
import pingouin as pg
import statsmodels.api as sm
import pandas as pd
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from torch.utils.data import Dataset, Subset
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from util import AverageMeter, plot_confusion_matrix
from collections import OrderedDict
from torchvision import transforms, datasets
from dataset import DatasetBreastPathQ_eval, DatasetBreastPathQ_SSLtrain, DatasetBreastPathQ_Supervised_train, TransformFix
import models.net as net
from albumentations import Compose
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
from torch.utils.data.sampler import SubsetRandomSampler
###########
def train(args, model_teacher, model_student, classifier_teacher, classifier_student, labeled_train_loader, unlabeled_train_loader, optimizer, epoch):
"""
Consistency training
"""
model_teacher.eval()
classifier_teacher.eval()
model_student.train()
classifier_student.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_x = AverageMeter()
losses_u = AverageMeter()
total_feats = []
total_targets = []
end = time.time()
train_loader = zip(labeled_train_loader, unlabeled_train_loader)
for batch_idx, (data_x, data_u) in enumerate(tqdm(train_loader, disable=False)):
# Get inputs and target
inputs_x, targets_x = data_x
inputs_u_w, inputs_u_s = data_u
inputs_x, inputs_u_w, inputs_u_s, targets_x = inputs_x.float(), inputs_u_w.float(), inputs_u_s.float(), targets_x.float()
# Move the variables to Cuda
inputs_x, inputs_u_w, inputs_u_s, targets_x = inputs_x.cuda(), inputs_u_w.cuda(), inputs_u_s.cuda(), targets_x.cuda()
# Compute output
inputs_x = inputs_x.reshape(-1, 3, 256, 256) #Reshape
# Compute pseudolabels for weak_unlabeled images using the teacher model
with torch.no_grad():
feat_u_w = model_teacher(inputs_u_w) # weak unlabeled data
logits_u_w = classifier_teacher(feat_u_w)
# Compute output for labeled and strong_unlabeled images using the student model
inputs = torch.cat((inputs_x, inputs_u_s))
feats = model_student(inputs)
logits = classifier_student(feats)
batch_size = inputs_x.shape[0]
logits_x = logits[:batch_size] #labeled data
logits_u_s = logits[batch_size:] # unlabeled data
del logits
# Compute loss
Supervised_loss = F.mse_loss(logits_x, targets_x.view(-1, 1), reduction='mean')
Consistency_loss = F.mse_loss(logits_u_w, logits_u_s, reduction='mean')
final_loss = Supervised_loss + args.lambda_u * Consistency_loss
# compute gradient and do SGD step #############
optimizer.zero_grad()
final_loss.backward()
optimizer.step()
# compute loss and accuracy ####################
losses.update(final_loss.item(), batch_size)
losses_x.update(Supervised_loss.item(), batch_size)
losses_u.update(Consistency_loss.item(), batch_size)
# Save features
total_feats.append(feats)
total_targets.append(targets_x)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'final_loss {final_loss.val:.3f} ({final_loss.avg:.3f})\t'
'Supervised_loss {Supervised_loss.val:.3f} ({Supervised_loss.avg:.3f})\t'
'Consistency_loss {Consistency_loss.val:.3f} ({Consistency_loss.avg:.3f})'.format(epoch, batch_idx + 1, len(labeled_train_loader), batch_time=batch_time,
data_time=data_time, final_loss=losses, Supervised_loss=losses_x, Consistency_loss=losses_u))
final_feats = torch.cat(total_feats).detach()
final_targets = torch.cat(total_targets).detach()
return losses.avg, losses_x.avg, losses_u.avg, final_feats, final_targets
def validate(args, model_student, classifier_student, val_loader, epoch):
# switch to evaluate mode
model_student.eval()
classifier_student.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
with torch.no_grad():
end = time.time()
for batch_idx, (input, target) in enumerate(tqdm(val_loader, disable=False)):
# Get inputs and target
input, target = input.float(), target.float()
# Move the variables to Cuda
input, target = input.cuda(), target.cuda()
# compute output ###############################
feats = model_student(input)
output = classifier_student(feats)
loss = F.mse_loss(output, target.view(-1, 1), reduction='mean')
# compute loss and accuracy ####################
batch_size = target.size(0)
losses.update(loss.item(), batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Val: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(epoch, batch_idx + 1, len(val_loader),
batch_time=batch_time, data_time=data_time, loss=losses))
return losses.avg
def test(args, model_student, classifier_student, test_loader):
# switch to evaluate mode
model_student.eval()
classifier_student.eval()
batch_time = AverageMeter()
losses = AverageMeter()
total_feats = []
total_output = []
total_targetA = []
total_targetB = []
with torch.no_grad():
end = time.time()
for batch_idx, (input, targetA, targetB) in enumerate(tqdm(test_loader, disable=False)):
# Get inputs and target
input, targetA, targetB = input.float(), targetA.float(), targetB.float()
# Move the variables to Cuda
input, targetA, targetB = input.cuda(), targetA.cuda(), targetB.cuda()
# compute output ###############################
feats = model_student(input)
output = classifier_student(feats)
#######
loss = F.mse_loss(output, targetA.view(-1, 1), reduction='mean')
# compute loss and accuracy
batch_size = targetA.size(0)
losses.update(loss.item(), batch_size)
# Save pred, target to calculate metrics
output = output.view(-1, 1).reshape(-1, )
total_output.append(output)
total_feats.append(feats)
total_targetA.append(targetA)
total_targetB.append(targetB)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
batch_idx, len(test_loader), batch_time=batch_time, loss=losses))
# Pred and target for performance metrics
final_outputs = torch.cat(total_output).to('cpu')
final_feats = torch.cat(total_feats).to('cpu')
final_targetsA = torch.cat(total_targetA).to('cpu')
final_targetsB = torch.cat(total_targetB).to('cpu')
return final_outputs, final_feats, final_targetsA, final_targetsB
def parse_args():
parser = argparse.ArgumentParser('Argument for BreastPathQ - Consistency training/Evaluation')
parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
parser.add_argument('--gpu', default='0', help='GPU id to use.')
parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use.')
parser.add_argument('--seed', type=int, default=42, help='seed for initializing training.')
# model definition
parser.add_argument('--model', type=str, default='resnet18', help='choice of network architecture.')
parser.add_argument('--mode', type=str, default='fine-tuning', help='fine-tuning/evaluation')
parser.add_argument('--modules_teacher', type=int, default=64,
help='which modules to freeze for the fine-tuned teacher model. (full-finetune(0), fine-tune only FC layer (60). Full_network(64) - Resnet18')
parser.add_argument('--modules_student', type=int, default=60,
help='which modules to freeze for fine-tuning the student model. (full-finetune(0), fine-tune only FC layer (60) - Resnet18')
parser.add_argument('--num_classes', type=int, default=1, help='# of classes.')
parser.add_argument('--num_epoch', type=int, default=90, help='epochs to train for.')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size - 48/64.')
parser.add_argument('--mu', default=7, type=int, help='coefficient of unlabeled batch size - 7')
parser.add_argument('--NAug', default=7, type=int, help='No of Augmentations for strong unlabeled data')
parser.add_argument('--lr', default=0.0001, type=float, help='learning rate. - 1e-4(Adam)')
parser.add_argument('--weight_decay', default=1e-4, type=float,
help='weight decay/weights regularizer for sgd. - 1e-4')
parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam.')
parser.add_argument('--beta2', default=0.999, type=float, help=' beta2 for adam.')
parser.add_argument('--lambda_u', default=1, type=float, help='coefficient of unlabeled loss')
# Consistency training
parser.add_argument('--model_path_finetune', type=str,
default='/home/csrinidhi/SSL_Eval/Save_Results/SSL/0.1/',
help='path to load SSL fine-tuned model to intialize "Teacher and student network" for consistency training')
parser.add_argument('--model_save_pth', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/', help='path to save consistency trained model')
parser.add_argument('--save_loss', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/',
help='path to save loss and other performance metrics')
# Testing
parser.add_argument('--model_path_eval', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/',
help='path to load consistency trained model')
# Data paths
parser.add_argument('--train_image_pth',
default='/home/srinidhi/Research/Data/Cellularity/Tumor_Cellularity_Compare/TrainSet/')
parser.add_argument('--test_image_pth',
default='/home/srinidhi/Research/Data/Cellularity/Tumor_Cellularity_Compare/')
parser.add_argument('--validation_split', default=0.2, type=float,
help='portion of the data that will be used for validation')
parser.add_argument('--labeled_train', default=0.1, type=float,
help='portion of the train data with labels - 1(full), 0.1/0.25/0.5')
# Tiling parameters
parser.add_argument('--image_size', default=256, type=int, help='patch size width 256')
args = parser.parse_args()
return args
def main():
# parse the args
args = parse_args()
# Set the data loaders (train, val, test)
### BreastPathQ ##################
if args.mode == 'fine-tuning':
# Train set
transform_train = transforms.Compose([]) # None
train_labeled_dataset = DatasetBreastPathQ_Supervised_train(args.train_image_pth, args.image_size, transform=transform_train)
train_unlabeled_dataset = DatasetBreastPathQ_SSLtrain(args.train_image_pth, transform=TransformFix(args.image_size, args.NAug))
# Validation set
transform_val = transforms.Compose([transforms.Resize(size=args.image_size)])
val_dataset = DatasetBreastPathQ_SSLtrain(args.train_image_pth, transform=transform_val)
# train and validation split
num_train = len(train_labeled_dataset.datalist)
indices = list(range(num_train))
split = int(np.floor(args.validation_split * num_train))
np.random.shuffle(indices)
train_idx, val_idx = indices[split:], indices[:split]
#### Semi-Supervised Split (10, 25, 50, 100)
labeled_train_idx = np.random.choice(train_idx, int(args.labeled_train * len(train_idx)))
unlabeled_train_sampler = SubsetRandomSampler(train_idx)
labeled_train_sampler = SubsetRandomSampler(labeled_train_idx)
val_sampler = SubsetRandomSampler(val_idx)
# Data loaders
labeled_train_loader = torch.utils.data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, sampler=labeled_train_sampler,
shuffle=True if labeled_train_sampler is None else False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
unlabeled_train_loader = torch.utils.data.DataLoader(train_unlabeled_dataset, batch_size=args.batch_size*args.mu, sampler=unlabeled_train_sampler,
shuffle=True if unlabeled_train_sampler is None else False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, sampler=val_sampler,
shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False)
# number of samples
num_label_data = len(labeled_train_sampler)
print('number of labeled training samples: {}'.format(num_label_data))
num_unlabel_data = len(unlabeled_train_sampler)
print('number of unlabeled training samples: {}'.format(num_unlabel_data))
num_val_data = len(val_sampler)
print('number of validation samples: {}'.format(num_val_data))
elif args.mode == 'evaluation':
# Test set
test_transforms = transforms.Compose([transforms.Resize(size=args.image_size)])
test_dataset = DatasetBreastPathQ_eval(args.test_image_pth, args.image_size, test_transforms)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# number of samples
n_data = len(test_dataset)
print('number of testing samples: {}'.format(n_data))
else:
raise NotImplementedError('invalid mode {}'.format(args.mode))
########################################
# set the model
if args.model == 'resnet18':
model_teacher = net.TripletNet_Finetune(args.model)
model_student = net.TripletNet_Finetune(args.model)
classifier_teacher = net.FinetuneResNet(args.num_classes)
classifier_student = net.FinetuneResNet(args.num_classes)
if args.mode == 'fine-tuning':
###### Intialize both teacher and student network with fine-tuned SSL model ###############
# Load model
state_dict = torch.load(args.model_path_finetune)
# Load fine-tuned model
model_teacher.load_state_dict(state_dict['model'])
model_student.load_state_dict(state_dict['model'])
# Load fine-tuned classifier
classifier_teacher.load_state_dict(state_dict['classifier'])
classifier_student.load_state_dict(state_dict['classifier'])
################# Freeze Teacher model (Entire network) ####################
# look at the contents of the teacher model and freeze it
idx = 0
for layer_name, param in model_teacher.named_parameters():
print(layer_name, '-->', idx)
idx += 1
# Freeze the teacher model
for name, param in enumerate(model_teacher.named_parameters()):
if name < args.modules_teacher: # No of layers(modules) to be freezed
print("module", name, "was frozen")
param = param[1]
param.requires_grad = False
else:
print("module", name, "was not frozen")
param = param[1]
param.requires_grad = True
############## Freeze Student model (Except last FC layer) #########################
# look at the contents of the student model and freeze it
idx = 0
for layer_name, param in model_student.named_parameters():
print(layer_name, '-->', idx)
idx += 1
# Freeze the teacher model
for name, param in enumerate(model_student.named_parameters()):
if name < args.modules_student: # No of layers(modules) to be freezed
print("module", name, "was frozen")
param = param[1]
param.requires_grad = False
else:
print("module", name, "was not frozen")
param = param[1]
param.requires_grad = True
elif args.mode == 'evaluation':
# Load fine-tuned model
state = torch.load(args.model_path_eval)
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state['model_student'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model_student.load_state_dict(new_state_dict)
# create new OrderedDict that does not contain `module.`
new_state_dict_cls = OrderedDict()
for k, v in state['classifier_student'].items():
name = k[7:] # remove `module.`
new_state_dict_cls[name] = v
classifier_student.load_state_dict(new_state_dict_cls)
else:
raise NotImplementedError('invalid training {}'.format(args.mode))
else:
raise NotImplementedError('model not supported {}'.format(args.model))
# Load model to CUDA
if torch.cuda.is_available():
model_teacher = torch.nn.DataParallel(model_teacher)
model_student = torch.nn.DataParallel(model_student)
classifier_teacher = torch.nn.DataParallel(classifier_teacher)
classifier_student = torch.nn.DataParallel(classifier_student)
cudnn.benchmark = True
# Optimiser & scheduler
optimizer = optim.Adam(filter(lambda p: p.requires_grad, list(model_student.parameters()) + list(classifier_student.parameters())), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)
# Training Model
start_epoch = 1
prev_best_val_loss = float('inf')
# Start log (writing into XL sheet)
with open(os.path.join(args.save_loss, 'fine_tuned_results.csv'), 'w') as f:
f.write('epoch, train_loss, train_losses_x, train_losses_u, val_loss\n')
# Routine
for epoch in range(start_epoch, args.num_epoch + 1):
if args.mode == 'fine-tuning':
print("==> fine-tuning the pretrained SSL model...")
time_start = time.time()
train_losses, train_losses_x, train_losses_u, final_feats, final_targets = train(args, model_teacher, model_student, classifier_teacher, classifier_student, labeled_train_loader, unlabeled_train_loader, optimizer, epoch)
print('Epoch time: {:.2f} s.'.format(time.time() - time_start))
print("==> validating the fine-tuned model...")
val_losses = validate(args, model_student, classifier_student, val_loader, epoch)
# Log results
with open(os.path.join(args.save_loss, 'fine_tuned_results.csv'), 'a') as f:
f.write('%03d,%0.6f,%0.6f,%0.6f,%0.6f,\n' % ((epoch + 1), train_losses, train_losses_x, train_losses_u, val_losses))
'adjust learning rate --- Note that step should be called after validate()'
scheduler.step()
# Iterative training: Use the student as a teacher after every epoch
model_teacher = copy.deepcopy(model_student)
classifier_teacher = copy.deepcopy(classifier_student)
# Save model every 10 epochs
if epoch % args.save_freq == 0:
print('==> Saving...')
state = {
'args': args,
'model_student': model_student.state_dict(),
'model_teacher': model_teacher.state_dict(),
'classifier_teacher': classifier_teacher.state_dict(),
'classifier_student': classifier_student.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'train_loss': train_losses,
'train_losses_x': train_losses_x,
'train_losses_u': train_losses_u,
}
torch.save(state, '{}/fine_CR_trained_model_{}.pt'.format(args.model_save_pth, epoch))
# help release GPU memory
del state
torch.cuda.empty_cache()
# Save model for the best val
if (val_losses < prev_best_val_loss) & (epoch>1):
print('==> Saving...')
state = {
'args': args,
'model_student': model_student.state_dict(),
'model_teacher': model_teacher.state_dict(),
'classifier_teacher': classifier_teacher.state_dict(),
'classifier_student': classifier_student.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'train_loss': train_losses,
'train_losses_x': train_losses_x,
'train_losses_u': train_losses_u,
}
torch.save(state, '{}/best_CR_trained_model_{}.pt'.format(args.model_save_pth, epoch))
prev_best_val_loss = val_losses
# help release GPU memory
del state
torch.cuda.empty_cache()
elif args.mode == 'evaluation':
print("==> testing final test data...")
final_predicitions, final_feats, final_targetsA, final_targetsB = test(args, model_student, classifier_student, test_loader)
final_predicitions = final_predicitions.numpy()
final_targetsA = final_targetsA.numpy()
final_targetsB = final_targetsB.numpy()
# BreastPathQ dataset #######
d = {'targets': np.hstack(
[np.arange(1, len(final_predicitions) + 1, 1), np.arange(1, len(final_predicitions) + 1, 1)]),
'raters': np.hstack([np.tile(np.array(['M']), len(final_predicitions)),
np.tile(np.array(['A']), len(final_predicitions))]),
'scores': np.hstack([final_predicitions, final_targetsA])}
df = pd.DataFrame(data=d)
iccA = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccA.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_MA.csv'))
print(iccA)
d = {'targets': np.hstack(
[np.arange(1, len(final_predicitions) + 1, 1), np.arange(1, len(final_predicitions) + 1, 1)]),
'raters': np.hstack([np.tile(np.array(['M']), len(final_predicitions)),
np.tile(np.array(['B']), len(final_predicitions))]),
'scores': np.hstack([final_predicitions, final_targetsB])}
df = pd.DataFrame(data=d)
iccB = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccB.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_MB.csv'))
print(iccB)
d = {'targets': np.hstack(
[np.arange(1, len(final_targetsA) + 1, 1), np.arange(1, len(final_targetsB) + 1, 1)]),
'raters': np.hstack(
[np.tile(np.array(['A']), len(final_targetsA)), np.tile(np.array(['B']), len(final_targetsB))]),
'scores': np.hstack([final_targetsA, final_targetsB])}
df = pd.DataFrame(data=d)
iccC = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccC.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_AB.csv'))
print(iccC)
# Plots
fig, ax = plt.subplots() # P1 vs automated
ax.scatter(final_targetsA, final_predicitions, edgecolors=(0, 0, 0))
ax.plot([final_targetsA.min(), final_targetsA.max()], [final_targetsA.min(), final_targetsA.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist1')
ax.set_ylabel('Automated Method')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_MA_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots() # P2 vs automated
ax.scatter(final_targetsB, final_predicitions, edgecolors=(0, 0, 0))
ax.plot([final_targetsB.min(), final_targetsB.max()], [final_targetsB.min(), final_targetsB.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist2')
ax.set_ylabel('Automated Method')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_MB_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots() # P1 vs P2
ax.scatter(final_targetsA, final_targetsB, edgecolors=(0, 0, 0))
ax.plot([final_targetsA.min(), final_targetsA.max()], [final_targetsA.min(), final_targetsA.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist1')
ax.set_ylabel('Pathologist2')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_AB_plot.png'), dpi=300)
plt.show()
# Bland altman plot
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsA, final_predicitions, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_MA_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsB, final_predicitions, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_MB_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsA, final_targetsB, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_AB_plot.png'), dpi=300)
plt.show()
else:
raise NotImplementedError('mode not supported {}'.format(args.mode))
if __name__ == "__main__":
args = parse_args()
print(vars(args))
# Force the pytorch to create context on the specific device
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if args.seed:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed_all(args.seed)
# Main function
main()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import datetime
from .config import get_config_file_paths
from .util import *
# config file path
GOALS_CONFIG_FILE_PATH = get_config_file_paths()['GOALS_CONFIG_FILE_PATH']
GOALS_CONFIG_FOLDER_PATH = get_folder_path_from_file_path(
GOALS_CONFIG_FILE_PATH)
def strike(text):
    """
    Render *text* struck through using combining long-stroke overlays.

    Every character is followed by U+0336, so terminals that support
    combining marks display the string crossed out.

    :param text: string to strike through
    :return: struck-through string
    """
    overlay = u'\u0336'
    marked = u''.join(ch + overlay for ch in text)
    # join() produces nothing for an empty string, but the contract always
    # yields at least one trailing overlay mark.
    return marked if text else overlay
def get_goal_file_path(goal_name):
    """Return the path of the YAML file that backs *goal_name*."""
    return '{}/{}.yaml'.format(GOALS_CONFIG_FOLDER_PATH, goal_name)
def process(input):
    """
    Entry point for the ``goals`` command: normalise the raw user input
    and hand it to the sub-command dispatcher.

    :param input: raw sub-command string typed by the user
    """
    normalized = input.lower().strip()
    check_sub_command(normalized)
def check_sub_command(c):
    """
    Dispatch a ``goals`` sub-command to its handler.

    :param c: normalised sub-command name
    :return: whatever the matched handler returns; None for an unknown
        sub-command (an error message is printed instead).
    """
    sub_commands = {
        'new': new_goal,
        'tasks': view_related_tasks,
        'view': list_goals,
        'complete': complete_goal,
        'analyze': goals_analysis,
    }
    # Look the handler up first and call it *outside* the try block: the
    # original wrapped the call in ``except KeyError`` too, so a KeyError
    # raised inside a perfectly valid handler (e.g. a missing YAML key)
    # was misreported as "Command does not exist!".
    try:
        handler = sub_commands[c]
    except KeyError:
        click.echo(chalk.red('Command does not exist!'))
        click.echo('Try "yoda goals --help" for more info')
        return None
    return handler()
def goals_dir_check():
    """
    Ensure the goals configuration directory exists, creating it on demand.
    """
    if os.path.exists(GOALS_CONFIG_FOLDER_PATH):
        return
    try:
        os.makedirs(GOALS_CONFIG_FOLDER_PATH)
    except OSError as exc:
        # Another process may create the directory between the existence
        # check and makedirs(); only re-raise genuine failures.
        if exc.errno != errno.EEXIST:
            raise
def append_data_into_file(data, file_path):
    """
    Append one entry to the ``entries`` list of an existing YAML file.

    :param data: entry (dict) to append
    :param file_path: path of the YAML file to update
    """
    # NOTE(review): yaml.load() without an explicit Loader can construct
    # arbitrary Python objects from the file; yaml.safe_load() would be
    # safer here — confirm nothing relies on full loading.
    with open(file_path) as stream:
        contents = yaml.load(stream)
    contents['entries'].append(data)
    # Rewrite the whole document with the new entry included.
    with open(file_path, "w") as stream:
        yaml.dump(contents, stream, default_flow_style=False)
def complete_goal():
    """
    Interactively mark one pending goal as completed.

    Lists every pending goal with its position number, prompts for a goal
    number, flags that entry's ``status`` as 1 and writes the file back.
    """
    not_valid_goal_number = 1
    if os.path.isfile(GOALS_CONFIG_FILE_PATH):
        with open(GOALS_CONFIG_FILE_PATH) as todays_tasks_entry:
            contents = yaml.load(todays_tasks_entry)
            i = 0
            no_goal_left = True
            # First pass: is any goal still pending (status == 0)?
            for entry in contents['entries']:
                i += 1
                if entry['status'] == 0:
                    no_goal_left = False
            if no_goal_left:
                click.echo(chalk.green(
                    'All goals have been completed! Add a new goal by entering "yoda goals new"'))
            else:
                click.echo('Goals:')
                click.echo('----------------')
                click.echo("Number | Deadline | Goal")
                click.echo("-------|-------------|-----")
                i = 0
                # Second pass: print pending goals, numbered by position.
                for entry in contents['entries']:
                    i += 1
                    deadline = entry['deadline']
                    text = entry['text'] if entry['status'] == 0 else strike(
                        entry['text'])
                    if entry['status'] == 0:
                        click.echo(" " + str(i) + " | " +
                                   deadline + " | " + text)
                # Keep prompting until an in-range number is supplied.
                while not_valid_goal_number:
                    click.echo(chalk.blue(
                        'Enter the goal number that you would like to set as completed'))
                    goal_to_be_completed = int(input())
                    # NOTE(review): only the upper bound is checked; 0 or a
                    # negative reply indexes from the end of the list, and a
                    # non-numeric reply raises ValueError — confirm intended.
                    if goal_to_be_completed > len(contents['entries']):
                        click.echo(chalk.red('Please Enter a valid goal number!'))
                    else:
                        contents['entries'][goal_to_be_completed - 1]['status'] = 1
                        input_data(contents, GOALS_CONFIG_FILE_PATH)
                        not_valid_goal_number = 0
    else:
        click.echo(chalk.red(
            'There are no goals set. Set a new goal by entering "yoda goals new"'))
def goal_name_exists(goal_name):
    """Return True when a goal file for *goal_name* already exists."""
    return os.path.isfile(get_goal_file_path(goal_name))
def new_goal():
    """
    Interactively create a new goal.

    Prompts for a single-word alphanumeric name, a description and a
    deadline (YYYY-MM-DD), appends the goal to the global goals file and
    creates an empty per-goal task file.
    """
    goals_dir_check()
    goal_name_not_ok = True
    click.echo(chalk.blue('Input a single-word name of the goal:'))
    # Re-prompt until the name is purely alphanumeric (it becomes a filename).
    while goal_name_not_ok:
        goal_name = input().strip()
        if goal_name.isalnum():
            goal_name_not_ok = False
        else:
            click.echo(chalk.red('Only alphanumeric characters can be used! Please input the goal name:'))
    if goal_name_exists(goal_name):
        click.echo(chalk.red(
            'A goal with this name already exists. Please type "yoda goals view" to see a list of existing goals'))
    else:
        click.echo(chalk.blue('Input description of the goal:'))
        text = input().strip()
        click.echo(chalk.blue('Input due date for the goal (YYYY-MM-DD):'))
        incorrect_date_format = True
        # Re-prompt until the date both parses and round-trips to exactly
        # the typed string (rejects e.g. unpadded or out-of-range dates).
        while incorrect_date_format:
            deadline = input().strip()
            try:
                date_str = datetime.datetime.strptime(deadline, '%Y-%m-%d').strftime('%Y-%m-%d')
                if date_str != deadline:
                    raise ValueError
                incorrect_date_format = False
            except ValueError:
                click.echo(chalk.red("Incorrect data format, should be YYYY-MM-DD. Please repeat:"))
        if os.path.isfile(GOALS_CONFIG_FILE_PATH):
            # Goals file already exists: append the new entry to it.
            setup_data = dict(
                name=goal_name,
                text=text,
                deadline=deadline,
                status=0
            )
            append_data_into_file(setup_data, GOALS_CONFIG_FILE_PATH)
        else:
            # First goal ever: create the goals file with a single entry.
            setup_data = dict(
                entries=[
                    dict(
                        name=goal_name,
                        text=text,
                        deadline=deadline,
                        status=0
                    )
                ]
            )
            input_data(setup_data, GOALS_CONFIG_FILE_PATH)
        # Per-goal file that will hold the tasks assigned to this goal.
        input_data(dict(entries=[]), get_goal_file_path(goal_name))
def goals_analysis():
    """
    Print summary statistics about the configured goals.

    Reports the percentage of incomplete/completed goals, the number of
    missed deadlines, and the number of goals due within the next week
    and the next month. Deadline statistics are computed over every goal,
    completed ones included (matching the original tallying).
    """
    now = datetime.datetime.now()
    total_goals = 0
    total_incomplete_goals = 0
    total_missed_goals = 0
    total_goals_next_week = 0
    total_goals_next_month = 0
    if os.path.isfile(GOALS_CONFIG_FILE_PATH):
        with open(GOALS_CONFIG_FILE_PATH) as goals_file:
            contents = yaml.load(goals_file)
        for entry in contents['entries']:
            total_goals += 1
            if entry['status'] == 0:
                total_incomplete_goals += 1
            deadline = datetime.datetime.strptime(entry['deadline'], '%Y-%m-%d')
            total_missed_goals += (1 if deadline < now else 0)
            total_goals_next_week += (1 if (deadline - now).days <= 7 else 0)
            total_goals_next_month += (1 if (deadline - now).days <= 30 else 0)
        if total_goals == 0:
            # Bug fix: the file can exist with an empty ``entries`` list
            # (e.g. after manual editing), which previously crashed with
            # ZeroDivisionError on the percentage computation below.
            click.echo(chalk.red(
                'There are no goals set. Set a new goal by entering "yoda goals new"'))
            return
        percent_incomplete_goals = total_incomplete_goals * 100 / total_goals
        percent_complete_goals = 100 - percent_incomplete_goals
        click.echo(chalk.red('Percentage of incomplete goals : ' + str(percent_incomplete_goals)))
        click.echo(chalk.green('Percentage of completed goals : ' + str(percent_complete_goals)))
        click.echo(chalk.blue('Number of missed deadlines : ' + str(total_missed_goals)))
        click.echo(chalk.blue('Number of goals due within the next week : ' + str(total_goals_next_week)))
        click.echo(chalk.blue('Number of goals due within the next month : ' + str(total_goals_next_month)))
    else:
        click.echo(chalk.red(
            'There are no goals set. Set a new goal by entering "yoda goals new"'))
def add_task_to_goal(goal_name, date, timestamp):
    """
    Record a diary task (identified by date and timestamp) against a goal.

    :param goal_name: name of the goal to attach the task to
    :param date: task date string
    :param timestamp: task timestamp
    :return: True when the goal file exists and was updated, else False.
    """
    goal_filename = get_goal_file_path(goal_name)
    if not os.path.isfile(goal_filename):
        return False
    append_data_into_file(dict(date=date, timestamp=timestamp), goal_filename)
    return True
def list_goals():
    """
    List all goals sorted chronologically by deadline, then print a
    completion summary.
    """
    if os.path.isfile(GOALS_CONFIG_FILE_PATH):
        with open(GOALS_CONFIG_FILE_PATH) as goals_file:
            contents = yaml.load(goals_file)
            if len(contents):
                # Sort in place by deadline; splitting "YYYY-MM-DD" on "-"
                # yields a list that sorts chronologically as strings.
                contents['entries'].sort(key=lambda x: x['deadline'].split('-'))
                click.echo('Goals')
                click.echo('----------------')
                click.echo("Status | Deadline | Name: text")
                click.echo("-------|-------------|---------------")
                incomplete_goals = 0
                total_tasks = 0
                total_missed_deadline = 0
                for entry in contents['entries']:
                    total_tasks += 1
                    incomplete_goals += (1 if entry['status'] == 0 else 0)
                    deadline = entry['deadline']
                    name = entry['name']
                    # Completed goals are rendered struck through.
                    text = entry['text'] if entry['status'] == 0 else strike(
                        entry['text'])
                    status = "O" if entry['status'] == 0 else "X"
                    deadline_time = datetime.datetime.strptime(deadline, '%Y-%m-%d')
                    now = datetime.datetime.now()
                    total_missed_deadline += (1 if deadline_time < now else 0)
                    click.echo(" " + status + " | " + deadline + " | #" + name + ": " + text)
                click.echo('----------------')
                click.echo('')
                click.echo('Summary:')
                click.echo('----------------')
                if incomplete_goals == 0:
                    click.echo(chalk.green(
                        'All goals have been completed! Set a new goal by entering "yoda goals new"'))
                else:
                    click.echo(chalk.red("Incomplete tasks: " + str(incomplete_goals)))
                    click.echo(chalk.red("Tasks with missed deadline: " + str(total_missed_deadline)))
                    click.echo(chalk.green("Completed tasks: " +
                                           str(total_tasks - incomplete_goals)))
            else:
                click.echo(
                    'There are no goals set. Set a new goal by entering "yoda goals new"')
    else:
        click.echo(
            'There are no goals set. Set a new goal by entering "yoda goals new"')
def view_related_tasks():
    """
    List the diary tasks assigned to a goal chosen interactively by name,
    then print a completion summary.
    """
    from .diary import get_task_info
    not_valid_name = True
    if os.path.isfile(GOALS_CONFIG_FILE_PATH):
        # Re-prompt until an existing goal name is supplied.
        while not_valid_name:
            click.echo(chalk.blue(
                'Enter the goal name that you would like to examine'))
            goal_name = input()
            goal_file_name = get_goal_file_path(goal_name)
            if os.path.isfile(goal_file_name):
                not_valid_name = False
                with open(goal_file_name) as goals_file:
                    contents = yaml.load(goals_file)
                    if len(contents['entries']):
                        total_tasks = 0
                        total_incomplete = 0
                        click.echo('Tasks assigned to the goal:')
                        click.echo('----------------')
                        click.echo("Status | Date | Text")
                        click.echo("-------|---------|-----")
                        for entry in contents['entries']:
                            timestamp = entry['timestamp']
                            date = entry['date']
                            # Task status/text come from the diary module.
                            status, text = get_task_info(timestamp, date)
                            total_tasks += 1
                            total_incomplete += (1 if status == 0 else 0)
                            # Completed tasks are rendered struck through.
                            text = text if status == 0 else strike(text)
                            status = "O" if status == 0 else "X"
                            click.echo(" " + status + " | " + date + "| " + text)
                        click.echo('----------------')
                        click.echo('')
                        click.echo('Summary:')
                        click.echo('----------------')
                        click.echo(chalk.red("Incomplete tasks assigned to the goal: " + str(total_incomplete)))
                        click.echo(chalk.green("Completed tasks assigned to the goal: " +
                                               str(total_tasks - total_incomplete)))
                    else:
                        click.echo(chalk.red(
                            'There are no tasks assigned to the goal. Add a new task by entering "yoda diary nt"'))
    else:
        click.echo(chalk.red(
            'There are no goals set. Set a new goal by entering "yoda goals new"'))
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import argparse
import os
import sys
# sys.path.append(".")
import time
import torch
from tensorboardX import SummaryWriter
from metrics.evaluation import evaluate
from metrics.vae_metrics import VaeEvaluator
from struct_self.dataset import Dataset
from struct_self.dataset import to_example
from utils.config_utils import dict_to_args
from utils.config_utils import yaml_load_dict
from utils.vae_utils import get_eval_dir
from utils.vae_utils import get_exp_info
from utils.vae_utils import create_model
from utils.vae_utils import load_data
from utils.vae_utils import load_model
from utils.vae_utils import log_tracker
from utils.vae_utils import lr_schedule
def train_ae(main_args, model_args, model=None):
    """
    Train a sequence auto-encoder.

    Loops over the training set indefinitely; per batch it minimises the
    negative of ``model.score``, logs running loss to TensorBoard,
    periodically evaluates reconstruction accuracy on the dev set, and
    delegates checkpointing / learning-rate decay / patience handling to
    ``lr_schedule``.

    :param main_args: run-level configuration (batch size, logging cadence, ...)
    :param model_args: model-level configuration
    :param model: optional pre-built model; otherwise created from the args
    """
    train_set, dev_set = load_data(main_args)
    model, optimizer, vocab = create_model(main_args, model_args, model)
    print('begin training, %d training examples, %d dev examples' % (len(train_set), len(dev_set)), file=sys.stderr)
    print('vocab: %s' % repr(vocab.src), file=sys.stderr)
    epoch = 0
    train_iter = 0
    report_loss = report_examples = 0.
    history_dev_scores = []
    num_trial = patience = 0
    model_dir, log_dir = get_exp_info(main_args=main_args, model_args=model_args)
    model_file = model_dir + '.bin'
    writer = SummaryWriter(log_dir)
    # NOTE(review): no explicit exit condition here — training is expected
    # to be stopped by lr_schedule's patience policy or externally.
    while True:
        epoch += 1
        epoch_begin = time.time()
        for batch_examples in train_set.batch_iter(batch_size=main_args.batch_size, shuffle=True):
            train_iter += 1
            optimizer.zero_grad()
            # model.score is a goodness score; minimise its negative.
            loss = -model.score(batch_examples)
            loss_val = torch.sum(loss).item()
            report_loss += loss_val
            report_examples += len(batch_examples)
            loss = torch.mean(loss)
            loss.backward()
            if main_args.clip_grad > 0.:
                torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
            optimizer.step()
            if train_iter % main_args.log_every == 0:
                print('\r[Iter %d] encoder loss=%.5f' %
                      (train_iter,
                       report_loss / report_examples),
                      file=sys.stderr, end=" ")
                writer.add_scalar(
                    tag='AutoEncoder/Train/loss',
                    scalar_value=report_loss / report_examples,
                    global_step=train_iter
                )
                writer.add_scalar(
                    tag='optimize/lr',
                    scalar_value=optimizer.param_groups[0]['lr'],
                    global_step=train_iter,
                )
                # Reset the running averages after each report.
                report_loss = report_examples = 0.
            if train_iter % main_args.dev_every == 0:
                print()
                print('\r[Iter %d] begin validation' % train_iter, file=sys.stderr)
                eval_start = time.time()
                # Reconstruction accuracy: source is both input and target.
                eval_results = evaluate(examples=dev_set.examples, model=model, eval_src='src', eval_tgt='src')
                dev_acc = eval_results['accuracy']
                print('\r[Iter %d] auto_encoder %s=%.5f took %ds' % (
                    train_iter, model.args.eval_mode, dev_acc, time.time() - eval_start),
                    file=sys.stderr)
                writer.add_scalar(
                    tag='AutoEncoder/Dev/%s' % model.args.eval_mode,
                    scalar_value=dev_acc,
                    global_step=train_iter
                )
                is_better = history_dev_scores == [] or dev_acc > max(history_dev_scores)
                history_dev_scores.append(dev_acc)
                writer.add_scalar(
                    tag='AutoEncoder/Dev/best %s' % model.args.eval_mode,
                    scalar_value=max(history_dev_scores),
                    global_step=train_iter
                )
                # Checkpointing / lr decay / early-stopping policy.
                model, optimizer, num_trial, patience = lr_schedule(
                    is_better=is_better,
                    model_dir=model_dir,
                    model_file=model_file,
                    main_args=main_args,
                    patience=patience,
                    num_trial=num_trial,
                    epoch=epoch,
                    model=model,
                    optimizer=optimizer,
                    reload_model=False
                )
        epoch_time = time.time() - epoch_begin
        print('\r[Epoch %d] epoch elapsed %ds' % (epoch, epoch_time), file=sys.stderr)
        writer.add_scalar(
            tag='AutoEncoder/epoch elapsed',
            scalar_value=epoch_time,
            global_step=epoch
        )
def train_vae(main_args, model_args, model=None):
    """
    Train a VAE, optionally with adversarial discriminator objectives.

    Runs an endless epoch loop: per batch it optionally computes the
    discriminator losses, then performs the main VAE update; it logs
    training statistics to TensorBoard, periodically evaluates
    reconstruction / generation / paraphrase quality on the dev set, and
    applies the ``lr_schedule`` checkpoint/early-stopping policy.

    :param main_args: run-level configuration (batch size, logging cadence, ...)
    :param model_args: model-level configuration (model variant, KL anneal, ...)
    :param model: optional pre-built model; otherwise created from the args
    """
    ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
    train_set, dev_set = load_data(main_args)
    model, optimizer, vocab = create_model(main_args, model_args, model)
    model_dir, logdir = get_exp_info(main_args=main_args, model_args=model_args)
    model_file = model_dir + '.bin'
    eval_dir = get_eval_dir(main_args=main_args, model_args=model_args, mode='Trains')
    evaluator = VaeEvaluator(
        model=model,
        out_dir=eval_dir,
        train_batch_size=main_args.batch_size,
        eval_batch_size=model_args.eval_bs,
    )
    # if model_args.tensorboard_logging:
    writer = SummaryWriter(logdir)
    writer.add_text("model", str(model))
    writer.add_text("args", str(main_args))
    writer.add_text("ts", ts)
    train_iter = main_args.start_iter
    epoch = num_trial = patience = 0
    history_elbo = []
    history_bleu = []
    max_kl_item = -1
    max_kl_weight = None
    # "Peak anneal": raise the KL weight until the dev KL term peaks, then
    # freeze the weight at the value seen at the peak (handled below).
    continue_anneal = model_args.peak_anneal
    if model_args.peak_anneal:
        model_args.warm_up = 0
    memory_temp_count = 0
    t_type = torch.Tensor
    # Model variants that carry adversarial discriminators.
    adv_select = ["ADVCoupleVAE", "VSAE", "ACVAE", "DVAE", "SVAE"]
    if model_args.model_select in adv_select:
        if not model_args.dis_train:
            # Interactive safety prompt when dis_train was left unset.
            x = input("you forget set the dis training?,switch it?[Y/N]")
            model_args.dis_train = (x.lower() == "y")
    adv_training = model_args.dis_train and model_args.model_select in adv_select
    if adv_training:
        print("has the adv training process")
    adv_syn = model_args.adv_syn > 0. or model_args.infer_weight * model_args.inf_sem
    adv_sem = model_args.adv_sem > 0. or model_args.infer_weight * model_args.inf_syn
    print(model_args.dev_item.lower())
    while True:
        epoch += 1
        train_track = {}
        for batch_examples in train_set.batch_iter(batch_size=main_args.batch_size, shuffle=True):
            train_iter += 1
            if adv_training:
                # Discriminator passes: gradients are computed but the
                # optimizer steps are commented out below.
                ret_loss = model.get_loss(batch_examples, train_iter, is_dis=True)
                if adv_syn:
                    dis_syn_loss = ret_loss['dis syn']
                    optimizer.zero_grad()
                    dis_syn_loss.backward()
                    if main_args.clip_grad > 0.:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
                    # optimizer.step()
                if adv_sem:
                    ret_loss = model.get_loss(batch_examples, train_iter, is_dis=True)
                    dis_sem_loss = ret_loss['dis sem']
                    optimizer.zero_grad()
                    dis_sem_loss.backward()
                    if main_args.clip_grad > 0.:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
                    # optimizer.step()
            # Main VAE objective.
            ret_loss = model.get_loss(batch_examples, train_iter)
            loss = ret_loss['Loss']
            optimizer.zero_grad()
            loss.backward()
            if main_args.clip_grad > 0.:
                torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
            optimizer.step()
            # NOTE(review): train_iter was already incremented at the top of
            # this loop body, so each batch advances the counter by two —
            # confirm the log/dev cadences expect that.
            train_iter += 1
            # tracker = update_track(loss, train_avg_kl, train_avg_nll, tracker)
            train_track = log_tracker(ret_loss, train_track)
            if train_iter % main_args.log_every == 0:
                train_avg_nll = ret_loss['NLL Loss']
                train_avg_kl = ret_loss['KL Loss']
                _kl_weight = ret_loss['KL Weight']
                for key, val in ret_loss.items():
                    writer.add_scalar(
                        'Train-Iter/VAE/{}'.format(key),
                        val.item() if isinstance(val, t_type) else val,
                        train_iter
                    )
                print("\rTrain-Iter %04d, Loss %9.4f, NLL-Loss %9.4f, KL-Loss %9.4f, KL-Weight %6.3f, WD-Drop %6.3f"
                      % (train_iter, loss.item(), train_avg_nll, train_avg_kl, _kl_weight, model.step_unk_rate),
                      end=' ')
                writer.add_scalar(
                    tag='optimize/lr',
                    scalar_value=optimizer.param_groups[0]['lr'],
                    global_step=train_iter,
                )
            if train_iter % main_args.dev_every == 0 and train_iter > model_args.warm_up:
                # dev_track, eval_results = _test_vae(model, dev_set, main_args, train_iter)
                dev_track, eval_results = evaluator.evaluate_reconstruction(examples=dev_set.examples,
                                                                            eval_desc="dev{}".format(train_iter),
                                                                            eval_step=train_iter, write_down=False)
                _weight = model.get_kl_weight(step=train_iter)
                _kl_item = torch.mean(dev_track['KL Item'])
                # writer.add_scalar("VAE/Valid-Iter/KL Item", _kl_item, train_iter)
                for key, val in dev_track.items():
                    writer.add_scalar(
                        'Valid-Iter/VAE/{}'.format(key),
                        torch.mean(val) if isinstance(val, t_type) else val,
                        train_iter
                    )
                # Peak-anneal bookkeeping: track the KL peak; once the KL
                # item drops far enough below it, freeze the KL weight.
                if continue_anneal and model.step_kl_weight is None:
                    if _kl_item > max_kl_item:
                        max_kl_item = _kl_item
                        max_kl_weight = _weight
                    else:
                        if (max_kl_item - _kl_item) > model_args.stop_clip_kl:
                            model.step_kl_weight = max_kl_weight
                            writer.add_text(tag='peak_anneal',
                                            text_string="fixed the kl weight:{} with kl peak:{} at step:{}".format(
                                                max_kl_weight,
                                                max_kl_item,
                                                train_iter
                                            ), global_step=train_iter)
                            continue_anneal = False
                dev_elbo = torch.mean(dev_track['Model Score'])
                writer.add_scalar("Evaluation/VAE/Dev Score", dev_elbo, train_iter)
                # evaluate bleu
                dev_bleu = eval_results['accuracy']
                print()
                print("Valid-Iter %04d, NLL_Loss:%9.4f, KL_Loss: %9.4f, Sum Score:%9.4f BLEU:%9.4f" % (
                    train_iter,
                    torch.mean(dev_track['NLL Loss']),
                    torch.mean(dev_track['KL Loss']),
                    dev_elbo,
                    eval_results['accuracy']), file=sys.stderr
                )
                writer.add_scalar(
                    tag='Evaluation/VAE/Iter %s' % model.args.eval_mode,
                    scalar_value=dev_bleu,
                    global_step=train_iter
                )
                # NOTE(review): if dev_item matches neither the ELBO nor the
                # BLEU family below, `is_better` is never assigned and its
                # use further down raises NameError — confirm dev_item is
                # always one of the expected values.
                if model_args.dev_item == "ELBO" or model_args.dev_item.lower() == "para-elbo" or model_args.dev_item.lower() == "gen-elbo":
                    is_better = history_elbo == [] or dev_elbo < min(history_elbo)
                elif model_args.dev_item == "BLEU" or model_args.dev_item.lower() == "para-bleu" or model_args.dev_item.lower() == "gen-bleu":
                    is_better = history_bleu == [] or dev_bleu > max(history_bleu)
                history_elbo.append(dev_elbo)
                writer.add_scalar("Evaluation/VAE/Best Score", min(history_elbo), train_iter)
                history_bleu.append(dev_bleu)
                writer.add_scalar("Evaluation/VAE/Best BLEU Score", max(history_bleu), train_iter)
                if is_better:
                    writer.add_scalar(
                        tag='Evaluation/VAE/Best %s' % model.args.eval_mode,
                        scalar_value=dev_bleu,
                        global_step=train_iter
                    )
                    writer.add_scalar(
                        tag='Evaluation/VAE/Best NLL-LOSS',
                        scalar_value=torch.mean(dev_track['NLL Loss']),
                        global_step=train_iter
                    )
                    writer.add_scalar(
                        tag='Evaluation/VAE/Best KL-LOSS',
                        scalar_value=torch.mean(dev_track['KL Loss']),
                        global_step=train_iter
                    )
                # After the anneal midpoint, allow up to 3 extra
                # generation/paraphrase evaluations per dev step.
                if train_iter * 2 > model_args.x0:
                    memory_temp_count = 3
                if model_args.dev_item.lower().startswith("gen") and memory_temp_count > 0:
                    evaluator.evaluate_generation(
                        sample_size=len(dev_set.examples),
                        eval_desc="gen_iter{}".format(train_iter),
                    )
                    memory_temp_count -= 1
                if model_args.dev_item.lower().startswith("para") and memory_temp_count > 0:
                    para_score = evaluator.evaluate_para(
                        eval_dir="/home/user_data/baoy/projects/seq2seq_parser/data/quora-mh/unsupervised",
                        # eval_list=["para.raw.text", "para.text"])
                        # eval_list=["para.raw.text"])
                        eval_list=["dev.para.txt", "test.para.txt"],
                        eval_desc="para_iter{}".format(train_iter))
                    if memory_temp_count == 3:
                        writer.add_scalar(
                            tag='Evaluation/VAE/Para Dev Ori-BLEU',
                            scalar_value=para_score[0][0],
                            global_step=train_iter
                        )
                        writer.add_scalar(
                            tag='Evaluation/VAE/Para Dev Tgt-BLEU',
                            scalar_value=para_score[0][1],
                            global_step=train_iter
                        )
                        if len(para_score) > 1:
                            writer.add_scalar(
                                tag='Evaluation/VAE/Para Test Ori-BLEU',
                                scalar_value=para_score[1][0],
                                global_step=train_iter
                            )
                            writer.add_scalar(
                                tag='Evaluation/VAE/Para Test Tgt-BLEU',
                                scalar_value=para_score[1][1],
                                global_step=train_iter
                            )
                    memory_temp_count -= 1
                # Checkpointing / lr decay / early-stopping policy.
                model, optimizer, num_trial, patience = lr_schedule(
                    is_better=is_better,
                    model_dir=model_dir,
                    model_file=model_file,
                    main_args=main_args,
                    patience=patience,
                    num_trial=num_trial,
                    epoch=epoch,
                    model=model,
                    optimizer=optimizer,
                    reload_model=model_args.reload_model,
                )
                # Evaluation switched the model to eval(); resume training mode.
                model.train()
        elbo = torch.mean(train_track['Model Score'])
        print()
        print("Train-Epoch %02d, Score %9.4f" % (epoch, elbo))
        for key, val in train_track.items():
            writer.add_scalar(
                'Train-Epoch/VAE/{}'.format(key),
                torch.mean(val) if isinstance(val, t_type) else val,
                epoch
            )
def test_vae(main_args, model_args, input_mode=0):
    """
    Evaluate a trained VAE checkpoint.

    :param main_args: run-level configuration (data paths, batch size, ...)
    :param model_args: model-level configuration (model variant, eval batch size, ...)
    :param input_mode: selects the evaluation task:
        0 = reconstruction on dev and test plus sample generation,
        1 = generation only, 2 = paraphrase, 3 = controlled generation,
        4 = style transfer, 5 = random-syntax paraphrase; any other value
        starts an interactive REPL for syntax-transfer probing.
    """
    model = load_model(main_args, model_args, check_dir=False)
    out_dir = get_eval_dir(main_args=main_args, model_args=model_args, mode="Test")
    model.eval()
    # Abort when the evaluation output directory is missing.
    if not os.path.exists(out_dir):
        sys.exit(-1)
    if model_args.model_select.startswith("Origin"):
        # Origin* models require an eval batch size of at least 20.
        model_args.eval_bs = 20 if model_args.eval_bs < 20 else model_args.eval_bs
    evaluator = VaeEvaluator(
        model=model,
        out_dir=out_dir,
        eval_batch_size=model_args.eval_bs,
        train_batch_size=main_args.batch_size
    )
    train_exam = Dataset.from_bin_file(main_args.train_file).examples
    # NOTE(review): hard-coded, machine-specific evaluation data path.
    para_eval_dir = "/home/user_data/baoy/projects/seq2seq_parser/data/quora-mh/unsupervised"
    para_eval_list = ["dev.para.txt"]
    # ["dev.para.txt", "test.para.txt"]
    if input_mode == 0:
        print("========dev reconstructor========")
        test_set = Dataset.from_bin_file(main_args.dev_file)
        evaluator.evaluate_reconstruction(examples=test_set.examples, eval_desc="dev")
        print("finish")
        print("========test reconstructor=======")
        test_set = Dataset.from_bin_file(main_args.test_file)
        evaluator.evaluate_reconstruction(examples=test_set.examples, eval_desc="test")
        print("finish")
        print("========generating samples=======")
        evaluator.evaluate_generation(corpus_examples=train_exam, sample_size=len(test_set.examples), eval_desc="gen")
        print("finish")
    elif input_mode == 1:
        print("========generating samples=======")
        test_exam = Dataset.from_bin_file(main_args.test_file).examples
        evaluator.evaluate_generation(corpus_examples=train_exam, sample_size=len(test_exam), eval_desc="gen")
        print("finish")
    elif input_mode == 2:
        print("========generating paraphrase========")
        evaluator.evaluate_para(eval_dir=para_eval_dir, eval_list=para_eval_list)
        print("finish")
    elif input_mode == 3:
        print("========supervised generation========")
        # evaluator.evaluate_control()
        evaluator.evaluate_control(eval_dir=para_eval_dir, eval_list=para_eval_list)
        print("finish")
    elif input_mode == 4:
        trans_eval_list = ["trans.length.txt", "trans.random.txt"]
        print("========style transfer========")
        evaluator.evaluate_style_transfer(eval_dir=para_eval_dir, eval_list=trans_eval_list, eval_desc="unmatch")
        evaluator.evaluate_style_transfer(eval_dir=para_eval_dir, eval_list=para_eval_list, eval_desc="match")
        print("finish")
    elif input_mode == 5:
        print("========random syntax select========")
        evaluator.evaluate_pure_para(eval_dir=para_eval_dir, eval_list=para_eval_list)
        print("finish")
    else:
        # Interactive REPL: type a raw sentence ("EXIT..." quits), then one
        # or more reference-syntax sentences ("NEXT..." returns to the
        # sentence prompt); prints the syntax-transferred outputs.
        raw = input("raw sent: ")
        while not raw.startswith("EXIT"):
            e = to_example(raw)
            words = model.predict(e)
            print("origin:", " ".join(words[0][0][0]))
            to_ref = input("ref syn : ")
            while not to_ref.startswith("NEXT"):
                syn_ref = to_example(to_ref)
                ret = model.eval_adv(e, syn_ref)
                if not model_args.model_select == "OriginVAE":
                    print("ref syntax: ", " ".join(ret['ref syn'][0][0][0]))
                    print("ori syntax: ", " ".join(ret['ori syn'][0][0][0]))
                    print("switch result: ", " ".join(ret['res'][0][0][0]))
                to_ref = input("ref syn: ")
            raw = input("input : ")
def process_args():
    """
    Parse command-line options, load the YAML config file and convert each
    configuration section into an argparse-style namespace.

    :return: dict mapping section name ('base', 'baseline', 'prior',
        'encoder', 'decoder', 'vae', 'ae') to its namespace, or None when
        that section is absent from the YAML file.
    """
    opt_parser = argparse.ArgumentParser()
    opt_parser.add_argument('--config_files', type=str, help='config_files')
    opt_parser.add_argument('--exp_name', type=str, help='config_files')
    opt_parser.add_argument('--load_src_lm', type=str, default=None)
    opt_parser.add_argument('--mode', type=str, default=None)
    opt = opt_parser.parse_args()
    configs = yaml_load_dict(opt.config_files)

    def section(key):
        # One YAML section -> namespace; None when the section is missing.
        return dict_to_args(configs[key]) if key in configs else None

    base_args = section('base_configs')
    baseline_args = section('baseline_configs')
    prior_args = section('prior_configs')
    encoder_args = section('encoder_configs')
    decoder_args = section('decoder_configs')
    vae_args = section('vae_configs')
    ae_args = section('ae_configs')
    if base_args is not None:
        # Explicit command-line options override the YAML values.
        if opt.mode is not None:
            base_args.mode = opt.mode
        if opt.exp_name is not None:
            base_args.exp_name = opt.exp_name
        if opt.load_src_lm is not None:
            base_args.load_src_lm = opt.load_src_lm
    return {
        'base': base_args,
        "baseline": baseline_args,
        'prior': prior_args,
        'encoder': encoder_args,
        "decoder": decoder_args,
        "vae": vae_args,
        "ae": ae_args,
    }
if __name__ == "__main__":
    config_args = process_args()
    args = config_args['base']
    mode = args.mode
    if mode == "train_sent":
        train_vae(args, config_args['vae'])
    elif mode == "train_ae":
        train_ae(args, config_args['ae'])
    elif mode == "test_vae":
        # Interactive variant: ask which evaluation task to run.
        selected = int(input("select test mode: "))
        test_vae(args, config_args['vae'], input_mode=selected)
    else:
        # The remaining test_* modes differ only in the input_mode passed on.
        test_modes = {
            "test_vaea": 0,
            "test_generating": 1,
            "test_paraphrase": 2,
            "test_control": 3,
            "test_transfer": 4,
            "test_pure_para": 5,
        }
        if mode not in test_modes:
            raise NotImplementedError
        test_vae(args, config_args['vae'], input_mode=test_modes[mode])
|
nilq/baby-python
|
python
|
# coding= UTF-8
# The declaration above tells the interpreter what encoding this source file
# uses, so non-ASCII characters in it parse correctly (Python 2).
import datetime
# Prompt the user for a name and an age. raw_input (Python 2) always returns
# a string, so the age is converted to int explicitly.
name = raw_input('Yo what yo name sucka? ')
age = int(raw_input('Just how ancient are you? '))
now = datetime.datetime.now()
# Calculate when a person will be 100 years old by taking the
# current year, subtracting their current age, then adding 100
# years.
def year_when_99(age):
    """Return the calendar year in which someone aged *age* now turns 99.

    Reads the module-level ``now`` timestamp. NOTE(review): the caller
    prints this as the year the person "will be 100", but the offset here
    is +99 — the message and the computation disagree by one year; confirm
    which is intended.
    """
    birth_year = now.year - age
    return birth_year + 99
# The function's result must be computed and bound to `answer` before it can
# be interpolated into the print statements below.
answer = year_when_99(age)
print "%s, you will be 100 years old in the year %s\n" % (name, answer)
rub_it_in_num = int(raw_input('Give me a number, old fart? '))
# "%" binds tighter than "*", so the fully formatted message is repeated
# rub_it_in_num times.
print "%s, you will be 100 years old in the year %s\n" % (name, answer) * rub_it_in_num
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# type: ignore
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from math import sin, exp, pi, fabs, copysign, log, isinf, acos, cos, sin, sqrt
import sys
# When dependencies are not required, pretend to be PyPy so that optional
# numpy-backed code paths stay disabled.
REQUIRE_DEPENDENCIES = False
if not REQUIRE_DEPENDENCIES:
    IS_PYPY = True
else:
    try:
        # The right way imports the platform module which costs to ms to load!
        # implementation = platform.python_implementation()
        IS_PYPY = 'PyPy' in sys.version
    except AttributeError:
        IS_PYPY = False
#IS_PYPY = True # for testing
#if not IS_PYPY and not REQUIRE_DEPENDENCIES:
#    try:
#        import numpy as np
#    except ImportError:
#        np = None
# Public API of this linear-algebra helper module.
__all__ = ['dot', 'inv', 'det', 'solve', 'norm2', 'inner_product',
           'eye', 'array_as_tridiagonals', 'solve_tridiagonal', 'subset_matrix']
primitive_containers = frozenset([list, tuple])
def det(matrix):
    """Return the determinant of a square ``matrix`` given as nested lists.

    Sizes 2 through 4 use fully expanded Leibniz formulas; size 5 uses a
    common-subexpression-eliminated expansion (about 260 multiplies versus
    480 for the naive form); larger sizes fall back to ``numpy.linalg.det``.

    The expansions were generated with sympy, e.g.::

        >> from sympy import *
        >> from sympy.abc import *
        >> Matrix([[a, b], [c, d]]).det()
        a*d - b*c
        >> Matrix([[a, b, c], [d, e, f], [g, h, i]]).det()
        a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g
    """
    size = len(matrix)
    if size == 1:
        # NOTE(review): for a nested 1x1 matrix [[a]] this returns the row
        # list [a], not the scalar a; it is only correct if callers pass a
        # flat one-element sequence — confirm the intended input shape.
        return matrix[0]
    elif size == 2:
        (a, b), (c, d) = matrix
        return a*d - c*b
    elif size == 3:
        (a, b, c), (d, e, f), (g, h, i) = matrix
        return a*(e*i - h*f) - d*(b*i - h*c) + g*(b*f - e*c)
    elif size == 4:
        (a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix
        return (a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n
                - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m
                + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m
                - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m)
    elif size == 5:
        # CSE-optimised expansion: the x0..x79 temporaries are the shared
        # two- and three-factor products of the 5x5 Leibniz formula.
        (a, b, c, d, e), (f, g, h, i, j), (k, l, m, n, o), (p, q, r, s, t), (u, v, w, x, y) = matrix
        x0 = s*y
        x1 = a*g*m
        x2 = t*w
        x3 = a*g*n
        x4 = r*x
        x5 = a*g*o
        x6 = t*x
        x7 = a*h*l
        x8 = q*y
        x9 = a*h*n
        x10 = s*v
        x11 = a*h*o
        x12 = r*y
        x13 = a*i*l
        x14 = t*v
        x15 = a*i*m
        x16 = q*w
        x17 = a*i*o
        x18 = s*w
        x19 = a*j*l
        x20 = q*x
        x21 = a*j*m
        x22 = r*v
        x23 = a*j*n
        x24 = b*f*m
        x25 = b*f*n
        x26 = b*f*o
        x27 = b*h*k
        x28 = t*u
        x29 = b*h*n
        x30 = p*x
        x31 = b*h*o
        x32 = b*i*k
        x33 = p*y
        x34 = b*i*m
        x35 = r*u
        x36 = b*i*o
        x37 = b*j*k
        x38 = s*u
        x39 = b*j*m
        x40 = p*w
        x41 = b*j*n
        x42 = c*f*l
        x43 = c*f*n
        x44 = c*f*o
        x45 = c*g*k
        x46 = c*g*n
        x47 = c*g*o
        x48 = c*i*k
        x49 = c*i*l
        x50 = p*v
        x51 = c*i*o
        x52 = c*j*k
        x53 = c*j*l
        x54 = q*u
        x55 = c*j*n
        x56 = d*f*l
        x57 = d*f*m
        x58 = d*f*o
        x59 = d*g*k
        x60 = d*g*m
        x61 = d*g*o
        x62 = d*h*k
        x63 = d*h*l
        x64 = d*h*o
        x65 = d*j*k
        x66 = d*j*l
        x67 = d*j*m
        x68 = e*f*l
        x69 = e*f*m
        x70 = e*f*n
        x71 = e*g*k
        x72 = e*g*m
        x73 = e*g*n
        x74 = e*h*k
        x75 = e*h*l
        x76 = e*h*n
        x77 = e*i*k
        x78 = e*i*l
        x79 = e*i*m
        return (x0*x1 - x0*x24 + x0*x27 + x0*x42 - x0*x45 - x0*x7 - x1*x6
                + x10*x11 - x10*x21 - x10*x44 + x10*x52 + x10*x69 - x10*x74
                - x11*x20 + x12*x13 + x12*x25 - x12*x3 - x12*x32 - x12*x56
                + x12*x59 - x13*x2 + x14*x15 + x14*x43 - x14*x48 - x14*x57
                + x14*x62 - x14*x9 - x15*x8 + x16*x17 - x16*x23 - x16*x58
                + x16*x65 + x16*x70 - x16*x77 - x17*x22 + x18*x19 + x18*x26
                - x18*x37 - x18*x5 - x18*x68 + x18*x71 - x19*x4 - x2*x25
                + x2*x3 + x2*x32 + x2*x56 - x2*x59 + x20*x21 + x20*x44
                - x20*x52 - x20*x69 + x20*x74 + x22*x23 + x22*x58 - x22*x65
                - x22*x70 + x22*x77 + x24*x6 - x26*x4 - x27*x6 + x28*x29
                - x28*x34 - x28*x46 + x28*x49 + x28*x60 - x28*x63 - x29*x33
                + x30*x31 - x30*x39 - x30*x47 + x30*x53 + x30*x72 - x30*x75
                - x31*x38 + x33*x34 + x33*x46 - x33*x49 - x33*x60 + x33*x63
                + x35*x36 - x35*x41 - x35*x61 + x35*x66 + x35*x73 - x35*x78
                - x36*x40 + x37*x4 + x38*x39 + x38*x47 - x38*x53 - x38*x72
                + x38*x75 + x4*x5 + x4*x68 - x4*x71 + x40*x41 + x40*x61
                - x40*x66 - x40*x73 + x40*x78 - x42*x6 - x43*x8 + x45*x6
                + x48*x8 + x50*x51 - x50*x55 - x50*x64 + x50*x67 + x50*x76
                - x50*x79 - x51*x54 + x54*x55 + x54*x64 - x54*x67 - x54*x76
                + x54*x79 + x57*x8 + x6*x7 - x62*x8 + x8*x9)
    else:
        # TODO algorithm?
        import numpy as np
        return float(np.linalg.det(matrix))
def inv(matrix):
    """Return the inverse of a square `matrix` (list of lists; a flat
    one-element list is accepted for the 1x1 case).

    Sizes 1-4 use hard-coded expressions generated with sympy's common
    subexpression elimination (cse); larger sizes fall back to the
    pure-Python LU-decomposition inverse.

    5 has way too many multiplies.
    >> from sympy import *
    >> from sympy.abc import *
    >> Matrix([a]).inv()
    Matrix([[1/a]])
    >> cse(Matrix([[a, b], [c, d]]).inv())
    Matrix([
    [1/a + b*c/(a**2*(d - b*c/a)), -b/(a*(d - b*c/a))],
    [                -c/(a*(d - b*c/a)),                  1/(d - b*c/a)]])
    >> m_3 = Matrix([[a, b, c], [d, e, f], [g, h, i]])
    >> #cse(m_3.inv())
    >> m_4 = Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]])
    >> cse(m_4.inv())
    # Note: for 3, 4 - forgot to generate code using optimizations='basic'
    """
    size = len(matrix)
    if size == 1:
        try:
            # Flat input [a] -> [1/a]
            return [1.0/matrix[0]]
        except:
            # Nested input [[a]] -> [1/a]
            return [1.0/matrix[0][0]]
    elif size == 2:
        try:
            (a, b), (c, d) = matrix
            x0 = 1.0/a
            x1 = b*x0
            x2 = 1.0/(d - c*x1)
            x3 = c*x2
            return [[x0 + b*x3*x0*x0, -x1*x2],
                    [-x0*x3, x2]]
        except:
            # Singular or otherwise ill-behaved input: defer to numpy.
            import numpy as np
            return np.linalg.inv(matrix).tolist()
    elif size == 3:
        # sympy-cse generated; x15 is the reciprocal determinant factor.
        (a, b, c), (d, e, f), (g, h, i) = matrix
        x0 = 1./a
        x1 = b*d
        x2 = e - x0*x1
        x3 = 1./x2
        x4 = b*g
        x5 = h - x0*x4
        x6 = x0*x3
        x7 = d*x6
        x8 = -g*x0 + x5*x7
        x9 = c*d
        x10 = f - x0*x9
        x11 = b*x6
        x12 = c*x0 - x10*x11
        x13 = a*e
        x14 = -x1 + x13
        x15 = 1./(-a*f*h - c*e*g + f*x4 + h*x9 - i*x1 + i*x13)
        x16 = x14*x15
        x17 = x12*x16
        x18 = x14*x15*x3
        x19 = x18*x5
        x20 = x10*x18
        return [[x0 - x17*x8 + x1*x3*x0*x0, -x11 + x12*x19, -x17],
                [-x20*x8 - x7, x10*x16*x5*x2**-2 + x3, -x20],
                [ x16*x8, -x19, x16]]
    elif size == 4:
        # sympy-cse generated; x51 is the reciprocal determinant factor.
        (a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix
        x0 = 1./a
        x1 = b*e
        x2 = f - x0*x1
        x3 = 1./x2
        x4 = i*x0
        x5 = -b*x4 + j
        x6 = x0*x3
        x7 = e*x6
        x8 = -x4 + x5*x7
        x9 = c*x0
        x10 = -e*x9 + g
        x11 = b*x6
        x12 = -x10*x11 + x9
        x13 = a*f
        x14 = -x1 + x13
        x15 = k*x13
        x16 = b*g*i
        x17 = c*e*j
        x18 = a*g*j
        x19 = k*x1
        x20 = c*f*i
        x21 = x15 + x16 + x17 - x18 - x19 - x20
        x22 = 1/x21
        x23 = x14*x22
        x24 = x12*x23
        x25 = m*x0
        x26 = -b*x25 + n
        x27 = x26*x3
        x28 = -m*x9 + o - x10*x27
        x29 = x23*x8
        x30 = -x25 + x26*x7 - x28*x29
        x31 = d*x0
        x32 = -e*x31 + h
        x33 = x3*x32
        x34 = -i*x31 + l - x33*x5
        x35 = -x11*x32 - x24*x34 + x31
        x36 = a*n
        x37 = g*l
        x38 = h*o
        x39 = l*o
        x40 = b*m
        x41 = h*k
        x42 = c*l
        x43 = f*m
        x44 = c*h
        x45 = i*n
        x46 = d*k
        x47 = e*n
        x48 = d*o
        x49 = d*g
        x50 = j*m
        x51 = 1.0/(a*j*x38 - b*i*x38 - e*j*x48 + f*i*x48 + p*x15
                   + p*x16 + p*x17 - p*x18 - p*x19 - p*x20 + x1*x39
                   - x13*x39 + x36*x37 - x36*x41 - x37*x40 + x40*x41
                   + x42*x43 - x42*x47 - x43*x46 + x44*x45 - x44*x50
                   - x45*x49 + x46*x47 + x49*x50)
        x52 = x21*x51
        x53 = x35*x52
        x54 = x14*x22*x3
        x55 = x5*x54
        x56 = -x27 + x28*x55
        x57 = x52*x56
        x58 = x14*x51
        x59 = x28*x58
        x60 = x10*x54
        x61 = x33 - x34*x60
        x62 = x52*x61
        x63 = x34*x58
        return [[x0 - x24*x8 - x30*x53 + x1*x3*x0*x0, -x11 + x12*x55 - x35*x57, -x24 + x35*x59, -x53],
                [-x30*x62 - x60*x8 - x7, x10*x23*x5*x2**-2 + x3 - x56*x62, x59*x61 - x60, -x62],
                [x29 - x30*x63, -x55 - x56*x63, x14*x14*x22*x28*x34*x51 + x23, -x63],
                [x30*x52, x57, -x59, x52]]
    else:
        # General case: pure-Python LU-based inverse.
        return inv_lu(matrix)
        # TODO algorithm?
        # import numpy as np
        # return np.linalg.inv(matrix).tolist()
def shape(value):
    '''Find and return the shape of an array, whether it is a numpy array or
    a list-of-lists or other combination of iterators.

    Parameters
    ----------
    value : various
        Input array, [-]

    Returns
    -------
    shape : tuple(int, dimension)
        Dimensions of array, [-]

    Notes
    -----
    It is assumed the shape is consistent - not something like [[1.1, 2.2], [2.4]]

    Examples
    --------
    >>> shape([])
    (0,)
    >>> shape([1.1, 2.2, 5.5])
    (3,)
    >>> shape([[1.1, 2.2, 5.5], [2.0, 1.1, 1.5]])
    (2, 3)
    >>> shape([[[1.1,], [2.0], [1.1]]])
    (1, 3, 1)
    >>> shape(['110-54-3'])
    (1,)
    '''
    # numpy-like objects carry their shape directly.
    try:
        return value.shape
    except:
        pass
    found = [len(value)]
    # Probe nested containers (up to 10 levels); any failure - empty input,
    # scalars, or unsupported element types - simply stops the descent.
    try:
        probe = value[0]
        depth = 0
        while depth < 10 and type(probe) in primitive_containers:
            found.append(len(probe))
            probe = probe[0]
            depth += 1
    except:
        pass
    return tuple(found)
def eye(N):
    """Return the N-by-N identity matrix as a list of lists of floats."""
    return [[1.0 if row == col else 0.0 for col in range(N)]
            for row in range(N)]
def dot(a, b):
    """Matrix-vector product when `a` is 2D; otherwise the scalar product
    of the two vectors, wrapped in a one-element list."""
    try:
        return [sum(x*y for x, y in zip(row, b)) for row in a]
    except:
        # `a` was a flat vector - fall back to a plain inner product.
        return [sum(x*y for x, y in zip(a, b))]
def inner_product(a, b):
    """Return the scalar (dot) product of vectors `a` and `b`."""
    # Start the sum at 0.0 so an empty input yields a float, as before.
    return sum((a[i]*b[i] for i in range(len(a))), 0.0)
def inplace_LU(A, ipivot, N):
    """Factor the N-by-N matrix stored in `A` into LU form in place, with
    partial pivoting (Crout-style elimination).

    The layout is 1-indexed: `A` is (N+1)x(N+1) with row and column 0
    unused.  The pivot row chosen for each column j is recorded in
    ``ipivot[j]``.  Raises ValueError if the matrix is singular.
    """
    Np1 = N+1
    for j in range(1, Np1):
        # Rows above the diagonal: compute the U entries of column j.
        for i in range(1, j):
            tot = A[i][j]
            for k in range(1, i):
                tot -= A[i][k]*A[k][j]
            A[i][j] = tot
        apiv = 0.0
        # Rows at/below the diagonal: eliminate and track the largest
        # magnitude entry as the pivot candidate.
        for i in range(j, Np1):
            tot = A[i][j]
            for k in range(1, j):
                tot -= A[i][k]*A[k][j]
            A[i][j] = tot

            if apiv < abs(A[i][j]):
                apiv, ipiv = abs(A[i][j]), i
        if apiv == 0:
            raise ValueError("Singular matrix")
        ipivot[j] = ipiv

        # Swap the pivot row into position j.
        if ipiv != j:
            for k in range(1, Np1):
                t = A[ipiv][k]
                A[ipiv][k] = A[j][k]
                A[j][k] = t

        # Scale the L column below the diagonal by the pivot reciprocal.
        Ajjinv = 1.0/A[j][j]
        for i in range(j+1, Np1):
            A[i][j] *= Ajjinv
    return None
def solve_from_lu(A, pivots, b, N):
    """Solve the system given the 1-indexed LU factorization produced by
    ``inplace_LU`` (`A` and `pivots`).

    Returns the solution padded with a leading 0.0 element; callers slice
    with ``[1:]`` to recover the 0-indexed answer.  `b` is not modified.
    """
    Np1 = N + 1
    # Note- list call is very slow faster to replace with [i for i in row]
    b = [0.0] + [i for i in b] #list(b)
    # Forward substitution, applying the row permutation on the fly.
    for i in range(1, Np1):
        tot = b[pivots[i]]
        b[pivots[i]] = b[i]
        for j in range(1, i):
            tot -= A[i][j]*b[j]
        b[i] = tot
    # Back substitution.
    for i in range(N, 0, -1):
        tot = b[i]
        for j in range(i+1, Np1):
            tot -= A[i][j]*b[j]
        b[i] = tot/A[i][i]
    return b
def solve_LU_decomposition(A, b):
    """Solve the linear system ``A x = b`` with an LU decomposition and
    partial pivoting; returns the solution as a plain list."""
    N = len(b)
    # Pad to the 1-based layout expected by inplace_LU: an all-zero row on
    # top and a zero column at the left of every row.
    padded = [[0.0]*(N+1)]
    for row in A:
        padded.append([0.0] + [v for v in row])
    pivots = [0.0]*(N+1)
    inplace_LU(padded, pivots, N)
    return solve_from_lu(padded, pivots, b, N)[1:]
def inv_lu(a):
    """Invert the square matrix `a` (list of lists) by LU-factoring it once
    and then solving for each unit basis vector, one column at a time."""
    N = len(a)
    Np1 = N + 1
    # Pad to the 1-based layout expected by inplace_LU/solve_from_lu.
    padded = [[0.0]*Np1]
    for row in a:
        padded.append([0.0] + [v for v in row])
    pivots = [0]*Np1
    inplace_LU(padded, pivots, N)
    ainv = [[0.0]*N for _ in range(N)]
    for col in range(N):
        rhs = [0.0]*N
        rhs[col] = 1.0
        solution = solve_from_lu(padded, pivots, rhs, N)[1:]
        for row_idx in range(N):
            ainv[row_idx][col] = solution[row_idx]
    return ainv
def solve(a, b):
    """Solve the linear system ``a x = b``, returning the result as a list.

    Systems of size <= 4 use the hard-coded `inv` routines; larger systems
    use numpy when it is available (and not on PyPy), otherwise the
    pure-Python LU solver.

    Bug fix: the previous code tested ``np is None`` before the local
    ``import numpy as np`` statement, which made `np` an unbound local and
    raised UnboundLocalError for any system larger than 4x4.
    """
    if len(a) > 4:
        if IS_PYPY:
            return solve_LU_decomposition(a, b)
        try:
            import numpy as np
        except ImportError:
            # numpy unavailable - fall back to the pure-Python path.
            return solve_LU_decomposition(a, b)
        return np.linalg.solve(a, b).tolist()
    else:
        return dot(inv(a), b)
def norm2(arr):
    """Return the Euclidean (2-) norm of the vector `arr`."""
    # Start at 0.0 so the empty vector gives sqrt(0.0), matching floats.
    return sqrt(sum((v*v for v in arr), 0.0))
def array_as_tridiagonals(arr):
    """Extract the three bands of a (tri)diagonal square matrix.

    Returns ``(a, b, c)`` where `b` is the main diagonal, `a` the
    subdiagonal and `c` the superdiagonal; entries of `arr` away from the
    three bands are ignored.
    """
    a = []
    b = [arr[0][0]]
    c = []
    prev_row = arr[0]
    for i in range(1, len(arr[0])):
        curr_row = arr[i]
        b.append(curr_row[i])
        c.append(prev_row[i])
        a.append(curr_row[i-1])
        prev_row = curr_row
    return a, b, c
def tridiagonals_as_array(a, b, c, zero=0.0):
    """Assemble a full N-by-N matrix from its tridiagonal bands.

    `b` is the main diagonal (length N); `a` and `c` are the sub- and
    superdiagonals (length N-1).  Every other entry is set to `zero`.
    """
    N = len(b)
    arr = [[zero]*N for _ in range(N)]
    arr[0][0] = b[0]
    for i in range(1, N):
        arr[i][i] = b[i]      # main diagonal
        arr[i][i-1] = a[i-1]  # subdiagonal
        arr[i-1][i] = c[i-1]  # superdiagonal
    return arr
def solve_tridiagonal(a, b, c, d):
    """Solve a tridiagonal linear system with the Thomas algorithm.

    `a`, `b`, `c` are the sub-, main- and super-diagonals, `d` the
    right-hand side.  The inputs are left untouched; a new list holding
    the solution is returned.
    """
    # Work on copies so the caller's arrays survive.
    diag = [v for v in b]
    rhs = [v for v in d]
    N = len(rhs)
    # Forward elimination.
    for i in range(N - 1):
        factor = a[i]/diag[i]
        diag[i+1] -= factor*c[i]
        rhs[i+1] -= factor*rhs[i]
    # Back substitution, reusing `diag` to hold the solution.
    diag[-1] = rhs[-1]/diag[-1]
    for i in range(N-2, -1, -1):
        diag[i] = (rhs[i] - c[i]*diag[i+1])/diag[i]
    return diag
def subset_matrix(whole, subset):
    """Return the square submatrix of `whole` selecting the rows and
    columns given by `subset` (an iterable of indices, or a slice).

    Bug fix: the previous code called ``range(subset.start, subset.stop,
    subset.step)`` directly, which raised TypeError for slices with an
    implicit start or step such as ``whole[0:3]``'s ``slice(0, 3)``;
    ``slice.indices`` normalizes the missing values (and negative bounds).
    """
    if type(subset) is slice:
        subset = range(*subset.indices(len(whole)))
    return [[whole[i][j] for j in subset] for i in subset]
|
nilq/baby-python
|
python
|
""" QLayouted module. """
# ISC License
#
# Copyright (c) 2020–2022, Paul Wilhelm, M. Sc. <anfrage@paulwilhelm.de>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from typing import Dict, Tuple, Callable, Union, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLayout, QWidget, QPushButton
from magneticalc.QtWidgets2.QButtons import QButtons
class QLayouted:
    """ QLayouted class.

    Mixin that equips a widget with a vertical or horizontal box layout and
    convenience pass-throughs (addWidget, addLayout, addSpacing, addButtons).
    """

    def __init__(self, direction: str = "vertical") -> None:
        """
        Initializes the QLayouted class.
        This adds a layout and several related functions like addWidget() to the parent class.

        @param direction: Sets "vertical" or "horizontal" layout
        """
        self._layout = QVBoxLayout() if direction == "vertical" else QHBoxLayout()

    def install_layout(self, parent: QWidget) -> None:
        """
        Installs this layout in the parent.

        @param parent: Parent widget
        """
        parent.setLayout(self._layout)

    # noinspection PyPep8Naming
    def addWidget(self, widget, alignment: Optional[Union[Qt.Alignment, Qt.AlignmentFlag]] = None) -> None:
        """
        Adds widget.

        @param widget: QWidget
        @param alignment: Alignment
        """
        # Bug fix: both branches previously called addWidget(widget), so the
        # alignment argument was silently dropped.  Compare against None
        # (not truthiness) because a zero alignment flag is falsy.
        if alignment is not None:
            self._layout.addWidget(widget, alignment=alignment)
        else:
            self._layout.addWidget(widget)

    # noinspection PyPep8Naming
    def addLayout(self, layout: QLayout) -> None:
        """
        Adds layout.

        @param layout: QLayout
        """
        self._layout.addLayout(layout)

    # noinspection PyPep8Naming
    def addSpacing(self, spacing: int) -> None:
        """
        Adds spacing.

        @param spacing: Spacing value
        """
        self._layout.addSpacing(spacing)

    # noinspection PyPep8Naming
    def addButtons(self, data: Dict[str, Tuple[str, Callable]]) -> Dict[int, QPushButton]:
        """
        Adds buttons.

        @param data: Dictionary {text: (icon, callback), …}
        @return: Dictionary {index: QPushButton, …}
        """
        buttons = QButtons(data)
        self.addLayout(buttons)
        return buttons.dictionary
|
nilq/baby-python
|
python
|
#import json
def parse(command):
    """Return `command` unchanged (JSON parsing is currently disabled)."""
    return command
def main():
    """Entry point placeholder; does nothing yet."""
    pass


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from happy_bittorrent.algorithms.torrent_manager import *
|
nilq/baby-python
|
python
|
import pytest
import shutil
import zipfile
from pathlib import Path
from squirrel.settings import AddonInstallerSettings
from squirrel.addons import ZipAddon
from test_addon_install import test_addon_install_path
from test_addon_install import test_addon_backup_path
from test_addon_install import settings
from fixtures import zip_files
from fixtures import valid_addons
from fixtures import invalid_addons
# Gather valid addons
# Install them
# @pytest.mark.parametrize('file_or_folder', valid_addons)
# NOTE(review): the parametrize below declares a 'targets' argument that the
# function signature does not accept - pytest will reject this; confirm
# whether 'targets' should be added as a parameter or the decorator removed.
@pytest.mark.parametrize('targets', [valid_addons, ])
def test_install_valid_addons_and_index(
    settings,
    zip_files,
    test_addon_install_path,
    test_addon_backup_path,
):
    # Point the installer settings at the test install/backup directories.
    settings.addon_path = test_addon_install_path
    settings.backup_path = test_addon_backup_path
    # Wrap each fixture-provided zip in a ZipAddon and install it.
    addons = [ZipAddon(zip_file, settings=settings) for zip_file in zip_files]
    for addon in addons:
        addon.install()
    # addon = ZipAddon(addon_filename=zip_file, settings=settings)
    # addon.install()
# Run the indexing
# Check the index content
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import csv
import hashlib
import os
import sys
# Sequential counter for the app/version trees walked; used only for the
# progress messages printed to stderr.
idx = 1
# VCS metadata directories skipped entirely during traversal.
IGNORED_DIRECTORIES = {'.git', '.svn'}
def walk(version_root, app, version):
    """Hash every file under `version_root` and print one TSV line per
    file: app, version, sha256 digest, and the file's directory depth
    below the root.  Per-file errors are reported to stderr and skipped.
    """
    # Bug fix: the original test was `type(version) == type(bytes)`, which
    # compares against the metaclass `type` and never matches a bytes
    # value, so bytes version names were never decoded.
    if isinstance(version, bytes):
        version = version.decode("utf8")
    if version in IGNORED_DIRECTORIES:
        return
    global idx
    print('av %d %s' % (idx, version_root), file=sys.stderr)
    idx += 1
    for root, d_names, f_names in os.walk(version_root):
        # Prune ignored directories in place so os.walk skips them.
        for ignored_directory in IGNORED_DIRECTORIES:
            if ignored_directory in d_names:
                d_names.remove(ignored_directory)
        for f in f_names:
            try:
                file_path = os.path.join(root, f)
                hsh = sha256_f(file_path)
                # print('%s\t%s\t%s\t%s\t%s' % (
                #     app, version, hsh, remove_prefix(file_path, version_root).count('/'), file_path))
                print('%s\t%s\t%s\t%s' % (app, version, hsh, remove_prefix(file_path, version_root).count('/')))
            except Exception:
                print('err: %s' % str(sys.exc_info()), file=sys.stderr)
# Files are hashed in 16 KiB chunks to bound memory use.
BLOCK_SIZE = 16 * (2 ** 10)


def sha256_f(path):
    """Return the hex SHA-256 digest of the file at `path`."""
    digest = hashlib.sha256()
    with open(path, 'rb') as handle:
        while True:
            chunk = handle.read(BLOCK_SIZE)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def remove_prefix(string, prefix):
    """Strip `prefix` from the start of `string` when present."""
    if string.startswith(prefix):
        return string[len(prefix):]
    return string
def subdirectories(path):
    """Return (name, full_path) pairs for each immediate subdirectory of `path`."""
    pairs = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            pairs.append((entry, full))
    return pairs
def main(root, db):
    # Scan the WordPress-style mirror under `root`, hashing each app/version
    # tree that is not already present in the optional TSV database `db`.
    already_parsed_avs = set()
    if db is not None:
        # `db` is a TSV file whose first two columns are (app, version).
        with open(db, 'r') as db_file:
            for row in csv.reader(db_file, delimiter='\t'):
                already_parsed_avs.add((row[0], row[1]))
    def filtered_walk(vp, a, v):
        # Walk only (app, version) pairs not already in the database.
        if not ((a, v) in already_parsed_avs):
            walk(vp, a, v)
    for (directory, path) in subdirectories(root):
        app = directory
        if app.endswith('-cores'):
            # Layout: <root>/<x>-cores/<version>/...
            for (version, version_path) in subdirectories(path):
                filtered_walk(version_path, app, version)
        elif app.endswith('-themes'):
            # Layout: <root>/<x>-themes/<theme>/<version>/...; app -> 'wp.t<theme>'
            for (app, app_path) in subdirectories(path):
                app = 'wp.t' + app
                for (version, version_path) in subdirectories(app_path):
                    filtered_walk(version_path, app, version)
        elif app.endswith('-plugins'):
            # Layout: <root>/<x>-plugins/<plugin>/{trunk,tags/<version>}/...;
            # app -> 'wp.p<plugin>'
            for (app, app_path) in subdirectories(path):
                app = 'wp.p' + app
                trunk_path = os.path.join(app_path, 'trunk')
                tags_path = os.path.join(app_path, 'tags')
                if os.path.isdir(trunk_path):
                    filtered_walk(trunk_path, app, 'trunk')
                if os.path.isdir(tags_path):
                    for (version, version_path) in subdirectories(tags_path):
                        filtered_walk(version_path, app, version)
        # local testing only branch!
        # else:
        #     for (version, version_path) in subdirectories(path):
        #         filtered_walk(version_path, app, version)
if __name__ == '__main__':
    # Usage: scanner.py path_to_scan [already_scanned_av_csv]
    if len(sys.argv) < 2:
        print('scanner.py path_to_scan [already_scanned_av_csv]', file=sys.stderr)
    elif len(sys.argv) == 2:
        main(sys.argv[1], None)
    elif len(sys.argv) > 2:
        main(sys.argv[1], sys.argv[2])
|
nilq/baby-python
|
python
|
import logging
from pathlib import Path
import whoosh
from whoosh import qparser
from whoosh.filedb.filestore import FileStorage
from whoosh.index import EmptyIndexError
from whoosh.query import Every
from django.conf import settings
from mayan.apps.common.utils import any_to_bool, parse_range
from mayan.apps.lock_manager.backends.base import LockingBackend
from mayan.apps.lock_manager.exceptions import LockError
from ..classes import SearchBackend, SearchModel
from ..exceptions import DynamicSearchRetry
from ..settings import setting_results_limit
from .literals import (
DJANGO_TO_WHOOSH_FIELD_MAP, TEXT_LOCK_INSTANCE_DEINDEX,
TEXT_LOCK_INSTANCE_INDEX, WHOOSH_INDEX_DIRECTORY_NAME,
)
logger = logging.getLogger(name=__name__)
class WhooshSearchBackend(SearchBackend):
    """Search backend backed by the Whoosh full-text engine.

    One on-disk Whoosh index is kept per ``SearchModel``, stored under
    ``index_path`` (default ``MEDIA_ROOT/<WHOOSH_INDEX_DIRECTORY_NAME>``).
    Index mutations are serialized with distributed locks so that
    concurrent workers do not corrupt the index.
    """
    field_map = DJANGO_TO_WHOOSH_FIELD_MAP

    def __init__(self, **kwargs):
        """Accepts optional ``index_path``, ``writer_limitmb``,
        ``writer_multisegment`` and ``writer_procs`` tuning arguments; any
        remaining keyword arguments go to ``SearchBackend``."""
        index_path = kwargs.pop('index_path', None)
        writer_limitmb = kwargs.pop('writer_limitmb', 128)
        writer_multisegment = kwargs.pop('writer_multisegment', False)
        writer_procs = kwargs.pop('writer_procs', 1)
        super().__init__(**kwargs)

        self.index_path = Path(
            index_path or Path(settings.MEDIA_ROOT, WHOOSH_INDEX_DIRECTORY_NAME)
        )

        # The writer options may arrive as strings (e.g. from settings);
        # normalize them to the types Whoosh's writer expects.
        if writer_limitmb:
            writer_limitmb = int(writer_limitmb)

        if writer_multisegment:
            writer_multisegment = any_to_bool(value=writer_multisegment)

        if writer_procs:
            writer_procs = int(writer_procs)

        self.writer_kwargs = {
            'limitmb': writer_limitmb, 'multisegment': writer_multisegment,
            'procs': writer_procs
        }

    def _get_status(self):
        """Return a human readable summary with the estimated document
        count for every search model's index."""
        result = []
        title = 'Whoosh search model indexing status'
        result.append(title)
        result.append(len(title) * '=')
        for search_model in SearchModel.all():
            index = self.get_or_create_index(search_model=search_model)
            search_results = index.searcher().search(Every('id'))
            result.append(
                '{}: {}'.format(
                    search_model.label, search_results.estimated_length()
                )
            )
        return '\n'.join(result)

    def _initialize(self):
        # Make sure the on-disk index directory exists.
        self.index_path.mkdir(exist_ok=True)

    def _search(
        self, query, search_model, user, global_and_search=False,
        ignore_limit=False
    ):
        """Run `query` (a field name to term text mapping) against the
        model's index and return the matching Django queryset.
        `global_and_search` selects AND instead of OR between terms."""
        index = self.get_or_create_index(search_model=search_model)

        id_list = []
        with index.searcher() as searcher:
            search_string = []

            for key, value in query.items():
                search_string.append(
                    '{}:({})'.format(key, value)
                )

            global_logic_string = ' AND ' if global_and_search else ' OR '
            search_string = global_logic_string.join(search_string)

            logger.debug('search_string: %s', search_string)

            parser = qparser.QueryParser(
                fieldname='_', schema=index.schema
            )
            # Disable wildcard queries but keep prefix searches.
            parser.remove_plugin_class(cls=qparser.WildcardPlugin)
            parser.add_plugin(pin=qparser.PrefixPlugin())
            whoosh_query = parser.parse(text=search_string)

            if ignore_limit:
                limit = None
            else:
                limit = setting_results_limit.value

            results = searcher.search(q=whoosh_query, limit=limit)

            logger.debug('results: %s', results)

            for result in results:
                id_list.append(result['id'])

        return search_model.get_queryset().filter(
            id__in=id_list
        ).distinct()

    def clear_search_model_index(self, search_model):
        """Empty the model's index by re-creating it with the same schema."""
        schema = self.get_search_model_schema(search_model=search_model)

        # Clear the model index.
        self.get_storage().create_index(
            indexname=search_model.get_full_name(), schema=schema
        )

    def deindex_instance(self, instance):
        """Remove `instance` from its model's index, serialized by a lock."""
        try:
            lock = LockingBackend.get_backend().acquire_lock(
                name=TEXT_LOCK_INSTANCE_DEINDEX
            )
        except LockError:
            raise
        else:
            try:
                search_model = SearchModel.get_for_model(instance=instance)
                index = self.get_or_create_index(search_model=search_model)
                with index.writer(**self.writer_kwargs) as writer:
                    writer.delete_by_term('id', str(instance.pk))
            finally:
                lock.release()

    def get_or_create_index(self, search_model):
        """Open the model's index, creating it on first use."""
        storage = self.get_storage()
        schema = self.get_search_model_schema(search_model=search_model)

        try:
            # Explicitly specify the schema. Allows using existing index
            # when the schema changes.
            index = storage.open_index(
                indexname=search_model.get_full_name(), schema=schema
            )
        except EmptyIndexError:
            index = storage.create_index(
                indexname=search_model.get_full_name(), schema=schema
            )

        return index

    def get_search_model_schema(self, search_model):
        """Build the Whoosh schema from the model's resolved field map."""
        field_map = self.get_resolved_field_map(search_model=search_model)
        schema_kwargs = {key: value['field'] for key, value in field_map.items()}

        return whoosh.fields.Schema(**schema_kwargs)

    def get_storage(self):
        return FileStorage(path=self.index_path)

    def index_instance(self, instance, exclude_model=None, exclude_kwargs=None):
        """(Re)index `instance` under a lock; requests a retry when Whoosh
        itself reports its on-disk index as locked."""
        try:
            lock = LockingBackend.get_backend().acquire_lock(
                name=TEXT_LOCK_INSTANCE_INDEX
            )
        except LockError:
            raise
        else:
            try:
                search_model = SearchModel.get_for_model(instance=instance)
                index = self.get_or_create_index(search_model=search_model)
                with index.writer(**self.writer_kwargs) as writer:
                    kwargs = search_model.populate(
                        backend=self, instance=instance,
                        exclude_model=exclude_model,
                        exclude_kwargs=exclude_kwargs
                    )
                    try:
                        # Delete first so re-indexing cannot duplicate
                        # the document.
                        writer.delete_by_term('id', str(instance.pk))
                        writer.add_document(**kwargs)
                    except Exception as exception:
                        logger.error(
                            'Unexpected exception while indexing object '
                            'id: %(id)s, search model: %(search_model)s, '
                            'index data: %(index_data)s, raw data: '
                            '%(raw_data)s, field map: %(field_map)s; '
                            '%(exception)s' % {
                                'exception': exception,
                                'field_map': self.get_resolved_field_map(
                                    search_model=search_model
                                ),
                                'id': instance.pk,
                                'index_data': kwargs,
                                'raw_data': instance.__dict__,
                                'search_model': search_model.get_full_name()
                            }, exc_info=True
                        )
                        raise
            except whoosh.index.LockError:
                raise DynamicSearchRetry
            finally:
                lock.release()

    def index_search_model(self, search_model, range_string=None):
        """Index every instance of `search_model`, optionally restricted to
        the primary keys described by `range_string`.

        Fix: the queryset was previously fetched twice in a row; a single
        call suffices.
        """
        queryset = search_model.get_queryset()

        if range_string:
            queryset = queryset.filter(
                pk__in=list(parse_range(range_string=range_string))
            )

        for instance in queryset:
            self.index_instance(instance=instance)

    def reset(self, search_model=None):
        """Destroy and re-create the index structure for one or all models."""
        self.tear_down(search_model=search_model)
        self.update_mappings(search_model=search_model)

    def tear_down(self, search_model=None):
        """Clear the index of one model, or of every model when omitted."""
        if search_model:
            search_models = (search_model,)
        else:
            search_models = SearchModel.all()

        for search_model in search_models:
            self.clear_search_model_index(search_model=search_model)

    def update_mappings(self, search_model=None):
        """Ensure indexes exist for one model, or for every model."""
        if search_model:
            search_models = (search_model,)
        else:
            search_models = SearchModel.all()

        for search_model in search_models:
            self.get_or_create_index(search_model=search_model)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
from collections import defaultdict
class BoundDictionary(defaultdict):
    """
    A default dictionary where elements are tightly coupled.

    The factory method is responsible for binding the parent object to the child.
    If a reference attribute name is given, each child object gets its key
    assigned to that attribute on every lookup.  Otherwise it behaves as a
    plain defaultdict.
    """

    def __init__(self, reference=None, *args, **kw):
        super(BoundDictionary, self).__init__(*args, **kw)
        self.reference = reference

    def __getitem__(self, key):
        item = super(BoundDictionary, self).__getitem__(key)
        ref = self.reference
        if ref is not None:
            # Stamp the key onto the child so it knows its own name.
            setattr(item, ref, key)
        return item
|
nilq/baby-python
|
python
|
import os
import random
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from utils import loadWord2Vec
from math import log
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.svm import LinearSVC
# build corpus
# TF-IDF bag-of-words + logistic regression baseline on the 20ng dataset.
dataset = '20ng'
# shulffing
doc_name_list = []
doc_train_list = []
doc_test_list = []
# Each metadata line is "<name>\t<train|test>\t<label>"; split documents
# into train/test according to the second column.
f = open('data/' + dataset + '.txt', 'r')
for line in f.readlines():
    doc_name_list.append(line.strip())
    temp = line.split("\t")
    if temp[1].find('test') != -1:
        doc_test_list.append(line.strip())
    elif temp[1].find('train') != -1:
        doc_train_list.append(line.strip())
f.close()
# The cleaned document bodies, one per line, aligned with doc_name_list.
doc_content_list = []
f = open('data/corpus/' + dataset + '.clean.txt', 'r')
for line in f.readlines():
    doc_content_list.append(line.strip())
f.close()
train_ids = []
for train_name in doc_train_list:
    train_id = doc_name_list.index(train_name)
    train_ids.append(train_id)
print(train_ids)
random.shuffle(train_ids)
# partial labeled data
# NOTE: the shuffled ids above are discarded; the fixed index file is the
# authoritative train split.
f = open('data/' + dataset + '.train.index', 'r')
lines = f.readlines()
f.close()
train_ids = [int(x.strip()) for x in lines]
#train_ids = train_ids[:int(0.2 * len(train_ids))]
test_ids = []
for test_name in doc_test_list:
    test_id = doc_name_list.index(test_name)
    test_ids.append(test_id)
print(test_ids)
random.shuffle(test_ids)
# Concatenate train then test ids; everything below relies on this order.
ids = train_ids + test_ids
print(ids)
print(len(ids))
train_size = len(train_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size
# Reorder names and contents to match `ids`.
shuffle_doc_name_list = []
shuffle_doc_words_list = []
for id in ids:
    shuffle_doc_name_list.append(doc_name_list[int(id)])
    shuffle_doc_words_list.append(doc_content_list[int(id)])
tfidf_vec = TfidfVectorizer() #max_features=50000
tfidf_matrix = tfidf_vec.fit_transform(shuffle_doc_words_list)
print(tfidf_matrix)
#tfidf_matrix_array = tfidf_matrix.toarray()
# BOW TFIDF + LR
#train_x = []
train_y = []
#test_x = []
test_y = []
# Labels live in the third tab-separated field of each metadata line.
for i in range(len(shuffle_doc_words_list)):
    doc_words = shuffle_doc_words_list[i]
    words = doc_words.split(' ')
    doc_meta = shuffle_doc_name_list[i]
    temp = doc_meta.split('\t')
    label = temp[2]
    if i < train_size:
        #train_x.append(tfidf_matrix_array[i])
        train_y.append(label)
    else:
        #test_x.append(tfidf_matrix_array[i])
        test_y.append(label)
#clf = svm.SVC(decision_function_shape='ovr', class_weight="balanced",kernel='linear')
#clf = LinearSVC(random_state=0)
clf = LogisticRegression(random_state=1)
clf.fit(tfidf_matrix[:train_size], train_y)
predict_y = clf.predict(tfidf_matrix[train_size:])
# Report plain accuracy plus per-class precision/recall/F1.
correct_count = 0
for i in range(len(test_y)):
    if predict_y[i] == test_y[i]:
        correct_count += 1
accuracy = correct_count * 1.0 / len(test_y)
print(dataset, accuracy)
print("Precision, Recall and F1-Score...")
print(metrics.classification_report(test_y, predict_y, digits=4))
|
nilq/baby-python
|
python
|
# Lesson 4, "Pildoras Informaticas" course: basic operators and types.
print(5 + 6)    # addition
print(10 % 3)   # modulo (remainder)
print(5 ** 3)   # exponentiation
print(9 / 2)    # true division -> float
print(9 // 2)   # floor division -> int
# The same name can be rebound to values of different types (dynamic typing).
Nombre = 5
print(type(Nombre))
Nombre = 5.4
print(type(Nombre))
Nombre = "John"
print(type(Nombre))
# Triple-quoted strings may span multiple lines.
Mensaje = """ Esto es un
mensaje utilizando comilla
triple. Sirve para dar todos
los saltos de líneas que te vengan
en gana."""
print(Mensaje)
Number1 = 4
Number2 = 6
# Compare the two numbers and report which is larger (messages in Spanish).
if Number2>Number1:
    print(Number2, "es mayor que", Number1)
else:
    print(Number2, "es menor o igual que", Number1)
|
nilq/baby-python
|
python
|
#
# @lc app=leetcode.cn id=382 lang=python3
#
# [382] 链表随机节点
#
# https://leetcode-cn.com/problems/linked-list-random-node/description/
#
# algorithms
# Medium (57.03%)
# Likes: 66
# Dislikes: 0
# Total Accepted: 6K
# Total Submissions: 10.6K
# Testcase Example: '["Solution","getRandom"]\n[[[1,2,3]],[]]'
#
# 给定一个单链表,随机选择链表的一个节点,并返回相应的节点值。保证每个节点被选的概率一样。
#
# 进阶:
# 如果链表十分大且长度未知,如何解决这个问题?你能否使用常数级空间复杂度实现?
#
# 示例:
#
#
# // 初始化一个单链表 [1,2,3].
# ListNode head = new ListNode(1);
# head.next = new ListNode(2);
# head.next.next = new ListNode(3);
# Solution solution = new Solution(head);
#
# // getRandom()方法应随机返回1,2,3中的一个,保证每个元素被返回的概率相等。
# solution.getRandom();
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def __init__(self, head: ListNode):
        """
        @param head The linked list's head.
        Note that the head is guaranteed to be not null, so it contains at least one node.
        """
        self.head = head
    def getRandom(self) -> int:
        """
        Returns a random node's value.

        Uses reservoir sampling with k=1: when visiting the i-th node
        (0-based), it replaces the current pick with probability 1/(i+1),
        giving every node an equal 1/n chance overall.  O(n) time and O(1)
        extra space per call, with no need to know the list length.
        """
        import random
        i = 0
        res = 0
        p = self.head
        while p:
            # Keep this node's value with probability 1/(i+1).
            r = random.randint(0,i)
            if r == 0:
                res = p.val
            i = i + 1
            p = p.next
        return res
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
# @lc code=end
|
nilq/baby-python
|
python
|
from setuptools import setup
# Load the long description and license text from the repository files.
with open("README.md") as f:
    readme = f.read()
with open("LICENSE") as f:
    license = f.read()
# Package metadata; installs a `perform` console script entry point.
setup(
    name="PERFORM",
    version=0.1,
    author="Christopher R. Wentland",
    author_email="chriswen@umich.edu",
    url="https://github.com/cwentland0/pyGEMS_1D",
    description="One-dimension reacting flow for ROM prototyping",
    long_description=readme,
    license=license,
    install_requires=["numpy>=1.16.6", "scipy>=1.1.0", "matplotlib>=2.1.0"],
    entry_points={"console_scripts": ["perform = perform.driver:main"]},
    python_requires=">=3.6",
)
|
nilq/baby-python
|
python
|
class ProxyError(StandardError):
    # Base error for the proxy, carrying a title and a message.
    # NOTE: StandardError exists only in Python 2; under Python 3 this
    # hierarchy would need to derive from Exception.
    def __init__(self, title, message):
        super(ProxyError, self).__init__()
        self.title = title
        self.message = message
        # Subclasses overwrite this label to categorize the error.
        self.error = "Error"
    def __str__(self):
        return "%s => %s:%s" % (self.error, self.title, self.message)
class ResourceError(ProxyError):
    # ProxyError specialized for resource-related failures; only the
    # `error` label differs.
    def __init__(self, title, message):
        super(ResourceError, self).__init__(title, message)
        self.error = "Resource Error"
class RequestError(ProxyError):
    # ProxyError specialized for request-related failures; only the
    # `error` label differs.
    def __init__(self, title, message):
        super(RequestError, self).__init__(title, message)
        self.error = "Request Error"
class HTTPResponseMarble(object):
    """Mutable holder for the parts of an HTTP response (status, headers,
    body, and their formats) with a closed set of attributes: assigning
    any name not created in __init__ raises AttributeError.
    """
    def __init__(self, *k, **p):
        d = self.__dict__
        d['status'] = u'200 OK'
        d['status_format'] = u'unicode'
        d['header_list'] = \
            [dict(name=u'Content-Type', value=u'text/html; charset=utf8')]
        d['header_list_format'] = u'unicode'
        d['body'] = []
        d['body_format'] = u'unicode'
    def __setattr__(self, name, value):
        # Only the attributes seeded in __init__ may be assigned.
        if name not in self.__dict__:
            raise AttributeError('No such attribute %s'%name)
        self.__dict__[name] = value
nilq/baby-python
|
python
|
__author__ = 'wanghao'
# import threading
import sys
import socket
from struct import *
import time
import threading
def run_flow(dst_ip, port, size):
def run(dst_ip, port, size):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# data = os.urandom(size)
data = pack('c', 'a')
try:
sock.connect((dst_ip, port))
size_left = size
while size_left:
if size_left > 200000000:
sock.sendall(data*200000000)
size_left -= 200000000
else:
sock.sendall(data*size_left)
size_left = 0
except socket.timeout:
print "Connection Timeout!"
except socket.error, e:
print e
finally:
sock.close()
t = threading.Thread(target=run(dst_ip, port, size))
t.start()
t.join()
print "Done"
#run(dst_ip, port, size)
if __name__ == '__main__':
    # Usage: script.py dst_ip port size_in_bytes
    dst_ip = sys.argv[1]
    port = int(sys.argv[2])
    size = int(float(sys.argv[3]))
    # Record the flow completion time (FCT) measurements to a file.
    fd = open("fct.txt", 'w')
    #print "Flow Size:", size
    fd.write("Flow Size %d " % size)
    # NOTE(review): time.clock() measures CPU time on Unix, not wall time;
    # the duration of a network transfer presumably wants time.time().
    start_t = time.clock()
    #print "Start:", time.strftime("%M:%S")
    fd.write("Start: %s " % time.strftime("%M:%S"))
    run_flow(dst_ip, port, size)
    end_t = time.clock()
    #print "End:", time.strftime("%M:%S")
    fd.write("End: %s " % time.strftime("%M:%S"))
    print "Duration:", end_t - start_t
    fd.write("Duration: %f \r\n" % (end_t - start_t))
    fd.close()
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import os
# Implementing relu()
def relu(z):
    """Rectified linear unit: returns z for positive inputs, 0 otherwise."""
    return z if z > 0 else 0
# Define linewidth and fontsize to control the aesthetics of the plots easily
linewidth = 4
fontsize = 20
# Define a range of values for the inputs of relu(z)
z_range = np.arange(-5,5, 0.01)
plt.figure(figsize=(16,9))
# For each z in x_range compute relu(z)
y_relu = [relu(z) for z in z_range]
# Plot relu over [-5, 5); blocks until the window is closed.
plt.plot(z_range, y_relu, c='b', linewidth= linewidth, label='Relu(z)')
plt.ylim(-5, 5)
plt.xlim(-5, 5)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.show()
def grad_relu(z):
    """Derivative of relu: 1 for positive inputs, 0 otherwise (the
    subgradient 0 is used at z == 0)."""
    return 1 if z > 0 else 0
### The gradients of relu
# Side-by-side plot of relu and its derivative.
y_relu = [relu(z) for z in z_range]
grad_y_relu = [grad_relu(z) for z in z_range]
plt.figure(figsize=(16, 9))
# The relu
plt.subplot(1,2,1)
plt.plot(z_range, y_relu, c='b',linewidth= linewidth, label='Relu(z)')
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
### The gradients of relu
plt.subplot(1,2,2)
plt.plot(z_range, grad_y_relu, c='r',linewidth= linewidth, label='d Relu(z)/dz')
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
plt.show()
# Demonstrating the flexibility of relu: relu(z),relu(-z),-relu(z),-relu(-z)
z_range = np.arange(-5,5, 0.01)
plt.figure(figsize=(16,9))
plt.suptitle('The Flexibility of Relu(z)', fontsize=fontsize)
plt.subplot(2,2,1)
y_relu = [relu(z) for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth= linewidth, label='Relu(z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.subplot(2,2,2)
y_relu = [relu(-z) for z in z_range]
plt.plot(z_range, y_relu, c='k', linewidth= linewidth,label='Relu(-z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=1)
plt.grid()
plt.subplot(2,2,3)
y_relu = [-relu(z) for z in z_range]
plt.plot(z_range, y_relu, c='r', linewidth= linewidth,label='-Relu(z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
plt.subplot(2,2,4)
y_relu = [-relu(-z) for z in z_range]
plt.plot(z_range, y_relu, c='g', linewidth= linewidth,label='-Relu(-z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=1)
plt.grid()
plt.show()
# The rotation of the slope in relu
w_range = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Changing the slope of Relu(w*z) using a coefficient w', fontsize=fontsize)
for idx, w in enumerate(w_range):
    plt.subplot(2,3,idx+1)
    y_relu = [relu(w*z) for z in z_range]
    plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='w = %.2f' % w)
    plt.ylim(-1, 5)
    plt.xlim(-5, 5)
    plt.grid()
    plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Shifting the relu horizontally
bias = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Shifting Relu(z+b) horizontally using a bias term b inside Relu()', fontsize=fontsize)
for idx, b in enumerate(bias):
    plt.subplot(2,3, idx+1)
    y_relu = [relu(z+b) for z in z_range]
    plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='b = %.2f' % b)
    plt.ylim(-1, 5)
    plt.xlim(-4, 4)
    plt.grid()
    plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Shifting the relu vertically
bias = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Shifting Relu(z) + b vertically using a bias term b outside Relu()', fontsize=fontsize)
for idx, b in enumerate(bias):
    plt.subplot(2,3, idx+1)
    y_relu = [relu(z)+b for z in z_range]
    plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='b = %.2f' % b)
    plt.ylim(-1, 5)
    plt.xlim(-4, 4)
    plt.grid()
    plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Defining the data and the ground-truth: y = x^3 on 300 points in [-10, 10]
x = torch.unsqueeze(torch.linspace(-10, 10, 300), dim=1)
y = x.pow(3)
# Setting the available device
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
print("Device", device)
# Build a regression model class
class Regressor(nn.Module):
    """A minimal 1-in/1-out MLP: Linear -> relu -> Linear.

    The learned curve is a sum of `n_hidden` shifted and scaled relu
    ramps, which is exactly what the surrounding plotting code visualizes.
    """

    def __init__(self, n_hidden=2):
        super(Regressor, self).__init__()
        self.hidden = torch.nn.Linear(1, n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden, 1)  # output layer

    def forward(self, x):
        # Rectify the hidden pre-activations, then combine them linearly.
        return self.predict(F.relu(self.hidden(x)))
# number of relu() units in the hidden layer
n_hidden = 7
# total number of epochs
n_epochs = 4000
# Building an object from the regressor class while passing
# n_hidden and setting the model to train() mode
regressor = Regressor(n_hidden=n_hidden).train()
# Defining the optimizer
optimizer = torch.optim.SGD(regressor.parameters(), lr=0.0001)
# Defining MSE as the appropriate loss function for regression.
loss_func = torch.nn.MSELoss()
plt.figure(figsize=(16, 9))
for epoch in range(n_epochs):
    # Put the model in training mode
    regressor.train()
    # Clear the previous plot so the figure behaves like an animation
    plt.clf()
    # Input x to the regressor and receive the prediction
    y_hat = regressor(x)
    # Compute the loss between y_hat and the ground-truth curve y
    loss = loss_func(y_hat, y)
    # Compute the gradients w.r.t. all the parameters
    loss.backward()
    # Update the parameters
    optimizer.step()
    # Zero out all the gradients before feeding the next input
    # into the regressor model
    optimizer.zero_grad()
    # Every 100 epochs evaluate and do some plotting
    if epoch % 100 == 0:
        print('Epoch %d --- Loss %.5f' % (epoch+1, loss.data.numpy()))
        # Before evaluation, put the model back to evaluation mode
        regressor.eval()
        # At this moment of training, grab the current biases and weights
        # from the model object: b_0, w_0 (hidden) and b_1, w_1 (output)
        biases_0 = regressor.hidden.bias.cpu().detach().numpy()
        weights_0 = regressor.hidden.weight.squeeze(0).cpu().detach().numpy()
        biases_1 = regressor.predict.bias.cpu().detach().numpy() # This has ONLY 1 value
        weights_1 = regressor.predict.weight.squeeze(0).cpu().detach().numpy()
        # For plotting, feed the current range of x to EACH relu() individually
        data = x.detach().numpy()
        # Holds the per-unit curves relu(x*w_0+b_0)*w_1 + b_1; their sum is
        # the model's ultimate prediction. Reset before plotting this epoch.
        sum_y_relu = []
        # For each relu() unit do the following
        for idx in range(n_hidden):
            plt.suptitle('Epoch=%d --- MSE loss= %.2f' % (epoch+1, loss.data.numpy()), fontsize=fontsize)
            # Plot output of the current relu() unit
            plt.subplot(1,3,1)
            plt.title('Relu(w_0*x + b_0)', fontsize=fontsize)
            y_relu = [relu(d*weights_0[idx]+biases_0[idx]) for d in data]
            plt.plot(data, y_relu)
            plt.ylim(-1,40)
            plt.grid()
            plt.subplot(1, 3, 2)
            # Plot output of the current relu(), multiplied by its
            # corresponding weight, w_1, and summed with the bias b_1
            plt.title('Relu(w_0*x + b_0)*w_1 + b_1',fontsize=fontsize)
            y_relu = [relu(d*weights_0[idx]+biases_0[idx])*weights_1[idx] + biases_1[0] for d in data]
            plt.plot(data,y_relu)
            plt.ylim(-500,900)
            plt.grid()
            # Keep adding Relu(w_0*x + b_0)*w_1 + b_1 for each relu unit;
            # summed below to plot the model's full prediction y_hat
            sum_y_relu.append([relu(d*weights_0[idx]+biases_0[idx])*weights_1[idx] + biases_1[0] for d in data])
        # Sum the per-unit curves into the full prediction
        sum_y_relu = np.sum(np.array(sum_y_relu),axis=0)
        plt.subplot(1, 3, 3)
        # BUGFIX: the title used to render as "y_hat)" (stray parenthesis
        # inside the string literal).
        plt.title('y_hat', fontsize=fontsize)
        plt.plot(x.data.numpy(), y.data.numpy(), color="k", label='Ground-truth')
        plt.plot(data,sum_y_relu, c='r', label='Prediction')
        plt.legend()
        plt.grid()
        # A slight delay so the animation is visible
        plt.pause(0.1)
|
nilq/baby-python
|
python
|
'''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
def right(a, b):
    """Rotate the bit characters in `a` right by `b` positions and return
    the rotated bit string read as a decimal int (e.g. ['0','1','1','0'] -> 110).
    """
    bits = ''.join(a)
    for _ in range(b):
        # Move the last character to the front: one right rotation.
        bits = bits[-1:] + bits[:-1]
    return int(bits)
def left(a, b):
    """Rotate the bit characters in `a` left by `b` positions and return the
    rotated bit string read as a decimal int (e.g. ['0','0','1','0'] -> 100).

    BUGFIX: the previous version bound `r = a` and then popped/appended on
    that alias, destructively rotating the caller's list in place — unlike
    right(), which leaves its argument untouched. This version is pure.
    """
    bits = ''.join(a)
    for _ in range(b):
        # Move the first character to the back: one left rotation.
        bits = bits[1:] + bits[:1]
    return int(bits)
def btod(n):
    """Read the decimal digits of `n` as a base-2 numeral (e.g. 101 -> 5)."""
    value = 0
    for digit in str(n):
        # Same fold as the recursive form: digit + 2 * rest.
        value = value * 2 + int(digit)
    return value
# For each test case: rotate a 16-bit number left or right and print the
# decimal value of the rotated bit pattern.
for _ in range(int(input())):
    number, amount, direction = input().split()
    # 16-bit zero-padded binary representation, as a list of '0'/'1' chars.
    bits = list(bin(int(number))[2:].zfill(16))
    amount = int(amount)
    if direction == 'L':
        print(btod(left(bits, amount)))
    if direction == 'R':
        print(btod(right(bits, amount)))
|
nilq/baby-python
|
python
|
import mykde
"""
If a font in the browser is not Droid, in Google Chrome right click on the text
with the wrong font, select 'Inspect element', find 'Computed style' and
'font-family' in it:
font-family: 'lucida grande', tahoma, verdana, arial, sans-serif;
And for each font run 'fc-match':
$ fc-match Helvetica
LiberationSans-Regular.ttf: "Liberation Sans" "Regular"
Ok, you found the offending font. Add it to 'fonts.conf' file.
"""
class Action(mykde.BaseAction):
    """Install the Droid font family and make it the default everywhere."""

    name = 'Droid fonts everywhere'
    description = """
Droid fonts are used everywhere possible, because they render very nice.<br>
In browser they should replace Verdana, Arial and other MS fonts.<br>
<br>
<img src="screenshot.png"/>
"""
    packages = ['fonts-droid']
    affects = [mykde.KdeSettings]

    def proceed(self):
        # Apply the font choices to KDE's config and to fontconfig, and
        # remove the legacy per-user fonts.conf so it cannot shadow ours.
        self.update_kconfig('./kdeglobals', '~/.kde/share/config/kdeglobals')
        self.copy_file('./fonts.conf', '~/.config/fontconfig/')
        self.delete_file('~/.fonts.conf')
        # self.create_symlink('~/.config/fontconfig/fonts.conf', '~/.fonts.conf') # in 12.04 only this works

    def override_font(self, font, override):
        """Add necessary nodes to fonts.conf """
        # BUGFIX: was `raise NotImplemented`, which raises a TypeError because
        # NotImplemented is a singleton, not an exception class.
        raise NotImplementedError
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 25 11:25:36 2017
@author: azkei
We briefly covered operations between two data structures in the last file.
Here we will cover how arithmetic operators apply between two or more structures
using Flexible Arithmetic Methods such as
add(), sub(),div(),mul()
"""
# 1. Flexible Arithmetic Methods
# NOTE(review): frame1/frame2 are assumed to be DataFrames defined in the
# previous file of this tutorial; running this file alone raises NameError.
# Addition
frame1.add(frame2)
# Subtraction
frame1.sub(frame2)
# Division
frame1.div(frame2)
# Multiplication
frame1.mul(frame2)
# As you can see there are NaN's on labels the two frames do not share.
# 2. Operations between DataFrame and Series
# Generate a 4x4 DataFrame holding 0..15
frame = pd.DataFrame(np.arange(16).reshape((4,4)),
                     index=['red','blue','yellow','white'],
                     columns=['ball','pen','pencil','paper'])
# Generate a Series with values 0..3 on the same labels as the columns
ser = pd.Series(np.arange(4),index=['ball','pen','pencil','paper'])
ser
# Subtract the Series from the DataFrame: the Series index is aligned
# with the DataFrame columns and broadcast across every row.
frame - ser
# If a label only exists in the Series, the result gets a NaN column
ser['mug'] = 9
frame - ser
|
nilq/baby-python
|
python
|
from django.utils import timezone
import pytz
class TimeZoneMiddleware:
    """Activate the timezone stored in the session for every request.

    When the session has no "time_zone" entry, any previously activated
    timezone is deactivated so Django falls back to its default.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        zone_name = request.session.get("time_zone")
        if zone_name:
            timezone.activate(pytz.timezone(zone_name))
        else:
            timezone.deactivate()
        return self.get_response(request)
|
nilq/baby-python
|
python
|
from podcast.views import home_page
from django.urls import path
from . import views
# Namespace for reversing these routes (e.g. "podcast:podcast-list").
app_name = "podcast"

# Routes are grouped by content type; each group follows the same
# list / search / create / detail pattern.
urlpatterns = [
    path('', views.home_page, name="home-page"),
    # Podcasts
    path('podcast/',views.podcast_list, name="podcast-list"),
    path('podcast/search/',views.search_podcast, name="podcast-search"),
    path('podcast/create/',views.create_podcast, name="create-podcast"),
    path('podcast/detail/<int:pk>/',views.podcast_detail, name="podcast-detail"),
    # Articles
    path('article/',views.article_list, name="article-list"),
    path('article/search/',views.search_article, name="article-search"),
    path('article/create/',views.create_article, name="create-article"),
    path('article/detail/<int:pk>/',views.article_detail, name="article-detail"),
    # News
    path('news/',views.news_list, name="news-list"),
    path('news/search/',views.search_news, name="news-search"),
    path('news/create/',views.create_news, name="create-news"),
    path('news/detail/<int:pk>/',views.news_detail, name="news-detail"),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
import uuid
from zipfile import ZipFile
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_webpackext import current_webpack
from invenio_assets import InvenioAssets
from invenio_config import InvenioConfigDefault
from invenio_db import InvenioDB
from invenio_db import db as db_
from invenio_files_rest import InvenioFilesREST
from invenio_files_rest.models import Location, ObjectVersion
from invenio_formatter import InvenioFormatter
from invenio_pidstore.providers.recordid import RecordIdProvider
from invenio_records import InvenioRecords
from invenio_records_files.api import Record
from invenio_records_ui import InvenioRecordsUI
from invenio_records_ui.views import create_blueprint_from_app
from six import BytesIO
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_previewer import InvenioPreviewer
@pytest.yield_fixture(scope='session', autouse=True)
def app():
    """Flask application fixture with database initialization.

    Builds a throwaway instance folder, configures three records-ui
    endpoints (detail page, previewer view, file download) and initializes
    the Invenio extensions the previewer depends on.
    """
    instance_path = tempfile.mkdtemp()
    app_ = Flask(
        'testapp', static_folder=instance_path, instance_path=instance_path)
    app_.config.update(
        TESTING=True,
        # In-memory SQLite unless the environment overrides it (e.g. CI).
        SQLALCHEMY_DATABASE_URI=os.environ.get(
            'SQLALCHEMY_DATABASE_URI',
            'sqlite:///:memory:'),
        SQLALCHEMY_TRACK_MODIFICATIONS=True,
        RECORDS_UI_DEFAULT_PERMISSION_FACTORY=None,
        RECORDS_UI_ENDPOINTS=dict(
            # Plain record detail page.
            recid=dict(
                pid_type='recid',
                route='/records/<pid_value>',
                template='invenio_records_ui/detail.html',
            ),
            # Preview endpoint served by invenio-previewer.
            recid_previewer=dict(
                pid_type='recid',
                route='/records/<pid_value>/preview',
                view_imp='invenio_previewer.views:preview',
                record_class='invenio_records_files.api:Record',
            ),
            # Raw file download endpoint.
            recid_files=dict(
                pid_type='recid',
                route='/record/<pid_value>/files/<filename>',
                view_imp='invenio_records_files.utils.file_download_ui',
                record_class='invenio_records_files.api:Record',
            ),
        ),
        SERVER_NAME='localhost',
        APP_THEME=['semantic-ui']
    )
    Babel(app_)
    InvenioAssets(app_)
    InvenioDB(app_)
    InvenioRecords(app_)
    InvenioConfigDefault(app_)
    InvenioFormatter(app_)
    # ._state is accessed, presumably to force eager extension
    # initialization — TODO confirm against invenio-previewer.
    InvenioPreviewer(app_)._state
    InvenioRecordsUI(app_)
    app_.register_blueprint(create_blueprint_from_app(app_))
    InvenioFilesREST(app_)
    with app_.app_context():
        yield app_
    shutil.rmtree(instance_path)
@pytest.yield_fixture()
def db(app):
    """Create the database and all tables, then tear the schema down."""
    url = str(db_.engine.url)
    if not database_exists(url):
        create_database(url)
    db_.create_all()
    yield db_
    # Cleanup: drop everything so the next test starts from scratch.
    db_.session.remove()
    db_.drop_all()
@pytest.yield_fixture(scope='session')
def webassets(app):
    """Flask application fixture with built webpack assets."""
    # Webpack builds relative to the current directory, so run the build
    # from the app instance folder and restore the cwd afterwards.
    initial_dir = os.getcwd()
    os.chdir(app.instance_path)
    # Force the theme.config alias to point to less/invenio_theme/theme.config.
    # NOTE(review): assumes the theme bundle is bundles[0] — confirm.
    theme_bundle = current_webpack.project.bundles[0]
    theme_bundle.aliases['../../theme.config'] = \
        'less/invenio_theme/theme.config'
    current_webpack.project.buildall()
    yield app
    os.chdir(initial_dir)
@pytest.yield_fixture()
def location(db):
    """Default files-rest Location backed by a temporary directory."""
    storage_dir = tempfile.mkdtemp()
    loc = Location(name='testloc', uri=storage_dir, default=True)
    db.session.add(loc)
    db.session.commit()
    yield loc
    # Remove the on-disk storage once the test is done.
    shutil.rmtree(storage_dir)
@pytest.fixture()
def record(db, location):
    """Create and commit a minimal record with a registered record PID."""
    rec_uuid = uuid.uuid4()
    pid_provider = RecordIdProvider.create(
        object_type='rec', object_uuid=rec_uuid)
    created = Record.create(
        {
            'control_number': pid_provider.pid.pid_value,
            'title': 'TestDefault',
        },
        id_=rec_uuid,
    )
    db.session.commit()
    return created
@pytest.fixture()
def record_with_file(db, record, location):
    """Record with a test file.

    Stores a 5-byte object in the record's bucket and mirrors the object's
    metadata into the record's ``_files`` field, then returns both.
    """
    testfile = ObjectVersion.create(record.bucket, 'testfile',
                                    stream=BytesIO(b'atest'))
    record.update(dict(
        _files=[dict(
            bucket=str(testfile.bucket_id),
            key=testfile.key,
            size=testfile.file.size,
            checksum=str(testfile.file.checksum),
            version_id=str(testfile.version_id),
        ), ]
    ))
    record.commit()
    db.session.commit()
    return record, testfile
@pytest.fixture()
def zip_fp(db):
    """In-memory ZIP archive with an ASCII-named and a UTF-8-named member."""
    fp = BytesIO()
    with ZipFile(fp, 'w') as archive:
        archive.writestr('Example.txt', 'This is an example'.encode('utf-8'))
        archive.writestr(u'Lé UTF8 test.txt', 'This is an example'.encode('utf-8'))
    # Rewind so consumers can read the archive from the start.
    fp.seek(0)
    return fp
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup
from .handler import add_handler
@add_handler(r'http(s?)://drops.dagstuhl.de/(\w+)')
def download(url):
    """Scrape a Dagstuhl DROPS publication page into a metadata dict."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "lxml")

    def meta_content(name):
        # Content attribute of the first <meta name=...> tag.
        return soup.find_all("meta", attrs={"name": name})[0]['content']

    metadata = dict()
    metadata['importer'] = 'drops'
    metadata['authors'] = [
        tag['content'].strip()
        for tag in soup.find_all("meta", attrs={"name": "citation_author"})
    ]
    metadata['title'] = meta_content("citation_title")
    metadata['url'] = meta_content("citation_pdf_url")
    metadata['date'] = meta_content("citation_date")
    metadata['abstract'] = soup.find(string='Abstract').findParent().findParent().text.replace('\nAbstract\n', '').strip()
    metadata['venue'] = meta_content("citation_conference_title")
    metadata['DOI'] = meta_content("citation_doi")
    metadata['metaurl'] = url
    metadata['uid'] = metadata['DOI']
    metadata['keywords'] = soup.find("meta", attrs={"name": "DC.Subject", "scheme": "SWD"})['content'].split(',')
    return metadata
|
nilq/baby-python
|
python
|
# reference page
# https://iric-solver-dev-manual-jp.readthedocs.io/ja/latest/06/03_reference.html
import sys
import iric
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LightSource
from scipy import signal, interpolate
import flow
class cgns():
    """Thin wrapper around an iRIC CGNS calculation file.

    On construction it opens the file in modify mode and loads the grid,
    the time-series (discharge) parameters and the flow solver parameters.
    """

    def __init__(self, f):
        # Open the CGNS file for read/write and initialize the iRIC API.
        self.fid = iric.cg_open(f, iric.CG_MODE_MODIFY)
        iric.cg_iRIC_Init(self.fid)
        # iric.cg_iRIC_InitRead(fid)
        # set grid and grid attributes
        ier = self.set_grid()
        # set time series parameters
        ier = self.set_time_parameters()
        # set flow calculation parameters
        ier = self.set_flow_parameters()

    #--------------------------------------------------
    # set grid
    #--------------------------------------------------
    def set_grid(self):
        """Read grid coordinates, node elevation and cell roughness.

        Values are stored on the instance reshaped to (nj, ni) node arrays
        and (nj-1, ni-1) cell arrays. Returns 0.
        """
        ier = 0
        self.ni, self.nj = iric.cg_iRIC_GotoGridCoord2d()
        x, y = iric.cg_iRIC_GetGridCoord2d()
        z = iric.cg_iRIC_Read_Grid_Real_Node('Elevation')
        s = iric.cg_iRIC_Read_Grid_Real_Cell('roughness_cell')
        # Node-centred arrays have ni*nj entries; cell-centred ones
        # have (ni-1)*(nj-1).
        xx = x.reshape(self.nj, self.ni)
        yy = y.reshape(self.nj, self.ni)
        zz = z.reshape(self.nj, self.ni)
        ss = s.reshape(self.nj-1, self.ni-1)
        # 2d plot
        # fig, ax = plt.subplots()
        # ax.contourf(xx, yy, zz, 20)
        # 3d plot
        # fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
        # ls = LightSource(270, 45)
        # rgb = ls.shade(zz, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
        # surf = ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, facecolors=rgb,
        #                        linewidth=0, antialiased=False, shade=False)
        # plt.show()
        self.xx = xx
        self.yy = yy
        self.zz = zz
        self.ss = ss
        return ier

    #--------------------------------------------------
    # set time series parameters
    #--------------------------------------------------
    def set_time_parameters(self):
        """Read the discharge time series and derive time-step indices."""
        ier = 0
        # Discharge boundary condition: (time, discharge) series
        t_series = iric.cg_iRIC_Read_FunctionalWithName('discharge_waterlevel', 'time')
        q_series = iric.cg_iRIC_Read_FunctionalWithName('discharge_waterlevel', 'discharge')
        # Computation-time setup: option 2 means the series is given in
        # hours, so convert to seconds.
        if iric.cg_iRIC_Read_Integer('i_sec_hour') == 2:
            t_series = t_series*3600.
        t_start = t_series[0]
        t_end = t_series[len(t_series)-1]
        t_out = iric.cg_iRIC_Read_Real('tuk')
        # Instance variables: step size dt, start/end/output step indices.
        self.t_series = t_series
        self.q_series = q_series
        self.dt = iric.cg_iRIC_Read_Real('dt')
        self.istart = int(t_start / self.dt)
        self.iend = int(t_end / self.dt) + 1
        self.iout = int(t_out / self.dt)
        return ier

    #--------------------------------------------------
    # set flow calculation parameters
    #--------------------------------------------------
    def set_flow_parameters(self):
        """Read solver option flags (CIP advection, confluence)."""
        ier = 0
        self.cip = iric.cg_iRIC_Read_Integer('j_cip')
        self.conf = iric.cg_iRIC_Read_Integer('j_conf')
        return ier

    #--------------------------------------------------
    # write calculation result
    #--------------------------------------------------
    def write_calc_result(self, ctime, flw):
        """Write one output step (time ctime) from flow state `flw`."""
        ier = 0
        # # write time
        iric.cg_iRIC_Write_Sol_Time(ctime)
        # # write discharge interpolated at the current time
        qq = self.get_upstream_q(ctime)
        iric.cg_iRIC_Write_Sol_BaseIterative_Real('Discharge', qq)
        # # write grid
        iric.cg_iRIC_Write_Sol_GridCoord2d(self.xx.reshape(-1), self.yy.reshape(-1))
        # # write node values
        # iric.cg_iRIC_Write_Sol_Integer("Elevation", self.zz.reshape(-1))
        iric.cg_iRIC_Write_Sol_Real("Elevation", self.zz.reshape(-1))
        iric.cg_iRIC_Write_Sol_Real("VelocityX", flw.uu.reshape(-1))
        iric.cg_iRIC_Write_Sol_Real("VelocityY", flw.vv.reshape(-1))
        # # write cell values
        # iric.cg_iRIC_Write_Sol_Cell_Integer("Manning_S", self.ss.reshape(-1))
        iric.cg_iRIC_Write_Sol_Cell_Real("ManningN_c", self.ss.reshape(-1))
        iric.cg_iRIC_Write_Sol_Cell_Real("Elevation_c", flw.zz.reshape(-1))
        iric.cg_iRIC_Write_Sol_Cell_Real("Depth_c", flw.hs.reshape(-1))
        iric.cg_iRIC_Write_Sol_Cell_Real("WaterLevel_c", flw.hh.reshape(-1))
        # # write edge values
        # iric.cg_iRIC_Write_Sol_IFace_Integer(label, val)
        # iric.cg_iRIC_Write_Sol_IFace_Real(label, val)
        # # write edge values
        # iric.cg_iRIC_Write_Sol_JFace_Integer(label, val)
        # iric.cg_iRIC_Write_Sol_JFace_Real(label, val)
        return ier

    def close(self):
        """Close the underlying CGNS file handle."""
        ier = 0
        iric.cg_close(self.fid)
        return ier

    #--------------------------------------------------
    # interpolate the upstream discharge at time t
    #--------------------------------------------------
    def get_upstream_q(self, t):
        """Linearly interpolate the upstream discharge at time t."""
        tt = self.t_series
        qq = self.q_series
        # Various interpolation functions are available, see:
        # https://org-technology.com/posts/univariate-interpolation.html
        func = interpolate.interp1d(tt, qq)
        # func = interpolate.interp1d(tt, qq, kind="quadratic")
        q = float(func(t))
        # q = float(q.astype(np.float64))
        # print(q)
        # print(type(q))
        return q
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
# Storefront routes: browsing (home/category/product/search), the cart,
# and the checkout flow (payment/success).
urlpatterns = [
    path('',views.index, name='homePage'),
    # User profile pages
    path('profile/<username>/', views.profile, name='profile'),
    path('profile/<username>/update', views.edit_profile, name='update'),
    # Catalogue
    path('category/<category>/', views.category, name='category'),
    path('product/<id>', views.product, name='product'),
    path('search/', views.search, name='search'),
    # Cart management
    path('cart', views.cart,name='cart'),
    path('add_cart/<id>/', views.add, name='add_cart'),
    path('remove_cart/<id>/', views.remove, name='remove'),
    # Checkout
    path('payment', views.payment,name='payment'),
    path('success', views.success,name='success'),
]
|
nilq/baby-python
|
python
|
"""
Leetcode #300
"""
from typing import List
import bisect
class Solution:
    def lengthOfLIS(self, nums: List[int]) -> int:
        """Length of the longest strictly increasing subsequence.

        Patience-sorting variant: tails[k] is the smallest possible tail
        of an increasing subsequence of length k+1, so tails stays sorted
        and its length is the answer. O(n log n).
        """
        tails = []
        for value in nums:
            pos = bisect.bisect_left(tails, value)
            if pos == len(tails):
                tails.append(value)
            else:
                tails[pos] = value
        return len(tails)
# Smoke test: the canonical LeetCode #300 example has LIS length 4.
if __name__ == "__main__":
    assert Solution().lengthOfLIS([10,9,2,5,3,7,101,18]) == 4
|
nilq/baby-python
|
python
|
import random
from multiprocessing import Pool
from gym_puyopuyo.agent import tree_search_actions
from gym_puyopuyo.state import State
def benchmark(depth, threshold, factor):
    """Play up to 1000 randomly-chosen tree-search moves on a fresh state.

    Returns (total_reward, died): died is True when a step yields a
    negative reward, in which case the run stops early — presumably a
    lost game; confirm against gym_puyopuyo's reward semantics.
    """
    state = State(16, 8, 5, 3, tsu_rules=False)
    total_reward = 0
    for i in range(1000):
        # Progress indicator (this runs inside a worker process).
        if i % 100 == 0:
            print(i, "/ 1000")
        # Candidate actions from tree search; pick one uniformly at random.
        actions = tree_search_actions(state, depth, occupation_threshold=threshold, factor=factor)
        action = random.choice(actions)
        reward = state.step(*state.actions[action])
        total_reward += reward
        if reward < 0:
            return total_reward, True
    return total_reward, False
if __name__ == "__main__":
    # Parameter grid: fixed depth 3, varying threshold t and factor f.
    argss = [(3, t, f)
             for t in [0.7, 0.75, 0.8, 0.85]
             for f in [15, 18, 20, 25]]
    with Pool() as p:
        results = p.starmap(benchmark, argss)
    # Only report parameter combinations that survived the whole run.
    for (reward, died), args in zip(results, argss):
        if not died:
            print(reward, args)
|
nilq/baby-python
|
python
|
import json
import csv
import sys
from pathlib import Path
import datetime
import pytest
import networkx as nx
from sb.aws_trace_analyzer import CSV_FIELDS, extract_trace_breakdown, longest_path, create_span_graph, duration, get_sorted_children, is_async_call, call_stack # noqa: E501
def test_get_sorted_children():
    """get_sorted_children must order siblings by start_time, not insertion."""
    G = nx.DiGraph()
    # Example inspired from the matrix multiplication app
    # where two spans (sub1, sub2) have the same end_time (end) but
    # sub1 just starts 1ms earlier (start1). Timeline: start1<end1=start2=end2
    start1 = 1613573901.68
    start2 = 1613573901.681
    end = 1613573901.681
    root_id = 'root_id'
    sub1_id = 'sub1'
    sub2_id = 'sub2'
    G.add_node(root_id)
    # Adding sub2 first, so the graph's natural successor order is wrong
    G.add_node('sub2', **{'doc': {'start_time': start2, 'end_time': end}})
    G.add_edge(root_id, sub2_id)
    # Adding sub1 second
    G.add_node('sub1', **{'doc': {'start_time': start1, 'end_time': end}})
    G.add_edge(root_id, sub1_id)
    succ_ids = list(G.successors(root_id))
    # Should have wrong (insertion) order by default
    assert succ_ids == [sub2_id, sub1_id]
    # The helper must sort by start_time, earliest first
    assert get_sorted_children(G, root_id) == ['sub1', 'sub2']
def test_is_async_call_async():
    """A child segment ending a full second after its parent is async."""
    assert is_async_call({'end_time': 1624353531.865},
                         {'end_time': 1624353532.865})
def test_is_async_call_sync():
    """Identical parent and child end times count as a synchronous call."""
    assert not is_async_call({'end_time': 1624353531.865},
                             {'end_time': 1624353531.865})
def test_is_async_call_sync_with_margin():
    """Case of 999 microsecond margin.
    Source: exp31/realworld-dynamodb-lambda/logs/2021-04-30_14-52-50"""
    assert not is_async_call({'end_time': 1624353531.865},
                             {'end_time': 1624353531.8654525})
def test_is_async_call_sync_with_margin_larger():
    """Case of 1001 microsecond margin.
    Source: exp31/faas-migration-go/aws/logs/2021-04-30_09-06-52"""
    assert not is_async_call({'end_time': 1619774396.626},
                             {'end_time': 1619774396.627})
def test_is_async_call_async_with_margin():
    """Edge case beyond the 1001 microsecond margin.
    It should detect an async call when exceeding the margin."""
    assert is_async_call({'end_time': 1619774396.626},
                         {'end_time': 1619774396.6271})
def traces_path(app):
    """Return the path to the traces.json fixture for the given app name."""
    fixture_dir = Path(__file__).parent.parent / "fixtures" / "aws_trace_analyzer"
    return (fixture_dir / app / "traces.json").resolve()
def assert_trace_breakdown(t_path, expected_breakdown):
    """Compare the breakdown extracted from one trace file against
    expected_breakdown.

    Caveat: Supports only a single trace"""
    with open(t_path) as fh:
        trace = json.load(fh)
    assert extract_trace_breakdown(trace) == expected_breakdown
def print_csv(trace_breakdown):
    """Debugging helper that prints a trace breakdown as CSV on stdout."""
    writer = csv.writer(sys.stdout, quoting=csv.QUOTE_MINIMAL)
    # Header row first, then the single breakdown row.
    writer.writerows([CSV_FIELDS, trace_breakdown])
def test_extract_trace_breakdown_thumbnail_app():
    """Tests the most studied execution of the thumbnail app.
    See fixtures/thumbnail_app for additional visualizations.
    """
    # Golden value pinned from a known-good run; the field layout follows
    # CSV_FIELDS in sb.aws_trace_analyzer.
    expected_breakdown = ['1-5fbcfc1f-4d2e9bed6dc0c41c39dfdb2f', 1606220832.0, 1606220846.963, datetime.timedelta(seconds=14, microseconds=963000), 'https://gcz7l3ixlb.execute-api.us-east-1.amazonaws.com/production/upload', 2, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['production-thumbnail-generator/production', 'Lambda', 'thumbnail-generator-production-upload', 'thumbnail-generator-production-upload', 'Initialization', 'S3', 'S3', 'S3', 'S3', 'thumbnail-generator-production-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-production-thumbnail-generator', 'Initialization', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=86000), datetime.timedelta(seconds=1, microseconds=99000), datetime.timedelta(microseconds=771000), datetime.timedelta(seconds=4, microseconds=142000), datetime.timedelta(seconds=5, microseconds=501000), datetime.timedelta(microseconds=59000), None, datetime.timedelta(seconds=3, microseconds=305000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_warm():
    """Golden breakdown for the thumbnail_app_warm fixture."""
    expected_breakdown = ['1-6049e32a-49e6a9866fc15c8e30479d09', 1615455019.055, 1615455027.436, datetime.timedelta(seconds=8, microseconds=381000), 'https://d574arqmjg.execute-api.us-east-1.amazonaws.com/prod/upload', 1, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['prod-thumbnail-generator/prod', 'Lambda', 'thumbnail-generator-prod-upload', 'thumbnail-generator-prod-upload', 'S3', 'S3', 'thumbnail-generator-prod-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-prod-thumbnail-generator', 'Initialization', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=40000), datetime.timedelta(seconds=1, microseconds=44000), datetime.timedelta(microseconds=345000), datetime.timedelta(seconds=1, microseconds=29000), datetime.timedelta(seconds=4, microseconds=310000), datetime.timedelta(microseconds=76000), None, datetime.timedelta(seconds=1, microseconds=537000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app_warm')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_missing_root():
    """
    The root segment of the trace (6f58e0a0bce69065) is missing and created empty through
    the parent_id of the child node (60f93765ebcf2a58). This invalidates the trace duration
    because another node with the earliest start time is chosen as new root.
    Source:
    lg3/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-29_23-40-56
    """
    # Potential partial result (if we plan to support this, missing external_services!)
    expected_breakdown = ['1-608b4550-1929270a067637cfd701f545', 1619739984.606, 1619739985.731, datetime.timedelta(seconds=1, microseconds=125000), None, 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3', 'thumbnail-generator-dev-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-dev-thumbnail-generator', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=17000), datetime.timedelta(microseconds=800000), None, None, datetime.timedelta(microseconds=123000), datetime.timedelta(microseconds=49000), None, datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app_missing_root')
    # The analyzer is expected to reject this trace rather than produce
    # the (invalid) partial breakdown above.
    with pytest.raises(Exception) as e:
        assert_trace_breakdown(tp, expected_breakdown)
    assert str(e.value) == 'Incomplete trace 1-608b4550-1929270a067637cfd701f545 because the parent node 6f58e0a0bce69065 of node 60f93765ebcf2a58 is empty.' # noqa: E501
def test_extract_trace_breakdown_thumbnail_app_in_progress():
    """
    The segment 55b8cdd122595924 is in_progress and therefore misses its 'end_time'.
    Source:
    lg3/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-29_23-40-56
    """
    expected_breakdown = ['1-608b4565-558f8ff60404afa17feb278c', 1619739984.606, 1619739985.731, datetime.timedelta(seconds=1, microseconds=125000), None, 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3', 'thumbnail-generator-dev-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-dev-thumbnail-generator', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=17000), datetime.timedelta(microseconds=800000), None, None, datetime.timedelta(microseconds=123000), datetime.timedelta(microseconds=49000), None, datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app_in_progress')
    # An in-progress segment must abort the breakdown with a clear error.
    with pytest.raises(Exception) as e:
        assert_trace_breakdown(tp, expected_breakdown)
    assert str(e.value) == 'Subsegment 55b8cdd122595924 in progress.'
def test_extract_trace_breakdown_thumbnail_app_fault():
    """
    The segment 514db1f2511b92cf has a fault and returned HTTP status 500.
    Source:
    lg4/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-30_11-49-21
    """
    # Golden value pinned from a known faulty run (layout follows CSV_FIELDS).
    expected_breakdown = ['1-608bf96d-9be20cff02f9e96ff14ae178', 1619786093.459, 1619786093.469, datetime.timedelta(microseconds=10000), 'https://8vtxzfmw67.execute-api.us-west-2.amazonaws.com/dev/upload', 0, 0, 0, 1, ['AWS::ApiGateway::Stage', 'AWS::Lambda'], ['dev-thumbnail-generator/dev', 'Lambda', 'Lambda'], datetime.timedelta(microseconds=10000), None, None, None, None, None, None, None, datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app_fault')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_error_fault_throttle():
    """Trace with errors, faults, and throttle
    Source:
    lg4/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-30_02-58-28
    """
    # Golden value pinned from a known run (layout follows CSV_FIELDS).
    expected_breakdown = ['1-608b7926-687718151c26192849c3d020', 1619753254.117, 1619753261.239, datetime.timedelta(seconds=7, microseconds=122000), 'https://ldblsc9z0j.execute-api.us-west-2.amazonaws.com/dev/upload', 0, 3, 2, 3, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::S3::Bucket'], ['dev-thumbnail-generator/dev', 'Lambda', 'thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3'], datetime.timedelta(microseconds=29000), None, None, None, datetime.timedelta(microseconds=115000), None, None, datetime.timedelta(seconds=6, microseconds=978000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('thumbnail_app_error_fault_throttle')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_matrix_app():
    """Golden breakdown for the matrix multiplication (Step Functions) app."""
    expected_breakdown = ['1-602d2f0b-7bd8e768607f9c8200690500', 1613573899.167, 1613573911.234, datetime.timedelta(seconds=12, microseconds=67000), 'https://tf51nutw60.execute-api.us-east-1.amazonaws.com/prod/run', 5, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::StepFunctions::StateMachine'], ['prod-matrix-mul/prod', 'STEPFUNCTIONS', 'MatrixMul', 'CreateMatrix', 'Lambda', 'matrix-mul-prod-create_matrix', 'matrix-mul-prod-create_matrix', 'Initialization', 'ChooseVariant', 'AppendWorkerCount', 'DistributeWork', 'Lambda', 'matrix-mul-prod-paralell_mul_scheduler', 'matrix-mul-prod-paralell_mul_scheduler', 'Initialization', 'ParallelMul', 'Branch 2', 'AssignWorkerID3', 'MulWorker3', 'Lambda', 'matrix-mul-prod-mul_worker', 'matrix-mul-prod-mul_worker', 'Initialization', 'BuildResult', 'Lambda', 'matrix-mul-prod-result_builder', 'matrix-mul-prod-result_builder', 'Initialization', 'GenReport', 'Lambda', 'matrix-mul-prod-build_report', 'matrix-mul-prod-build_report', 'Initialization'], datetime.timedelta(microseconds=399000), datetime.timedelta(0), datetime.timedelta(seconds=2, microseconds=359000), datetime.timedelta(seconds=1, microseconds=243000), datetime.timedelta(seconds=8, microseconds=66000), None, None, None, datetime.timedelta(0)] # noqa: E501
    tp = traces_path('matrix_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_model_training_app():
    """ Different timestamp granularity at two places:
    1) AWS::Lambda segment (03c544ca90d5515d) with end_time 1624353531.865
    and the AWS::Lambda::Function segment (2d1ac1631da8de72) with end_time 1624353531.8654525
    The microsecond-based timestamp is +0.0004525s later and hence counts as async invocation
    because the child node has a later start time than the parent node. This is incorrect and
    left 1547 microseconds missing at the end of the trace when validating against the duration.
    This issue was fixed by adding a margin for imprecise timestamps (i.e., epsilon) in the
    is_async_invocation heuristic.
    2) The top-level API gateway end_time only has milliseconds (0.8669999 => 0.867)
    Source:
    exp31/serverless-faas-workbench/aws/cpu-memory/model_training/logs/2021-06-22_11-18-22
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-60d1aaf2-671e23cc0b33e597b9728177', 1624353522.835, 1624353531.867, datetime.timedelta(seconds=9, microseconds=32000), 'https://fabi09ztfd.execute-api.us-east-1.amazonaws.com/dev/train', 0, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['dev-model-training/dev', 'Lambda', 'model-training-dev-model-training', 'model-training-dev-model-training', 'Invocation', 'S3', 'S3', 'S3', 'S3', 'Overhead'], datetime.timedelta(microseconds=17530), None, None, None, datetime.timedelta(seconds=8, microseconds=818030), None, datetime.timedelta(microseconds=198), datetime.timedelta(microseconds=196242), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('model_training_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_realworld_app():
    """Official duration uses less accurate timestamp for start_time:
    0:00:00.035000 (official) vs 0:00:00.035189 (end_time - start_time).
    Fixed using a timestamp margin to ignore differences below a threshold (e.g., 1ms).
    Source:
    exp31/realworld-dynamodb-lambda/logs/2021-04-30_14-52-50
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608c1bd4-8ceba73b6799988cd7aaee1a', 1619794900.85, 1619794900.8851886, datetime.timedelta(microseconds=35000), 'https://538uury0ga.execute-api.eu-west-1.amazonaws.com/dev/api/articles/8c90798198-vvwthh/comments', 0, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table'], ['dev-realworld/dev', 'Lambda', 'realworld-dev-getComments', 'realworld-dev-getComments', 'Invocation', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'Overhead'], datetime.timedelta(microseconds=12043), None, None, None, datetime.timedelta(microseconds=3645), None, datetime.timedelta(microseconds=312), datetime.timedelta(microseconds=19000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('realworld_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_realworld_app_margin():
    """
    Fixed by using timestamp margin when comparing
    * the trace duration from X-Ray (50ms) against
    * the latency breakdown (49ms).
    Source:
    lg3/ec2-user/realworld-dynamodb-lambda/logs/2021-04-30_15-52-46
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608c2b95-da77705249832f8715f095de', 1619798933.143, 1619798933.1929998, datetime.timedelta(microseconds=50000), 'https://myz35jktl7.execute-api.eu-west-1.amazonaws.com/dev/api/articles/110b06f1f4-kg38cx', 0, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table'], ['dev-realworld/dev', 'Lambda', 'realworld-dev-getArticle', 'realworld-dev-getArticle', 'Invocation', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'Overhead'], datetime.timedelta(microseconds=13087), None, None, None, datetime.timedelta(microseconds=4614), None, datetime.timedelta(microseconds=299), datetime.timedelta(microseconds=31000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('realworld_app_margin')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_todo_app():
    """Reproduces a trace where the trace duration doesn't
    match the latency breakdown due to clock inaccuracy
    between the API gateway Lambda segment (045739a66c26a771, 1619774396.626)
    and the AWS::Lambda segment (6fd8f7cf8343d129, 1619774396.627).
    The API gateway synchronously invokes AWS::Lambda and should therefore end
    later and not 1ms earlier.
    Source:
    exp31/faas-migration-go/aws/logs/2021-04-30_09-06-52
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608bcbbc-634420f19aa0dd283cbf7529', 1619774396.595, 1619774396.636, datetime.timedelta(microseconds=41000), 'https://bm0q7xberc.execute-api.eu-west-1.amazonaws.com/dev/lst', 0, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::DynamoDB::Table'], ['dev-aws/dev', 'Lambda', 'aws-dev-lst', 'aws-dev-lst', 'DynamoDB', 'DynamoDB'], datetime.timedelta(microseconds=31000), None, None, None, datetime.timedelta(microseconds=1579), None, None, datetime.timedelta(microseconds=8421), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('todo_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_longest_path_sync():
    """Scenario where a synchronous invocation is the longest path."""
    base = 1619760991.000
    # (node id, start offset, end offset) in seconds relative to the base.
    spans = [
        ('s1', 0, 50),
        ('s2', 10, 20),
        ('a', 25, 35),
        ('s3', 30, 40),
    ]
    G = nx.DiGraph()
    G.graph['start'] = 's1'
    G.graph['end'] = 's1'
    for node_id, begin, end in spans:
        doc = {
            'id': node_id,
            'start_time': base + begin,
            'end_time': base + end,
        }
        G.add_node(node_id, doc=doc, duration=duration(doc))
    for parent, child in [('s1', 's2'), ('s2', 'a'), ('s1', 's3')]:
        G.add_edge(parent, child)
    G.graph['call_stack'] = call_stack(G, 's1')
    assert ['s1', 's2', 's3'] == longest_path(G, 's1')
def test_longest_path_async():
    """Scenario where an asynchronous invocation is the longest path."""
    base = 1619760991.000
    # (node id, start offset, end offset) in seconds relative to the base;
    # s3 starts after s1 ends, i.e. it was invoked asynchronously.
    spans = [
        ('s1', 0, 50),
        ('s2', 10, 20),
        ('s', 30, 40),
        ('s3', 70, 80),
    ]
    G = nx.DiGraph()
    for node_id, begin, end in spans:
        doc = {
            'id': node_id,
            'start_time': base + begin,
            'end_time': base + end,
        }
        G.add_node(node_id, doc=doc, duration=duration(doc))
    for parent, child in [('s1', 's2'), ('s1', 's'), ('s2', 's3')]:
        G.add_edge(parent, child)
    G.graph['call_stack'] = call_stack(G, 's3')
    assert ['s1', 's2', 's3'] == longest_path(G, 's1')
def test_longest_path_event_processing_app():
    """Reproduces an issue where the last returning child
    was appended to the longest path although not being part of it.
    Specifically, the overhead node `0d431` was appended at the end of the
    longest path but should not be part of it because the async transition
    from SNS `3d46` to the format function `6d05` constitutes a longer path.
    Source:
    exp31/faas-migration/Event-Processing/Lambda/logs/2021-04-30_05-34-22
    """
    # Manually validated based on trace map, timestamp inspection, and
    # comparison against the networkx implementation of dag_longest_path.
    expected_path = ['59d284e254912526', '7ddca1046ef1985c', '5d98752257e51041', '4c57c76218613840', 'ce71a9e6624497a6', '62a5b8bdc147d7dd', '3d46a9ec2871f006', '6d05055c18416f23', '09b064d0dfd77159', '1ac8a21aee22b4e3', '206c8bde4844d1da', '0968878f47f64916', '4e389a109ab7353c', '1a3fbbb81821e5dd', '02b8700a2f5e645f', 'e4546d09dde35985'] # noqa: E501
    with open(traces_path('event_processing_app')) as json_file:
        span_graph = create_span_graph(json.load(json_file))
    assert span_graph.graph['longest_path'] == expected_path
def test_extract_trace_event_processing_app():
    """Reproduces a trace with a validation error on the trace duration:
    "Trace duration 0:00:00.125000 does not match latency breakdown 0:00:00.047000
    within margin 0:00:00.001001."
    Source:
    exp31/faas-migration/Event-Processing/Lambda/logs/2021-04-30_05-34-22
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608b975f-82c9cf3915cf8d7c1093ada7', 1619760991.873, 1619760991.998, datetime.timedelta(microseconds=125000), 'https://aqk7l5ytj2.execute-api.eu-west-1.amazonaws.com/dev/ingest', 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::SNS', 'AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::SQS::Queue'], ['dev-event-processing/dev', 'Lambda', 'event-processing-dev-ingest', 'event-processing-dev-ingest', 'Invocation', 'SNS', 'SNS', 'event-processing-dev-format_state_change', 'Dwell Time', 'Attempt #1', 'event-processing-dev-format_state_change', 'Invocation', 'SQS', 'SQS', 'QueueTime', 'Overhead'], datetime.timedelta(microseconds=25523), datetime.timedelta(microseconds=11000), None, None, datetime.timedelta(microseconds=1807), datetime.timedelta(microseconds=32000), datetime.timedelta(microseconds=670), datetime.timedelta(microseconds=54000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('event_processing_app')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_hello_retail_app_error():
    """Reproduces a trace with the error:
    "Task Timed Out:
    'arn:aws:states:eu-west-1:0123456789012:activity:dev-hello-retail-product-photos-receive"
    Source:
    exp31/hello-retail/logs/2021-04-30_14-15-59
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608c1487-8892c4a7bc5e24a223902a15', 1619793031.301, 1619793031.47, datetime.timedelta(microseconds=169000), 'https://luokbyeogl.execute-api.eu-west-1.amazonaws.com/dev/sms', 0, 2, 0, 2, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::DynamoDB::Table', 'AWS::S3::Bucket', 'AWS::stepfunctions'], ['dev-hello-retail-product-photos-receive/dev', 'Lambda', 'hello-retail-product-photos-receive-dev-receive', 'hello-retail-product-photos-receive-dev-receive', 'Invocation', 'DynamoDB', 'DynamoDB', 'S3', 'S3', 'stepfunctions', 'stepfunctions', 'Overhead'], datetime.timedelta(microseconds=52829), None, None, None, datetime.timedelta(microseconds=6511), None, datetime.timedelta(microseconds=660), datetime.timedelta(microseconds=109000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('hello_retail_app_error')
    assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_image_processing_app_error():
    """Reproduces a trace with the error:
    "PhotoDoesNotMeetRequirementError"
    Source:
    exp31/aws-serverless-workshops/ImageProcessing/logs/2021-04-30_07-02-35
    trace_id=1-608bac5f-feef4e053d4b9fa008fcb044
    """
    # Golden expected row; field semantics are defined by assert_trace_breakdown.
    expected_breakdown = ['1-608bac5f-feef4e053d4b9fa008fcb044', 1619766367.436, 1619766368.579, datetime.timedelta(seconds=1, microseconds=143000), 'https://s47zgw7ake.execute-api.eu-west-1.amazonaws.com/execute/', 0, 4, 0, 1, ['AWS::Lambda', 'AWS::StepFunctions::StateMachine', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::rekognition'], ['APIGatewayToStepFunctions/execute', 'STEPFUNCTIONS', 'RiderPhotoProcessing-8gfRn3qHdsBb', 'FaceDetection', 'Lambda', 'wildrydes-FaceDetectionFunction-UB72KZMWRLCF', 'wildrydes-FaceDetectionFunction-UB72KZMWRLCF', 'Invocation', 'rekognition', 'rekognition', 'Overhead', 'PhotoDoesNotMeetRequirement', 'Lambda', 'wildrydes-NotificationPlaceholderFunction-KDTBMSLPJ0O2', 'wildrydes-NotificationPlaceholderFunction-KDTBMSLPJ0O2', 'Invocation', 'Overhead'], datetime.timedelta(microseconds=106497), datetime.timedelta(0), None, None, datetime.timedelta(microseconds=4818), None, datetime.timedelta(microseconds=685), datetime.timedelta(seconds=1, microseconds=31000), datetime.timedelta(0)] # noqa: E501
    tp = traces_path('image_processing_app_error')
    assert_trace_breakdown(tp, expected_breakdown)
# def test_extract_tmp_visualizer():
# """Just a tmp case for creating visualizer data
# """
# expected_breakdown = [] # noqa: E501
# tp = traces_path('long_trigger1')
# assert_trace_breakdown(tp, expected_breakdown)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import numpy.random as npr
import cv2
# from matplotlib.colors import rgb_to_hsv
# from matplotlib.colors import hsv_to_rgb
from configure import cfg
import utils.blob
# from caffe.io import resize_image
def GenerateBatchSamples(roi, img_shape):
    """Run every configured batch sampler over *roi* and pool the boxes."""
    all_bboxes = []
    for sampler_spec in cfg.TRAIN.batch_sampler:
        all_bboxes.extend(GenerateSamples(roi, sampler_spec, img_shape))
    return all_bboxes
def GenerateSamples(roi, batch_sampler, img_shape):
    """
    Draw up to ``batch_sampler.max_sample`` random boxes that satisfy
    ``batch_sampler.sample_constraint`` against *roi*, trying at most
    ``batch_sampler.max_trials`` times.

    Returns a list of sampled bboxes in pixel coordinates.
    """
    sampled_bboxes = []
    for _ in range(batch_sampler.max_trials):
        # Stop once enough samples were found. The original check used
        # `found > max_sample`, an off-by-one that allowed max_sample + 1
        # boxes to be collected.
        if len(sampled_bboxes) >= batch_sampler.max_sample:
            break
        # Generate sampled_bbox in the normalized space [0, 1].
        sampled_bbox = SampleBBox(batch_sampler.sampler, img_shape)
        if SatisfySampleConstraint(sampled_bbox, roi,
                                   batch_sampler.sample_constraint):
            sampled_bboxes.append(sampled_bbox)
    return sampled_bboxes
def SampleBBox(sampler, img_shape):
    """
    Draw one random bbox and return it as [x1, y1, x2, y2] in pixel
    coordinates (np.uint16), clipped to the image bounds.

    The box is first sampled in the normalized [0, 1] space using a random
    scale and aspect ratio from *sampler*, then mapped onto *img_shape*
    (height, width).
    """
    # Random scale in (0, 1].
    assert sampler.max_scale >= sampler.min_scale
    assert sampler.min_scale > 0.0
    assert sampler.max_scale <= 1.0
    scale = npr.uniform(sampler.min_scale, sampler.max_scale)

    # Random aspect ratio, clamped so neither side exceeds the unit square.
    assert sampler.max_aspect_ratio >= sampler.min_aspect_ratio
    assert sampler.min_aspect_ratio > 0.0
    assert sampler.max_aspect_ratio < 10000
    ratio = npr.uniform(sampler.min_aspect_ratio, sampler.max_aspect_ratio)
    ratio = min(max(ratio, 1.0 * math.pow(scale, 2.0)),
                1.0 / math.pow(scale, 2.0))

    # Normalized box dimensions and a random top-left corner.
    box_w = scale * math.sqrt(ratio)
    box_h = scale / math.sqrt(ratio)
    y_off = npr.uniform(0.0, 1.0 - box_h)
    x_off = npr.uniform(0.0, 1.0 - box_w)

    # Map from normalized [0, 1] space into pixel coordinates.
    box_h = box_h * img_shape[0]
    box_w = box_w * img_shape[1]
    y_off = y_off * img_shape[0]
    x_off = x_off * img_shape[1]
    assert box_w > 0
    assert box_h > 0

    bbox = np.array(
        [x_off, y_off, x_off + box_w, y_off + box_h],
        dtype=np.uint16)
    # Clip to the image, keeping x1 <= x2 and y1 <= y2.
    bbox[0] = min(max(bbox[0], 0), img_shape[1] - 1)
    bbox[1] = min(max(bbox[1], 0), img_shape[0] - 1)
    bbox[2] = min(max(bbox[2], bbox[0]), img_shape[1] - 1)
    bbox[3] = min(max(bbox[3], bbox[1]), img_shape[0] - 1)
    assert bbox[0] <= bbox[2]
    assert bbox[1] <= bbox[3]
    return bbox
def SatisfySampleConstraint(sampled_bbox, roi, sample_constraint):
    """
    Return True if *sampled_bbox* has a Jaccard overlap with at least one
    roi row inside [min_jaccard_overlap, max_jaccard_overlap].

    Cleanup: the original kept an unused `found` flag and expressed the
    range test as two `continue` guards; both are simplified here.
    """
    for i in range(roi.shape[0]):
        overlap = JaccardOverlap(sampled_bbox, roi[i, :])
        if (sample_constraint.min_jaccard_overlap <= overlap
                <= sample_constraint.max_jaccard_overlap):
            return True
    return False
def JaccardOverlap(bbox1, bbox2):
    """Intersection-over-union with inclusive pixel counting (+1 per side)."""
    inter = IntersectBBox(bbox1, bbox2)
    inter_w = inter[2] - inter[0] + 1
    inter_h = inter[3] - inter[1] + 1
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter_size = inter_w * inter_h
    union = BBoxSize(bbox1) + BBoxSize(bbox2) - inter_size
    return 1.0 * inter_size / union
def IntersectBBox(bbox1, bbox2):
    """Return [x1, y1, x2, y2] of the overlap, or [-1.0]*4 when disjoint."""
    disjoint = (bbox2[0] > bbox1[2] or bbox2[2] < bbox1[0]
                or bbox2[1] > bbox1[3] or bbox2[3] < bbox1[1])
    if disjoint:
        # -1 sentinel (instead of zeros) so callers can tell "no overlap"
        # apart from a genuine box at the origin.
        return [-1.0, -1.0, -1.0, -1.0]
    return [
        max(bbox1[0], bbox2[0]),
        max(bbox1[1], bbox2[1]),
        min(bbox1[2], bbox2[2]),
        min(bbox1[3], bbox2[3]),
    ]
def BBoxSize(bbox):
    """Inclusive pixel area of bbox; 0.0 for degenerate (inverted) boxes."""
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    if width < 0 or height < 0:
        # Invalid box (xmax < xmin or ymax < ymin).
        return 0.0
    return (width + 1) * (height + 1)
def Crop(img, crop_bbox):
    """Cut crop_bbox = [x1, y1, x2, y2] (inclusive pixel coords) out of img."""
    img_shape = img.shape
    x1, y1, x2, y2 = crop_bbox[0], crop_bbox[1], crop_bbox[2], crop_bbox[3]
    assert x1 >= 0, x1
    assert y1 >= 0, y1
    assert x2 <= img_shape[1], '{} vs {}'.format(x2, img_shape[1])
    assert y2 <= img_shape[0], '{} vs {}'.format(y2, img_shape[0])
    # +1 because the bbox maxima are inclusive.
    return img[y1:y2 + 1, x1:x2 + 1, :]
def MeetEmitConstraint(src_bbox, bbox):
    """
    Return True when the center of *bbox* lies inside *src_bbox*
    (both bounds inclusive).

    Cleanup: collapsed the `if cond: return True else: return False`
    pattern into a direct boolean return.
    """
    x_center = 1.0 * (bbox[0] + bbox[2]) / 2
    y_center = 1.0 * (bbox[1] + bbox[3]) / 2
    return bool(src_bbox[0] <= x_center <= src_bbox[2]
                and src_bbox[1] <= y_center <= src_bbox[3])
def ApplyCrop(img):
    """
    Randomly crop *img* to a cfg.TRAIN.CROP fraction of its size.

    Returns (cropped image, crop bbox [x1, y1, x2, y2] as np.uint16 with
    inclusive max coordinates). When cropping is disabled (CROP <= 0) the
    image is returned unchanged with a full-frame bbox.
    """
    if cfg.TRAIN.CROP <= 0:
        full_h = img.shape[0]
        full_w = img.shape[1]
        return img, np.array(
            (0, 0, full_w - 1, full_h - 1), dtype=np.uint16)
    shape_hw = np.array(img.shape)[:2]
    crop_hw = shape_hw * cfg.TRAIN.CROP
    rand_h = npr.random()
    rand_w = npr.random()
    # Random top-left offset inside the remaining slack.
    offset = shape_hw - crop_hw
    offset[0] *= rand_h
    offset[1] *= rand_w
    crop_bbox = np.array(
        [offset[1], offset[0],
         offset[1] + crop_hw[1] - 1, offset[0] + crop_hw[0] - 1],
        dtype=np.uint16)
    crop_img = img[crop_bbox[1]:crop_bbox[3] + 1,
                   crop_bbox[0]:crop_bbox[2] + 1, :]
    return crop_img, crop_bbox
def ApplyExpand(img):
    """
    With probability cfg.TRAIN.expand_prob, paste *img* onto a larger
    mean-filled canvas via ExpandImage; otherwise return it unchanged.
    """
    img_h = img.shape[0]
    img_w = img.shape[1]
    identity_bbox = np.array((0, 0, img_w, img_h), dtype=np.uint16)
    if npr.random() > cfg.TRAIN.expand_prob:
        return img, identity_bbox
    if abs(cfg.TRAIN.max_expand_ratio - 1.) < 1e-2:
        # Maximum ratio is ~1: expansion would be a no-op.
        return img, identity_bbox
    expand_ratio = npr.uniform(1, cfg.TRAIN.max_expand_ratio)
    return ExpandImage(img, expand_ratio)
def ExpandImage(img, expand_ratio):
    """
    Paste *img* at a random offset inside a canvas ``expand_ratio`` times
    larger, filled with the dataset pixel mean (cfg.PIXEL_MEANS).

    Returns (expanded image, bbox of the original image inside the canvas
    as [x1, y1, x2, y2] with inclusive max coordinates; x1/y1 are the
    negated offsets).

    Cleanup: removed the unused local ``img_channels``.
    """
    src_h = img.shape[0]
    src_w = img.shape[1]
    # Canvas dimensions.
    dst_h = int(src_h * expand_ratio)
    dst_w = int(src_w * expand_ratio)
    # Random top-left placement of the source image inside the canvas.
    h_off = int(npr.uniform(0, dst_h - src_h))
    w_off = int(npr.uniform(0, dst_w - src_w))
    expand_bbox = np.array(
        [-w_off, -h_off, dst_w - w_off - 1, dst_h - h_off - 1])
    canvas = np.tile(cfg.PIXEL_MEANS, (dst_h, dst_w, 1)).astype(img.dtype)
    canvas[h_off:h_off + src_h, w_off:w_off + src_w, :] = img
    return canvas, expand_bbox
def ApplyDistort_old(in_img):
    """Legacy color jitter: scale HSV saturation and value by random gains."""
    hsv = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
    sat_gain = npr.random() * (cfg.TRAIN.SATURATION - 1) + 1
    val_gain = npr.random() * (cfg.TRAIN.EXPOSURE - 1) + 1
    # Invert each gain half of the time so desaturation/darkening occurs too.
    if npr.random() <= 0.5:
        sat_gain = 1.0 / sat_gain
    if npr.random() <= 0.5:
        val_gain = 1.0 / val_gain
    hsv = np.array(hsv, dtype=np.float32)
    hsv[:, :, 1] = np.minimum(sat_gain * hsv[:, :, 1], 255)
    hsv[:, :, 2] = np.minimum(val_gain * hsv[:, :, 2], 255)
    hsv = np.array(hsv, dtype=np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def ApplyDistort(in_img):
    """
    Apply the SSD-style photometric augmentation chain: brightness,
    saturation, exposure, hue, contrast and a random channel reorder,
    with contrast applied either right after brightness or second-to-last.
    The variant is chosen uniformly at random.
    """
    brightness = lambda img: RandomBrightness(
        img, cfg.TRAIN.brightness_prob, cfg.TRAIN.brightness_delta)
    contrast = lambda img: RandomContrast(
        img, cfg.TRAIN.contrast_prob, cfg.TRAIN.contrast_lower,
        cfg.TRAIN.contrast_upper)
    saturation = lambda img: RandomSaturation(
        img, cfg.TRAIN.saturation_prob, cfg.TRAIN.saturation_lower,
        cfg.TRAIN.saturation_upper)
    exposure = lambda img: RandomExposure(
        img, cfg.TRAIN.exposure_prob, cfg.TRAIN.exposure_lower,
        cfg.TRAIN.exposure_upper)
    hue = lambda img: RandomHue(
        img, cfg.TRAIN.hue_prob, cfg.TRAIN.hue_delta)
    reorder = lambda img: RandomOrderChannels(
        img, cfg.TRAIN.random_order_prob)
    # Only the position of the contrast step differs between the variants.
    if npr.random() > 0.5:
        pipeline = [brightness, contrast, saturation, exposure, hue, reorder]
    else:
        pipeline = [brightness, saturation, exposure, hue, contrast, reorder]
    out_img = in_img
    for op in pipeline:
        out_img = op(out_img)
    return out_img
def convertTo(in_img, alpha, beta):
    """Emulate cv2.convertTo: clip(in * alpha + beta, 0, 255), same dtype."""
    scaled = in_img.astype(np.float32) * alpha + beta
    return np.clip(scaled, 0, 255).astype(in_img.dtype)
# def bgr_to_hsv(bgr):
# b, g, r = cv2.split(bgr)
# rgb = cv2.merge((r, g, b))
# hsv = rgb_to_hsv(rgb)
# return hsv
# def hsv_to_bgr(hsv):
# rgb = hsv_to_rgb(hsv)
# r, g, b = cv2.split(rgb)
# bgr = cv2.merge((b, g, r))
# return bgr
def RandomBrightness(in_img, brightness_prob, brightness_delta):
    """With probability brightness_prob, shift all pixels by a random delta
    drawn from [-brightness_delta, brightness_delta]."""
    if npr.random() >= brightness_prob:
        return in_img
    assert brightness_delta > 0, "brightness_delta must be non-negative."
    delta = npr.uniform(-brightness_delta, brightness_delta)
    return AdjustBrightness(in_img, delta)
def AdjustBrightness(in_img, delta):
    """Add *delta* to every pixel, clipped to [0, 255]; no-op for delta == 0."""
    if delta == 0:
        return in_img
    return convertTo(in_img, 1, delta)
def RandomContrast(in_img, contrast_prob, lower, upper):
    """With probability contrast_prob, scale pixels by a random gain
    drawn from [lower, upper]."""
    if npr.random() >= contrast_prob:
        return in_img
    assert upper >= lower, 'contrast upper must be >= lower.'
    assert lower >= 0, 'contrast lower must be non-negative.'
    return AdjustContrast(in_img, npr.uniform(lower, upper))
def AdjustContrast(in_img, delta):
    """Scale pixel values by *delta*; no-op when delta is within 1e-3 of 1."""
    if abs(delta - 1.0) <= 1e-3:
        return in_img
    return convertTo(in_img, delta, 0)
def RandomExposure(in_img, exposure_prob, lower, upper):
    """
    With probability exposure_prob, scale the HSV value channel by a
    random gain drawn from [lower, upper].

    Bug fix: the assert messages said "saturation" (copy-paste from
    RandomSaturation); they now name the correct parameter.
    """
    prob = npr.random()
    if prob < exposure_prob:
        assert upper >= lower, 'exposure upper must be >= lower.'
        assert lower >= 0, 'exposure lower must be non-negative.'
        delta = npr.uniform(lower, upper)
        out_img = AdjustExposure(in_img, delta)
    else:
        out_img = in_img
    return out_img
def AdjustExposure(in_img, delta):
    """
    Multiply the HSV value (brightness) channel by *delta* and convert
    back to BGR; no-op when delta is within 1e-3 of 1.

    Bug fix: the guard was `abs(delta - 1.0) != 1e-3`, which is true for
    almost every delta — including exactly 1.0 — so the conversion ran
    even when it should have been skipped. The intended check is a
    tolerance test, as in AdjustContrast.
    """
    if abs(delta - 1.0) <= 1e-3:
        return in_img
    # Convert to HSV colorspace and scale only the V channel;
    # convertTo clips the result to [0, 255].
    out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
    out_img[:, :, 2] = convertTo(out_img[:, :, 2], delta, 0)
    # Back to BGR colorspace.
    return cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
def RandomSaturation(in_img, saturation_prob, lower, upper):
    """With probability saturation_prob, scale the HSV saturation channel
    by a random gain drawn from [lower, upper]."""
    if npr.random() >= saturation_prob:
        return in_img
    assert upper >= lower, 'saturation upper must be >= lower.'
    assert lower >= 0, 'saturation lower must be non-negative.'
    return AdjustSaturation(in_img, npr.uniform(lower, upper))
def AdjustSaturation(in_img, delta):
    """
    Multiply the HSV saturation channel by *delta* and convert back to
    BGR; no-op when delta is within 1e-3 of 1.

    Bug fix: the guard was `abs(delta - 1.0) != 1e-3`, which is true for
    almost every delta — including exactly 1.0 — so the conversion ran
    even when it should have been skipped. The intended check is a
    tolerance test, as in AdjustContrast.
    """
    if abs(delta - 1.0) <= 1e-3:
        return in_img
    # Convert to HSV colorspace and scale only the S channel;
    # convertTo clips the result to [0, 255].
    out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
    out_img[:, :, 1] = convertTo(out_img[:, :, 1], delta, 0)
    # Back to BGR colorspace.
    return cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
def RandomHue(in_img, hue_prob, hue_delta):
    """With probability hue_prob, shift the hue channel by a random delta
    drawn from [-hue_delta, hue_delta]."""
    if npr.random() >= hue_prob:
        return in_img
    assert hue_delta >= 0, 'hue_delta must be non-negative.'
    return AdjustHue(in_img, npr.uniform(-hue_delta, hue_delta))
def AdjustHue(in_img, delta):
    """
    Shift the HSV hue channel by *delta* and convert back to BGR;
    no-op for delta == 0.

    NOTE(review): convertTo clips to [0, 255] rather than wrapping at 180
    (OpenCV's uint8 hue range) — presumably accepted behavior; confirm.
    """
    if delta == 0:
        return in_img
    out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
    # Offset only the H channel.
    out_img[:, :, 0] = convertTo(out_img[:, :, 0], 1, delta)
    return cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
def RandomOrderChannels(in_img, random_order_prob):
    """
    With probability random_order_prob, randomly permute the image's
    three color channels; otherwise return the image unchanged.

    Bug fix: ``npr.shuffle`` shuffles in place and returns None. The
    original reassigned its result (``channels = npr.shuffle(channels)``),
    so ``cv2.merge`` was called with None and crashed.
    """
    prob = npr.random()
    if prob < random_order_prob:
        # Split the image to 3 channels.
        channels = list(cv2.split(in_img))
        assert len(channels) == 3
        # Shuffle the channels in place; do not use the return value.
        npr.shuffle(channels)
        out_img = cv2.merge(channels)
    else:
        out_img = in_img
    return out_img
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob.

    Returns (resized float32 image, [h_scale, w_scale]).

    Fixes: interpolation-name dispatch via a dict instead of an if/elif
    chain, and the error paths now exit with a non-zero status (they used
    to call exit(0)/exit(), signaling success on failure) using a
    Py2/3-compatible print().
    """
    # Configurable interpolation names mapped to OpenCV flags.
    interp_modes = {
        'LINEAR': cv2.INTER_LINEAR,
        'AREA': cv2.INTER_AREA,
        'NEAREST': cv2.INTER_NEAREST,
        'CUBIC': cv2.INTER_CUBIC,
        'LANCZOS4': cv2.INTER_LANCZOS4,
    }
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    interp_mode = cv2.INTER_LINEAR
    if len(cfg.TRAIN.INTERP_MODEL) > 0:
        # Pick a random interpolation mode from the configured list.
        idx = npr.randint(len(cfg.TRAIN.INTERP_MODEL))
        interp_name = cfg.TRAIN.INTERP_MODEL[idx]
        try:
            interp_mode = interp_modes[interp_name]
        except KeyError:
            print('Unknown interp mode: {}'.format(interp_name))
            exit(1)
    if cfg.RESIZE_MODE == 'WARP':
        # Stretch both axes independently to target_size x target_size.
        im_scale_h = float(target_size) / float(im_shape[0])
        im_scale_w = float(target_size) / float(im_shape[1])
        im = cv2.resize(
            im,
            None,
            None,
            fx=im_scale_w,
            fy=im_scale_h,
            interpolation=interp_mode)
        im_scales = [im_scale_h, im_scale_w]
    elif cfg.RESIZE_MODE == 'FIT_SMALLEST':
        # Scale so the short side reaches target_size, capping the long
        # side at max_size.
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > max_size:
            im_scale = float(max_size) / float(im_size_max)
        im = cv2.resize(
            im,
            None,
            None,
            fx=im_scale,
            fy=im_scale,
            interpolation=interp_mode)
        im_scales = [im_scale, im_scale]
    else:
        print('Unknown resize mode: {}'.format(cfg.RESIZE_MODE))
        exit(1)
    return im, im_scales
def get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (ndarray): list of [h, w] image scales (relative
            to im) used in the image pyramid

    Bug fix: the WARP branch resized `im` — the raw input, without mean
    subtraction — and reassigned it, so every scale after the first
    resized the *previous iteration's output* instead of the original.
    It now resizes `im_orig`, matching the FIT_SMALLEST branch.
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    processed_ims = []
    im_scale_factors = []
    if cfg.RESIZE_MODE == 'WARP':
        for target_size in cfg.TEST.SCALES:
            im_scale_h = float(target_size) / float(im_shape[0])
            im_scale_w = float(target_size) / float(im_shape[1])
            resized = cv2.resize(
                im_orig,
                None,
                None,
                fx=im_scale_w,
                fy=im_scale_h,
                interpolation=cv2.INTER_LINEAR)
            im_scale_factors.append([im_scale_h, im_scale_w])
            processed_ims.append(resized)
    elif cfg.RESIZE_MODE == 'FIT_SMALLEST':
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])
        for target_size in cfg.TEST.SCALES:
            im_scale = float(target_size) / float(im_size_min)
            # Prevent the biggest axis from being more than MAX_SIZE
            if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
                im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
            resized = cv2.resize(
                im_orig,
                None,
                None,
                fx=im_scale,
                fy=im_scale,
                interpolation=cv2.INTER_LINEAR)
            im_scale_factors.append([im_scale, im_scale])
            processed_ims.append(resized)
    # Create a blob to hold the input images
    blob = utils.blob.im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
def normalize_img_roi(img_roi, img_shape):
    """Scale roi columns [x1, y1, x2, y2] into [0, 1] by image width/height.

    Returns a copy; *img_roi* itself is left untouched."""
    normalized = np.copy(img_roi)
    height = img_shape[0]
    width = img_shape[1]
    # x columns divide by width, y columns by height.
    for col, extent in ((0, width), (1, height), (2, width), (3, height)):
        normalized[:, col] = normalized[:, col] / extent
    return normalized
|
nilq/baby-python
|
python
|
from django.urls import path
from .views import ResultListView, create_result, edit_results
# URL routes for the results app: creation/editing endpoints plus a
# read-only listing backed by ResultListView.
urlpatterns = [
    path("create/", create_result, name="create-result"),
    path("edit-results/", edit_results, name="edit-results"),
    path("view/all", ResultListView.as_view(), name="view-results"),
]
|
nilq/baby-python
|
python
|
"""
Class for manipulating user information.
"""
from datetime import date, timedelta
from functools import total_ordering
from logging import getLogger
from os import stat
from os.path import exists
from re import compile as re_compile
from stat import (S_IMODE, S_ISDIR, S_ISREG)
from typing import (
Any, Collection, Dict, FrozenSet, Optional, NamedTuple, Set, Type, TypeVar,
Union)
from .constants import (
EPOCH, FIELD_PATTERN, REAL_NAME_MAX_LENGTH, UID_MIN, UID_MAX)
from .entity import Entity
from .utils import parse_opt_int
# pylint: disable=C0103
# Module-level logger, named after the module for hierarchical log config.
log = getLogger(__name__)
class UserTuple(NamedTuple):
    """
    UserTuple(NamedTuple)
    Holds the data for a User object in an immutable format.
    """
    name: str                                   # login name
    uid: int                                    # numeric user id
    gid: int                                    # primary group id
    real_name: str                              # real name / contact-info field
    home: str                                   # home directory path
    shell: str                                  # login shell path
    password: Optional[str]                     # password field, if known
    last_password_change_date: Optional[date]
    password_age_min_days: Optional[int]
    password_age_max_days: Optional[int]
    password_warn_days: Optional[int]
    password_disable_days: Optional[int]
    account_expire_date: Optional[date]
    ssh_public_keys: FrozenSet[str]             # authorized SSH public keys
    modified: bool                              # True if changed since load
# Type variable bound to User, for classmethods that return instances.
U = TypeVar("U", bound="User")
@total_ordering
class User(Entity):
"""
User object for holding data about a single user entry in the /etc/passwd
and /etc/shadow files.
"""
# pylint: disable=W0201,R0902
    def __init__( # pylint: disable=R0913,R0914
            self, name: str, uid: int, gid: int, real_name: str, home: str,
            shell: str, password: Optional[str] = None,
            last_password_change_date: Optional[date] = None,
            password_age_min_days: Optional[int] = None,
            password_age_max_days: Optional[int] = None,
            password_warn_days: Optional[int] = None,
            password_disable_days: Optional[int] = None,
            account_expire_date: Optional[date] = None,
            ssh_public_keys: Optional[Set[str]] = None,
            modified: bool = False) -> None:
        """
        Create a new User object.

        name/uid/gid/real_name/home/shell mirror the /etc/passwd fields;
        the password-aging values and account_expire_date correspond to
        /etc/shadow data. ssh_public_keys is an optional set of authorized
        public keys; modified marks the entity as changed since load.
        """
        # name, gid, password and modified are also passed to the Entity
        # base class; name is then reassigned below (presumably through a
        # property setter defined on this class — confirm).
        super(User, self).__init__(name=name, gid=gid, password=password, modified=modified)
        self.name = name
        self.uid = uid
        self.real_name = real_name
        self.home = home
        self.shell = shell
        self.last_password_change_date = last_password_change_date
        self.password_age_min_days = password_age_min_days
        self.password_age_max_days = password_age_max_days
        self.password_warn_days = password_warn_days
        self.password_disable_days = password_disable_days
        self.account_expire_date = account_expire_date
        self.ssh_public_keys = ssh_public_keys
def __eq__(self, other: Any) -> bool:
if not isinstance(other, User):
return False
return self.as_tuple == other.as_tuple
def __ne__(self, other: Any) -> bool:
if not isinstance(other, User):
return True
return self.as_tuple != other.as_tuple
def __lt__(self, other: "User") -> bool:
self._lt_check_other_type(other)
return self.as_tuple < other.as_tuple
@property
def uid(self) -> int:
"""
The integer user id of the user.
"""
return self._uid
@uid.setter
def uid(self, value: int) -> None:
if not isinstance(value, int):
raise TypeError("uid must be an int")
if not UID_MIN <= value <= UID_MAX:
raise ValueError(
f"uid must be between {UID_MIN} and {UID_MAX}, inclusive: "
f"{value}")
self._uid = value
@property
def real_name(self) -> str:
"""
The real name of the user.
This _may_ be a comma-delimited list of values containing the following
fields:
* The user's full name
* The building and room number
* Office telephone number
* Home telephone number
* Other contact information
"""
return self._real_name
@real_name.setter
def real_name(self, value: Optional[str]) -> None:
if value is None:
self._real_name = ""
return
if not isinstance(value, str):
raise TypeError("real_name must be a string or None")
if not FIELD_PATTERN.match(value):
raise ValueError("real_name contains illegal characters")
if len(value.encode("utf-8")) > REAL_NAME_MAX_LENGTH:
raise ValueError(
f"real_name is longer than {REAL_NAME_MAX_LENGTH} bytes "
f"(UTF-8 encoded)")
self._real_name = value
@property
def home(self) -> str:
"""
The home directory of the user.
"""
return self._home
@home.setter
def home(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError("home must be a string")
if not FIELD_PATTERN.match(value):
raise TypeError("home contains illegal characters")
self._home = value
@property
def shell(self) -> str:
"""
The login shell of the user.
"""
return self._shell
@shell.setter
def shell(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError("shell must be a string")
if not FIELD_PATTERN.match(value):
raise ValueError(
"shell is not an absolute path or contains doubled or "
f"trailing slashes: {value}")
self._shell = value
@property
def ssh_public_keys(self) -> FrozenSet[str]:
"""
The SSH public keys of the user.
"""
return frozenset(self._ssh_public_keys)
@ssh_public_keys.setter
def ssh_public_keys(
self, value: Optional[Union[Collection[str], str]]) -> None:
if value is None:
self._ssh_public_keys = set() # type: Set[str]
return
if isinstance(value, str):
self._ssh_public_keys = set([value])
return
if not isinstance(value, (list, tuple, set)):
raise TypeError("ssh_public_keys must be a collection of strings")
new_ssh_public_keys = set() # type: Set[str]
for el in value:
if not isinstance(el, str):
raise TypeError(
"ssh_public_keys must be a collection of strings")
new_ssh_public_keys.add(el)
self._ssh_public_keys = new_ssh_public_keys
@property
def ssh_dir_permissions_ok(self) -> bool:
"""
Indicates whether ~/.ssh exists, is a directory owned by the user,
and is only writable by the user.
"""
# pylint: disable=R0911
home = self.home
if not home:
log.debug(
"User %s does not have a home directory set", self.name)
return False
ssh_dir = home + "/.ssh"
if not exists(ssh_dir):
log.debug(
"User %s does not have ~/.ssh directory: %s", self.name,
ssh_dir)
return False
try:
ssh_stat = stat(ssh_dir)
except OSError as e:
log.error("Unable to stat %s: %s", ssh_dir, e)
return False
if ssh_stat.st_uid != self.uid:
log.warning(
"User %s does not own ~/.ssh directory %s: user uid %d, "
"owner uid %d", self.name, ssh_dir, self.uid, ssh_stat.st_uid)
return False
if not S_ISDIR(ssh_stat.st_mode):
log.warning(
"User %s ~/.ssh direcotry %s is not a directory", self.name,
ssh_dir)
return False
mode_bits = S_IMODE(ssh_stat.st_mode)
if mode_bits & 0o020:
log.warning(
"User %s ~/.ssh directory %s is group-writable", self.name,
ssh_dir)
return False
if mode_bits & 0o002:
log.warning(
"User %s ~/.ssh directory %s is other-writable", self.name,
ssh_dir)
return False
return True
@property
def authorized_keys_permissions_ok(self) -> bool:
"""
Indicates whether ~/.ssh/authorized_keys exists, is owned by the
user, and is only writable by the user.
"""
# pylint: disable=R0911
if not self.ssh_dir_permissions_ok:
return False
auth_keys = self.home + "/.ssh/authorized_keys"
if not exists(auth_keys):
log.debug(
"User %s does not have ~/.ssh/authorized_keys: %s", self.name,
auth_keys)
return False
try:
auth_keys_stat = stat(auth_keys)
except OSError as e:
log.error("Unable to stat %s: %s", auth_keys, e)
return False
if auth_keys_stat.st_uid != self.uid:
log.warning(
"User %s does not own ~/.ssh/authorized_keys file %s: user "
"uid %d, owner uid %d", self.name, auth_keys, self.uid,
auth_keys_stat.st_uid)
return False
if not S_ISREG(auth_keys_stat.st_mode):
log.warning(
"User %s ~/.ssh/authorized_keys file %s is not a file",
self.name, auth_keys)
return False
mode_bits = S_IMODE(auth_keys_stat.st_mode)
if mode_bits & 0o020:
log.warning(
"User %s ~/.ssh/authorized_keys file %s is group-writable",
self.name, auth_keys)
return False
if mode_bits & 0o002:
log.warning(
"User %s ~/.ssh/authorized_keys file %s is other-writable",
self.name, auth_keys)
return False
return True
@property
def authorized_keys(self) -> Set[str]:
"""
Return the authorized keys found in ~/.ssh
"""
result = set() # type: Set[str]
auth_keys = self.home + "/.ssh/authorized_keys"
if not self.authorized_keys_permissions_ok:
return result
with open(auth_keys, "r") as fd:
for line in fd:
line = line.strip()
if line:
result.add(line)
return result
@property
def as_tuple(self) -> UserTuple:
"""
The user represented as an immutable tuple object.
"""
return UserTuple(
name=self.name,
uid=self.uid,
gid=self.gid,
real_name=self.real_name,
home=self.home,
shell=self.shell,
password=self.password,
last_password_change_date=self.last_password_change_date,
password_age_min_days=self.password_age_min_days,
password_age_max_days=self.password_age_max_days,
password_warn_days=self.password_warn_days,
password_disable_days=self.password_disable_days,
account_expire_date=self.account_expire_date,
ssh_public_keys=self.ssh_public_keys,
modified=self.modified,
)
def __repr__(self):
return repr(self.as_tuple)
@staticmethod
def date_from_days(days: Optional[int]) -> Optional[date]:
"""
User.date_from_days(days: Optional[int]) -> Optional[date]
Convert a count of days-from-epoch to an optional date field.
If days is negative or None, the result is None.
This standardizes negative values returned by the Python spwd library
to None values.
"""
if days is None or days < 0:
return None
return EPOCH + timedelta(days=days)
@staticmethod
def age_from_days(days: int) -> Optional[int]:
"""
User.age_from_days(days: Optional[int]) -> Optional[int]
Convert an age in days to an optional age field.
If days is negative or None, the result is None.
This standardizes negative values returned by the Python spwd library
to None values.
"""
if days is None or days < 0:
return None
return days
_iso8601_date_pattern = re_compile(
r"^(?P<year>[0-9]{4})-?"
r"(?P<month>[0-9][1-9]|1[0-2])-?"
r"(?P<day>0[1-9]|[12][0-9]|3[01])$")
@staticmethod
def date_from_string(s: Optional[str]) -> Optional[date]:
"""
User.date_from_string(s: Optional[str]) -> Optional[date]
Convert a string date in YYYY-MM-DD form to a date object. If s is
None, this returns None.
"""
if s is None:
return None
m = User._iso8601_date_pattern.match(s)
if not m:
raise ValueError("Cannot parse as date: %r" % s)
year = int(m.group("year"))
month = int(m.group("month"))
day = int(m.group("day"))
return date(year, month, day)
def update_from_dynamodb_item(self, item: Dict[str, Any]) -> bool:
"""
user.update_from_dynamodb_item(item: Dict[str, Any]) -> bool
Update the user from a given DynamoDB item. If an attribute has been
modified, the modified flag is set to true.
The name field cannot be updated.
The return value is the value of the modified flag.
"""
super(User, self).update_from_dynamodb_item(item)
uid = int(item["UID"]["N"])
if self.uid != uid:
self.uid = uid
self.modified = True
real_name = item.get("RealName", {"S": ""})["S"]
if self.real_name != real_name:
self.real_name = real_name
self.modified = True
home = item.get("Home", {"S": ""})["S"]
if self.home != home:
self.home = home
self.modified = True
shell = item.get("Shell", {"S": ""})["S"]
if self.shell != shell:
self.shell = shell
self.modified = True
last_password_change_date = User.date_from_string(
item.get("LastPasswordChangeDate", {}).get("S"))
if self.last_password_change_date != last_password_change_date:
self.last_password_change_date = last_password_change_date
self.modified = True
password_age_min_days = parse_opt_int(
item.get("PasswordAgeMinDays", {}).get("N"))
if self.password_age_min_days != password_age_min_days:
self.password_age_min_days = password_age_min_days
self.modified = True
password_age_max_days = parse_opt_int(
item.get("PasswordAgeMaxDays", {}).get("N"))
if self.password_age_max_days != password_age_max_days:
self.password_age_max_days = password_age_max_days
self.modified = True
password_warn_days = parse_opt_int(
item.get("PasswordWarnDays", {}).get("N"))
if self.password_warn_days != password_warn_days:
self.password_warn_days = password_warn_days
self.modified = True
password_disable_days = parse_opt_int(
item.get("PasswordDisableDays", {}).get("N"))
if self.password_disable_days != password_disable_days:
self.password_disable_days = password_disable_days
self.modified = True
account_expire_date = User.date_from_string(
item.get("AccountExpireDate", {}).get("S"))
if self.account_expire_date != account_expire_date:
self.account_expire_date = account_expire_date
self.modified = True
ssh_public_keys = item.get("SSHPublicKeys", {}).get("SS", set())
if self.ssh_public_keys != ssh_public_keys:
self.ssh_public_keys = ssh_public_keys
self.modified = True
return self.modified
@classmethod
def from_dynamodb_item(cls: Type[U], item: Dict[str, Any]) -> U:
"""
User.from_dynamodb_item(item: Dict[str, Any]) -> User
Create a user from a given DynamoDB item. The modified flag is
automatically set to true.
"""
return cls(
name=item["Name"]["S"],
uid=int(item["UID"]["N"]),
gid=int(item["GID"]["N"]),
real_name=item.get("RealName", {"S": ""})["S"],
home=item.get("Home", {"S": ""})["S"],
shell=item.get("Shell", {"S": ""})["S"],
password=item.get("Password", {}).get("S"),
last_password_change_date=User.date_from_string(
item.get("LastPasswordChangeDate", {}).get("S")),
password_age_min_days=parse_opt_int(
item.get("PasswordAgeMinDays", {}).get("N")),
password_age_max_days=parse_opt_int(
item.get("PasswordAgeMaxDays", {}).get("N")),
password_warn_days=parse_opt_int(
item.get("PasswordWarnDays", {}).get("N")),
password_disable_days=parse_opt_int(
item.get("PasswordDisableDays", {}).get("N")),
account_expire_date=User.date_from_string(
item.get("AccountExpireDate", {}).get("S")),
ssh_public_keys=item.get("SSHPublicKeys", {}).get("SS", set()),
modified=True)
|
nilq/baby-python
|
python
|
import os
import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

# Runtime dependencies come from the pinned base requirements file.
# Blank lines and comment lines are skipped so setuptools only receives
# real requirement specifiers.
with open(
    os.path.join(os.path.dirname(__file__), "config", "requirements", "base.txt"),
    encoding="utf-8",
) as f:
    requirements = [
        line.strip()
        for line in f
        if line.strip() and not line.lstrip().startswith("#")
    ]

setuptools.setup(
    name="rembrain_robot_framework",
    version="0.1.4",
    author="Rembrain",
    author_email="info@rembrain.ai",
    description="Rembrain Robot Framework",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/VasilyMorzhakov/rembrain_robotframework",
    # collect all packages
    packages=setuptools.find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
)
|
nilq/baby-python
|
python
|
#coding:utf-8
from flask import Flask, redirect, url_for, request
from datetime import datetime
from flask_bootstrap import Bootstrap
from app.config_default import Config as DefaultConfig
# Shared Flask-Bootstrap extension instance; bound to the app in create_app().
bootstrap = Bootstrap()
def check_start(app, db):
    """Detect whether the site is fully installed and, when it is not,
    register a before-request hook that funnels visitors into the
    setup/install flow.
    """
    from app.includes.start import _exist_config, exist_table, create_path, set_site

    create_path(app)
    app.start = False
    if _exist_config(app):
        from app.config import Config
        app.config.from_object(Config)
        if exist_table(app):
            # Fully installed: no redirect hook needs to be registered.
            app.start = True
            return

    # Endpoints that must remain reachable while installation is incomplete.
    allowed_endpoints = frozenset(("admin.setup", "admin.install", "static"))

    @app.before_request
    def request_check_start():
        if app.start:
            return set_site(app)
        if request.endpoint in allowed_endpoints:
            return None
        target = "admin.install" if _exist_config(app) else "admin.setup"
        return redirect(url_for(target))
def template_filters(app):
    """Register custom Jinja2 template filters on *app*."""
    @app.template_filter("friendly_time")
    def friendly_time(date):
        """Render *date* as a human-friendly relative time (Chinese text)."""
        now = datetime.now()
        delta = now - date
        # Floor division keeps the counts as integers; the old true
        # division fed floats into the "%d" format specifiers.
        if delta.days >= 365:
            return u'%d年前' % (delta.days // 365)
        elif delta.days >= 30:
            return u'%d个月前' % (delta.days // 30)
        elif delta.days > 0:
            return u'%d天前' % delta.days
        elif delta.days < 0:
            # Future timestamps (clock skew): clamp to "0 seconds ago".
            return u"0秒前"
        elif delta.seconds < 60:
            return u"%d秒前" % delta.seconds
        elif delta.seconds < 60 * 60:
            return u"%d分钟前" % (delta.seconds // 60)
        else:
            return u"%d小时前" % (delta.seconds // 60 // 60)
def create_app():
    """
    Application factory: build and configure the Flask app.

    The order below is deliberate: default config first, extensions next,
    the install-check hook, then blueprints, filters and logging.
    """
    app = Flask(__name__)
    app.config.from_object(DefaultConfig)
    # Imported inside the factory to avoid circular imports at module load.
    from app.models.model import db, login_manager
    bootstrap.init_app(app)
    db.init_app(app)
    # Database table-name prefix is taken from configuration.
    db.PREFIX = app.config["DB_PREFIX"]
    # Per-app site settings dict, exposed to every template below.
    app.site = {}
    def site_context_processor():
        return dict(site=app.site)
    app.context_processor(site_context_processor)
    # Decides whether the site is installed; may register a redirect hook.
    check_start(app, db)
    login_manager.init_app(app)
    from app.web import web
    app.register_blueprint(web)
    from app.admin import admin
    app.register_blueprint(admin, url_prefix="/admin")
    from app.api import api
    app.register_blueprint(api, url_prefix="/api")
    template_filters(app)
    login_manager.login_view = "admin.login"
    login_manager.login_message = "请先登录!!!"
    from app.log import init_logging
    init_logging(app)
    return app
|
nilq/baby-python
|
python
|
"""
Generate_branched_alkane
========================
"""
from rdkit import Chem
import numpy as np
import random
def generate_branched_alkane(num_atoms: int, save: bool = False) -> Chem.Mol:
    """Generates a branched alkane.

    Starting from n-butane, carbons are attached one at a time to randomly
    chosen atoms of degree < 3; degree-2 atoms are accepted only half of
    the time, which biases the growth towards branching.

    Parameters
    ----------
    num_atoms : int
        Number of atoms in molecule to be generated. Values below 4 yield
        plain n-butane.
    save : bool
        Whether to save the molecule as a .mol file.

    Returns
    -------
    Chem.Mol
        The sanitized molecule with explicit hydrogens added.
    """
    edit_mol = Chem.RWMol(Chem.MolFromSmiles('CCCC'))
    while edit_mol.GetNumAtoms() < num_atoms:
        rand_idx = np.random.randint(edit_mol.GetNumAtoms())
        atom = edit_mol.GetAtomWithIdx(rand_idx)
        # Skip saturated carbons; accept degree-2 carbons only half the time.
        if atom.GetDegree() > 2:
            continue
        if atom.GetDegree() == 2 and random.random() <= 0.5:
            continue
        # Attach a fresh carbon (atomic number 6) to the chosen atom.
        new_idx = edit_mol.AddAtom(Chem.rdchem.Atom(6))
        edit_mol.AddBond(new_idx, rand_idx, Chem.rdchem.BondType.SINGLE)
    Chem.SanitizeMol(edit_mol)
    mol = Chem.rdmolops.AddHs(edit_mol.GetMol())
    if save:
        Chem.rdmolfiles.MolToMolFile(mol, f'{num_atoms}_branched_alkane.mol')
    return mol
|
nilq/baby-python
|
python
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from openwisp_users.models import OrganizationUser, User
from .utils import TestOrganizationMixin
class TestUsers(TestOrganizationMixin, TestCase):
    """Tests for the custom User model and its organization relations."""
    user_model = User

    def test_create_superuser_email(self):
        superuser = User.objects.create_superuser(
            username='tester', password='tester', email='test@superuser.com')
        self.assertEqual(superuser.emailaddress_set.count(), 1)
        self.assertEqual(
            superuser.emailaddress_set.first().email, 'test@superuser.com')

    def test_create_superuser_email_empty(self):
        # No email address record should be created for an empty email.
        superuser = User.objects.create_superuser(
            username='tester', password='tester', email='')
        self.assertEqual(superuser.emailaddress_set.count(), 0)

    def test_unique_email_validation(self):
        self._create_user(username='user1', email='same@gmail.com')
        duplicate = self.user_model(
            username='user2', email='same@gmail.com', password='pass1')
        with self.assertRaises(ValidationError):
            duplicate.full_clean()
            duplicate.save()

    def test_create_user_without_email(self):
        user = self.user_model(username='testuser', password='test1')
        user.full_clean()
        user.save()
        self.assertIsNone(user.email)

    def test_organizations_pk(self):
        user = self._create_user(username='organizations_pk')
        first_org = self._create_org(name='org1')
        second_org = self._create_org(name='org2')
        self._create_org(name='org3')
        OrganizationUser.objects.create(user=user, organization=first_org)
        OrganizationUser.objects.create(user=user, organization=second_org)
        self.assertIn((first_org.pk,), user.organizations_pk)
        self.assertEqual(len(user.organizations_pk), 2)

    def test_organizations_pk_empty(self):
        user = self._create_user(username='organizations_pk')
        self.assertEqual(len(user.organizations_pk), 0)

    def test_organization_repr(self):
        org = self._create_org(name='org1', is_active=False)
        self.assertIn('disabled', str(org))

    def test_organization_owner_bad_organization(self):
        # An owner whose organization differs from its organization-user's
        # organization must fail validation.
        user = self._create_user(username='user1', email='abc@example.com')
        first_org = self._create_org(name='org1')
        second_org = self._create_org(name='org2')
        org_user = self._create_org_user(organization=first_org, user=user)
        org_owner = self._create_org_owner()
        org_owner.organization = second_org
        org_owner.organization_user = org_user
        with self.assertRaises(ValidationError):
            org_owner.full_clean()

    def test_create_users_without_email(self):
        for username in ('testuser', 'testuser2'):
            user = self.user_model(username=username, password='test1')
            user.full_clean()
            user.save()
            self.assertIsNone(user.email)
        self.assertEqual(User.objects.filter(email=None).count(), 2)
|
nilq/baby-python
|
python
|
from tensorflow_functions import cosine_knn
import collections
import numpy as np
import logging
from embedding import load_embedding
import operator
from sklearn.cluster import KMeans
from utils import length_normalize, normalize_questions, normalize_vector, calculate_cosine_simil, perf_measure
import sklearn.metrics
import argparse
import os
import datetime
class Question_Manager():
    """
    Stores user questions together with their normalized forms and sentence
    embeddings, and provides keyword, clustering, and similarity utilities.
    """
    # NOTE: all state lives on the instance (set in __init__). The previous
    # revision also declared shared mutable class attributes, a classic
    # source of cross-instance leakage; they have been removed.

    def __init__(self, embedding_path='/home/iker/Documents/QuestionCluster/TechEmbeddings/embeddings_lower.vec'):
        """Load the word embedding and start with an empty question store."""
        self.questions = []
        self.questions_normalized = []
        self.questions_vectors = []
        self.keywords = collections.defaultdict()
        self.embedding = load_embedding(embedding_path)

    def get_keywords(self):
        """Return (keyword, count) pairs sorted by descending frequency."""
        return sorted(self.keywords.items(), key=operator.itemgetter(1), reverse=True)

    def question_to_vector(self, question, prefix=False):
        """
        Average the embeddings of the words in *question*.

        If *prefix* is a string, each word is looked up as "<prefix>/<word>".
        Out-of-vocabulary words are skipped; if no word is found, a zero
        vector is returned and a warning is logged.
        """
        sentence = np.zeros([self.embedding.dims])
        num_words = 0
        for word in question:
            try:
                if prefix:
                    sentence += self.embedding.word_to_vector(prefix + '/' + word)
                else:
                    sentence += self.embedding.word_to_vector(word)
                num_words += 1
            except KeyError:
                # Word not in the embedding vocabulary: skip it.
                continue
        if num_words > 0:
            sentence = sentence / num_words
        else:
            logging.warning('Could not calculate the sentence embedding for the sentence ' + str(question))
        return sentence

    def update_keyword_for_sentence(self, question):
        """Increment the occurrence counter for every word in *question*."""
        for word in question:
            self.keywords[word] = self.keywords.get(word, 0) + 1

    def print_question(self, question, path='questions.txt'):
        """Append *question* to the question log file at *path*."""
        with open(path, 'a', encoding='utf-8') as file:
            print(str(question), file=file)

    def add_question(self, question):
        """Normalize, embed, and store *question* if it is non-empty."""
        normalized_question = normalize_questions(question)
        question_vector = self.question_to_vector(normalized_question)
        if len(normalized_question) > 0 and question_vector is not None:
            self.questions.append(question)
            self.questions_normalized.append(normalized_question)
            self.questions_vectors.append(question_vector)
            self.update_keyword_for_sentence(normalized_question)
            self.print_question(question)

    def load_form_file(self, path):
        """Load questions from a text file, one question per line."""
        with open(path, 'r', encoding='utf-8') as file:
            for line in file:
                self.add_question(line)

    def clustering(self, n_clusters=8):
        """K-means-cluster the stored questions into *n_clusters* groups.

        Returns a list of n_clusters lists of original question strings.
        """
        vectors = np.array(self.questions_vectors)
        kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(vectors)
        questions_cluster = [[] for _ in range(n_clusters)]
        # labels_ already holds the cluster assignment of every training
        # sample, so no second predict() pass over the same data is needed.
        for question_index, label in enumerate(kmeans.labels_):
            questions_cluster[label].append(self.questions[question_index])
        return questions_cluster

    def k_nearest(self, sentence, k=1):
        """Print the k stored questions most similar to *sentence*."""
        vectors_norm = length_normalize(np.array(self.questions_vectors))
        result = cosine_knn(
            [self.question_to_vector(normalize_questions(sentence))],
            vectors_norm, k=k)
        for neighbor_index in result[0]:
            print(self.questions[neighbor_index])

    def evaluate_similarity(self, question_file, threshold=0.8, prefix=False):
        """
        Evaluate cosine-similarity duplicate-question classification.

        *question_file* contains tab-separated "q1<TAB>q2<TAB>gold" lines.
        A pair is predicted as a duplicate when the cosine similarity of its
        sentence vectors exceeds *threshold*.

        Returns (log_loss, accuracy, TP, FP, TN, FN).
        """
        question1 = []
        question2 = []
        gold_scores = []
        with open(question_file, encoding='utf-8') as file:
            for line in file:
                q1, q2, gold = line.rstrip().split('\t')
                question1.append(q1)
                question2.append(q2)
                gold_scores.append(int(gold))
        question_vectors_1 = [
            self.question_to_vector(normalize_questions(x), prefix)
            for x in question1]
        question_vectors_2 = [
            self.question_to_vector(normalize_questions(x), prefix)
            for x in question2]
        scores = []
        total = len(question_vectors_1)
        for i in range(total):
            if i % 10 == 0:
                # Lightweight in-place progress indicator.
                progress = "<" + str(datetime.datetime.now()) + "> " + \
                    'Evaluating Question Pairs: ' + \
                    str(int(100 * ((i + 10) / total))) + '%'
                print(progress, end="\r")
            score = calculate_cosine_simil(
                question_vectors_1[i], question_vectors_2[i])
            scores.append(1 if score > threshold else 0)
        print()
        result = sklearn.metrics.log_loss(gold_scores, scores)
        TP, FP, TN, FN = perf_measure(gold_scores, scores)
        acc = np.sum(np.array(gold_scores) == np.array(scores)) / len(gold_scores)
        print('Log Loss: ' + str(result))
        print('Acc: ' + str(acc))
        print('TP: ' + str(TP) + '\tFP: ' + str(FP) + '\tTN: ' + str(TN) + '\tFN: ' + str(FN))
        print(scores)
        print(gold_scores)
        return result, acc, TP, FP, TN, FN
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--question_file', required=True, type=str)
    parser.add_argument('-e', '--embedding', required=True, type=str)
    parser.add_argument('-p', '--prefix', type=str, default=None)
    args = parser.parse_args()

    qm = Question_Manager(embedding_path=args.embedding)
    # Sweep the decision threshold and append one result row per value.
    for threshold in [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]:
        print('===> Threshold: ' + str(threshold))
        result = qm.evaluate_similarity(args.question_file, threshold, args.prefix)
        os.makedirs('Results', exist_ok=True)
        with open('Results/baseline.csv', 'a+') as file:
            # Tab-separated row:
            # embedding, threshold, log-loss, accuracy, TP, FP, TN, FN.
            row = '\t'.join(
                str(value) for value in (args.embedding, threshold, *result))
            print(row, file=file)
|
nilq/baby-python
|
python
|
from Impromptu import *
|
nilq/baby-python
|
python
|
import pytch
from pytch import (
Sprite,
Stage,
Project,
when_green_flag_clicked,
when_this_sprite_clicked,
)
import random
# Click the balloons to pop them and score points
class BalloonStage(Stage):
    """Stage with a solid midnight-blue backdrop."""

    Backdrops = [('midnightblue', 'library/images/stage/solid-midnightblue.png')]

    # TODO: Improve how using a non-default backdrop works.
    def __init__(self):
        # super() is the idiomatic way to chain to the base initializer.
        super().__init__()
        self.switch_backdrop('midnightblue')
class Balloon(Sprite):
    """A balloon that scores a point and pops when clicked."""

    Costumes = [('balloon', 'library/images/balloon.png', 50, 80)]
    Sounds = [('pop', 'library/sounds/pop.mp3')]

    def __init__(self):
        # super() is the idiomatic way to chain to the base initializer.
        super().__init__()
        self.score = 0

    def go_to_random_spot(self):
        """Move to a uniformly random on-stage position."""
        self.go_to_xy(random.randint(-200, 200),
                      random.randint(-150, 150))

    @when_green_flag_clicked
    def play_game(self):
        """Reset the score, then re-show the balloon every three seconds."""
        self.score = 0
        self.go_to_random_spot()
        self.switch_costume('balloon')
        self.show()
        while True:
            pytch.wait_seconds(3.0)
            self.go_to_random_spot()
            self.show()

    @when_this_sprite_clicked
    def pop(self):
        """Play the pop sound, hide the balloon, and count the point."""
        self.start_sound('pop')
        self.hide()
        self.score += 1
# Wire the stage and sprite into a project and start it running.
project = pytch.Project()
project.register_stage_class(BalloonStage)
project.register_sprite_class(Balloon)
project.go_live()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.