content: string (lengths 0–1.05M)
origin: string (2 classes)
type: string (2 classes)
#from time import sleep


class SessionHelper():
    def __init__(self, app):
        self.app = app

    def login(self, user_email, password):
        driver = self.app.driver
        self.app.open_page()
        #driver.find_element_by_id("email").click()
        driver.find_element_by_id("email").send_keys(user_email)
        driver.find_element_by_id("password").send_keys(password)
        # driver.find_element_by_id("password").click()
        driver.find_element_by_xpath("//input[@value='SIGN IN']").click()

    def logout(self):
        driver = self.app.driver
        driver.find_element_by_id("c1-user-text").click()
        driver.find_element_by_id("c1-menu-logout").click()
        #driver.getCurrentUrl()

    def ensure_logout(self):
        driver = self.app.driver
        if self.is_logged_in():
            self.logout()

    def is_logged_in(self):
        driver = self.app.driver
        #sleep(1)
        return len(driver.find_elements_by_id("c1-user-text")) > 0

    def is_logged_in_as(self, user_email):
        driver = self.app.driver
        return driver.find_element_by_id("c1-user-text").get_attribute("title") == user_email

    def ensure_login(self, user_email, password):
        #driver = self.app.driver
        if self.is_logged_in():
            if self.is_logged_in_as(user_email):
                return
            else:
                self.logout()
        self.login(user_email, password)
nilq/baby-python
python
#!/bin/env python # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nmigen import Shape from nmigen.hdl.rec import Record, DIR_FANIN, DIR_FANOUT class _Endpoint: """Abstract base class for Sinks and Sources.""" def __init__(self, payload_type, name, src_loc_at): self.payload_type = payload_type self._record = Record([ ("valid", Shape(), DIR_FANOUT), ("ready", Shape(), DIR_FANIN), ("last", Shape(), DIR_FANOUT), ("payload", payload_type, DIR_FANOUT), ], src_loc_at=2+src_loc_at, name=name) self.valid = self._record.valid self.ready = self._record.ready self.last = self._record.last self.payload = self._record.payload def is_transferring(self): """Returns an expression that is true when a transfer takes place.""" return (self.valid & self.ready) class Source(_Endpoint): """A stream source. Parameters ---------- payload_type: Shape(N) or Layout The payload transferred from this Source. name: str Base for signal names. Attributes: ----------- payload_type: Shape(N) or Layout valid: Signal(1), out ready: Signal(1), in last: Signal(1), out payload: Signal(N) or Record, out """ def __init__(self, payload_type, name=None, src_loc_at=0): super().__init__(payload_type, name, src_loc_at) def connect(self, sink): """Returns a list of statements that connects this source to a sink. Parameters: sink: This Sink to which to connect. """ assert isinstance(sink, Sink) return self._record.connect(sink._record) class Sink(_Endpoint): """A stream sink Parameters ---------- payload: Signal(N) or Record The payload transferred to this Sink. name: str Base for signal names. Attributes: ----------- payload_type: Shape(N) or Layout valid: Signal(1), in ready: Signal(1), out last: Signal(1), in payload: Signal(N) or Record, in """ def __init__(self, payload_type, name=None, src_loc_at=0): super().__init__(payload_type, name, src_loc_at) def glue_sources(source_in: Source, source_out: Source): """Combinatorially glues two sources together. source_in is combinatorially glued to source_out. This is useful when exposing a submodule's Source as part of the interface of the current module. The two sources must have identical payload types. Parameters: source_in: The source that forms part of the submodule's interface. source_out: The source that forms part of the current module's interface. Result: A sequence of statements that connects the two sources. """ # Checking to catch simple mistakes assert isinstance(source_in, Source) assert isinstance(source_out, Source) assert source_in.payload_type == source_out.payload_type return [ source_in.ready.eq(source_out.ready), source_out.valid.eq(source_in.valid), source_out.last.eq(source_in.last), source_out.payload.eq(source_in.payload), ] def glue_sinks(sink_in: Sink, sink_out: Sink): """Combinatorially glues two sinks together. sink_in is combinatorially glued to sink_out. This is useful when exposing a submodule's Sink as part of the interface of the current module. The two sinks must have identical payload types. 
Parameters: sink_in: The sink that forms part of the current module's interface. sink_out: The sink that forms part of the submodule's interface. Result: A sequence of statements that connects the two sinks. """ # Checking to catch simple mistakes assert isinstance(sink_in, Sink) assert isinstance(sink_out, Sink) assert sink_in.payload_type == sink_out.payload_type return [ sink_in.ready.eq(sink_out.ready), sink_out.valid.eq(sink_in.valid), sink_out.last.eq(sink_in.last), sink_out.payload.eq(sink_in.payload), ]
nilq/baby-python
python
# vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: Brian G. Merrell <bgmerrell@novell.com>
# Date:        12/03/2008
# Description: helpprovider.py wrapper script
#              Used by the helpprovider-*.py tests
##############################################################################

'''Application wrapper for helpprovider.py'''

from strongwind import *
from helpers import *
import sys


# class to represent the main window.
class HelpProviderFrame(accessibles.Frame):

    STREET_TIP = "Enter the street address in this text box."
    CITY_TIP = "Enter the city here."
    STATE_TIP = "Enter the state in this text box."
    ZIP_TIP = "Enter the zip code here."

    def __init__(self, accessible):
        super(HelpProviderFrame, self).__init__(accessible)
        self.text_boxes = self.findAllTexts(None)
        try:
            self.street_text_box = self.text_boxes[3]
            self.city_text_box = self.text_boxes[2]
            self.state_text_box = self.text_boxes[1]
            self.zip_text_box = self.text_boxes[0]
        except IndexError, e:
            print "Could not find all the expected text boxes"
            print e
            sys.exit(1)

    def assert_tooltip_appeared(self, message):
        procedurelogger.action("Verify that a tooltip appears and that it has the correct message. Also verify that no other tooltip accessibles are found")
        procedurelogger.expectedResult("Tooltip appears and reads: \"%s\"" % message)

        # verify that we can only find one tooltip
        tooltips = self.app.findAllToolTips(None)
        assert len(tooltips) == 1, "Only one tooltip accessible should exist"

        # verify that the tooltip has the message we expect
        tooltip = tooltips[0]
        assert tooltip.name == message, \
            "The tooltip does not have the expected message"

        # check the state of the tooltip just for fun
        statesCheck(tooltip, "ToolTip")

    def assert_descriptions(self):
        # Make sure that the accessible description for each text box matches
        # the tooltip message for that text box. This could be done from
        # assert_tooltip_appeared, but this allows a lot of tests to run even
        # if this assertion fails
        for text_box in self.text_boxes:
            procedurelogger.action("Click in %s" % text_box)
            text_box.mouseClick()
            self.keyCombo("F1")
            sleep(config.SHORT_DELAY)
            procedurelogger.expectedResult("A tooltip appears for %s" % \
                                           text_box)
            tooltip = self.app.findAllToolTips(None)[0]
            #BUG487859, COMMENTING OUT TEST BECAUSE BUG IS AN ENHANCEMENT
            #procedurelogger.action("Verify that the accessible description for the text box matches the text box's tooltip message.")
            #procedurelogger.expectedResult("The accessible description \"%s\" matches the tooltip message \"%s\"" % (text_box.description, tooltip.name))
            #assert text_box.description == tooltip.name
            #END BUG487859

    # close sample application after running the test
    def quit(self):
        self.altF4()
nilq/baby-python
python
import warnings import numpy as np from scipy._lib.six import callable, string_types from scipy._lib.six import xrange from scipy.spatial import _distance_wrap from scipy.linalg import norm import MyTimer _SIMPLE_CDIST = {} def _copy_array_if_base_present(a): """ Copies the array if its base points to a parent array. """ if a.base is not None: return a.copy() elif np.issubsctype(a, np.float32): return np.array(a, dtype=np.double) else: return a def _convert_to_double(X): if X.dtype != np.double: X = X.astype(np.double) if not X.flags.contiguous: X = X.copy() return X def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None): timer = MyTimer.MyTimerCLS() timer.refresh('enter cidst') XA = np.asarray(XA, order='c') XB = np.asarray(XB, order='c') timer.refresh('asarray') # The C code doesn't do striding. XA = _copy_array_if_base_present(_convert_to_double(XA)) XB = _copy_array_if_base_present(_convert_to_double(XB)) timer.refresh('_copy_array_if_base_present') s = XA.shape sB = XB.shape timer.refresh('get shape') if len(s) != 2: raise ValueError('XA must be a 2-dimensional array.') if len(sB) != 2: raise ValueError('XB must be a 2-dimensional array.') if s[1] != sB[1]: raise ValueError('XA and XB must have the same number of columns ' '(i.e. feature dimension.)') timer.refresh('error check') mA = s[0] mB = sB[0] n = s[1] timer.refresh('get dim') dm = np.zeros((mA, mB), dtype=np.double) timer.refresh(' np.zeros ') if callable(metric): if metric == minkowski: for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = minkowski(XA[i, :], XB[j, :], p) elif metric == wminkowski: for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w) elif metric == seuclidean: for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = seuclidean(XA[i, :], XB[j, :], V) elif metric == mahalanobis: for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = mahalanobis(XA[i, :], XB[j, :], V) else: for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = metric(XA[i, :], XB[j, :]) timer.refresh(' if callable ') print 'cool' elif isinstance(metric, string_types): mstr = metric.lower() timer.refresh('else') try: validate, cdist_fn = _SIMPLE_CDIST[mstr] XA = validate(XA) XB = validate(XB) cdist_fn(XA, XB, dm) return dm except KeyError: pass timer.refresh(' try') if mstr in ['hamming', 'hamm', 'ha', 'h']: if XA.dtype == bool: XA = _convert_to_bool(XA) XB = _convert_to_bool(XB) _distance_wrap.cdist_hamming_bool_wrap(XA, XB, dm) else: XA = _convert_to_double(XA) XB = _convert_to_double(XB) _distance_wrap.cdist_hamming_wrap(XA, XB, dm) elif mstr in ['jaccard', 'jacc', 'ja', 'j']: if XA.dtype == bool: XA = _convert_to_bool(XA) XB = _convert_to_bool(XB) _distance_wrap.cdist_jaccard_bool_wrap(XA, XB, dm) else: XA = _convert_to_double(XA) XB = _convert_to_double(XB) _distance_wrap.cdist_jaccard_wrap(XA, XB, dm) elif mstr in ['minkowski', 'mi', 'm', 'pnorm']: timer.refresh('before _convert_to_double') XA = _convert_to_double(XA) XB = _convert_to_double(XB) timer.refresh('_convert_to_double') _distance_wrap.cdist_minkowski_wrap(XA, XB, dm, p) timer.refresh('after minkowski') elif mstr in ['wminkowski', 'wmi', 'wm', 'wpnorm']: timer.refresh('before _convert_to_double') XA = _convert_to_double(XA) XB = _convert_to_double(XB) timer.refresh('_convert_to_double') w = _convert_to_double(w) _distance_wrap.cdist_weighted_minkowski_wrap(XA, XB, dm, p, w) elif mstr in ['seuclidean', 'se', 's']: XA = _convert_to_double(XA) XB = _convert_to_double(XB) if V is not None: V = np.asarray(V, 
order='c') if V.dtype != np.double: raise TypeError('Variance vector V must contain doubles.') if len(V.shape) != 1: raise ValueError('Variance vector V must be ' 'one-dimensional.') if V.shape[0] != n: raise ValueError('Variance vector V must be of the same ' 'dimension as the vectors on which the ' 'distances are computed.') # The C code doesn't do striding. VV = _copy_array_if_base_present(_convert_to_double(V)) else: VV = np.var(np.vstack([XA, XB]), axis=0, ddof=1) _distance_wrap.cdist_seuclidean_wrap(XA, XB, VV, dm) elif mstr in ['cosine', 'cos']: XA = _convert_to_double(XA) XB = _convert_to_double(XB) _cosine_cdist(XA, XB, dm) elif mstr in ['correlation', 'co']: XA = _convert_to_double(XA) XB = _convert_to_double(XB) XA -= XA.mean(axis=1)[:, np.newaxis] XB -= XB.mean(axis=1)[:, np.newaxis] _cosine_cdist(XA, XB, dm) elif mstr in ['mahalanobis', 'mahal', 'mah']: XA = _convert_to_double(XA) XB = _convert_to_double(XB) if VI is not None: VI = _convert_to_double(np.asarray(VI, order='c')) VI = _copy_array_if_base_present(VI) else: m = mA + mB if m <= n: # There are fewer observations than the dimension of # the observations. raise ValueError("The number of observations (%d) is too " "small; the covariance matrix is " "singular. For observations with %d " "dimensions, at least %d observations " "are required." % (m, n, n + 1)) X = np.vstack([XA, XB]) V = np.atleast_2d(np.cov(X.T)) del X VI = np.linalg.inv(V).T.copy() # (u-v)V^(-1)(u-v)^T _distance_wrap.cdist_mahalanobis_wrap(XA, XB, VI, dm) elif metric == 'test_euclidean': dm = cdist(XA, XB, euclidean) elif metric == 'test_seuclidean': if V is None: V = np.var(np.vstack([XA, XB]), axis=0, ddof=1) else: V = np.asarray(V, order='c') dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V)) elif metric == 'test_sqeuclidean': dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v)) elif metric == 'test_braycurtis': dm = cdist(XA, XB, braycurtis) elif metric == 'test_mahalanobis': if VI is None: X = np.vstack([XA, XB]) V = np.cov(X.T) VI = np.linalg.inv(V) X = None del X else: VI = np.asarray(VI, order='c') VI = _copy_array_if_base_present(VI) # (u-v)V^(-1)(u-v)^T dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI))) elif metric == 'test_canberra': dm = cdist(XA, XB, canberra) elif metric == 'test_cityblock': dm = cdist(XA, XB, cityblock) elif metric == 'test_minkowski': dm = cdist(XA, XB, minkowski, p=p) elif metric == 'test_wminkowski': dm = cdist(XA, XB, wminkowski, p=p, w=w) elif metric == 'test_correlation': dm = cdist(XA, XB, correlation) elif metric == 'test_hamming': dm = cdist(XA, XB, hamming) elif metric == 'test_jaccard': dm = cdist(XA, XB, jaccard) elif metric == 'test_chebyshev' or metric == 'test_chebychev': dm = cdist(XA, XB, chebyshev) elif metric == 'test_yule': dm = cdist(XA, XB, yule) elif metric == 'test_matching': dm = cdist(XA, XB, matching) elif metric == 'test_dice': dm = cdist(XA, XB, dice) elif metric == 'test_kulsinski': dm = cdist(XA, XB, kulsinski) elif metric == 'test_rogerstanimoto': dm = cdist(XA, XB, rogerstanimoto) elif metric == 'test_russellrao': dm = cdist(XA, XB, russellrao) elif metric == 'test_sokalsneath': dm = cdist(XA, XB, sokalsneath) elif metric == 'test_sokalmichener': dm = cdist(XA, XB, sokalmichener) else: raise ValueError('Unknown Distance Metric: %s' % mstr) else: raise TypeError('2nd argument metric must be a string identifier ' 'or a function.') timer.refresh('before return') return dm, timer
nilq/baby-python
python
class Color(object):
    RESET = '\x1b[0m'

    BLACK = 0
    RED = 1
    GREEN = 2
    YELLOW = 3
    BLUE = 4
    MAGENTA = 5
    CYAN = 6
    WHITE = 7

    NORMAL = 0
    BOLD = 1

    @staticmethod
    def to_color_string(string, foreground=7, background=None, style=1):
        style = '\x1b[0%sm' % style
        foreground = '\x1b[3%sm' % foreground
        background = '' if background is None else '\x1b[4%sm' % background
        preset = style + foreground + background
        colored = preset + string + Color.RESET
        return colored


def warn(string):
    colored = Color.to_color_string(string, foreground=Color.YELLOW)
    return colored
nilq/baby-python
python
import os import unittest import pytest from github import GithubException from ogr import GithubService from ogr.abstract import PRStatus, IssueStatus from ogr.persistent_storage import PersistentObjectStorage from ogr.exceptions import GithubAPIException DATA_DIR = "test_data" PERSISTENT_DATA_PREFIX = os.path.join( os.path.dirname(os.path.realpath(__file__)), DATA_DIR ) class GithubTests(unittest.TestCase): def setUp(self): self.token = os.environ.get("GITHUB_TOKEN") self.user = os.environ.get("GITHUB_USER") test_name = self.id() or "all" persistent_data_file = os.path.join( PERSISTENT_DATA_PREFIX, f"test_github_data_{test_name}.yaml" ) PersistentObjectStorage().storage_file = persistent_data_file if PersistentObjectStorage().is_write_mode and ( not self.user or not self.token ): raise EnvironmentError("please set GITHUB_TOKEN GITHUB_USER env variables") self.service = GithubService(token=self.token) self.ogr_project = self.service.get_project( namespace="packit-service", repo="ogr" ) self.ogr_fork = self.service.get_project( namespace="packit-service", repo="ogr", is_fork=True ) self.hello_world_project = self.service.get_project( namespace="packit-service", repo="hello-world" ) self.not_forked_project = self.service.get_project( namespace="fedora-modularity", repo="fed-to-brew" ) def tearDown(self): PersistentObjectStorage().dump() class Comments(GithubTests): def test_pr_comments(self): pr_comments = self.ogr_project.get_pr_comments(9) assert pr_comments assert len(pr_comments) == 2 assert pr_comments[0].comment.endswith("fixed") assert pr_comments[1].comment.startswith("LGTM") def test_pr_comments_reversed(self): pr_comments = self.ogr_project.get_pr_comments(9, reverse=True) assert pr_comments assert len(pr_comments) == 2 assert pr_comments[0].comment.startswith("LGTM") def test_pr_comments_filter(self): pr_comments = self.ogr_project.get_pr_comments(9, filter_regex="fixed") assert pr_comments assert len(pr_comments) == 1 assert pr_comments[0].comment.startswith("@TomasTomecek") pr_comments = self.ogr_project.get_pr_comments( 9, filter_regex="LGTM, nicely ([a-z]*)" ) assert pr_comments assert len(pr_comments) == 1 assert pr_comments[0].comment.endswith("done!") def test_pr_comments_search(self): comment_match = self.ogr_project.search_in_pr(9, filter_regex="LGTM") assert comment_match assert comment_match[0] == "LGTM" comment_match = self.ogr_project.search_in_pr( 9, filter_regex="LGTM, nicely ([a-z]*)" ) assert comment_match assert comment_match[0] == "LGTM, nicely done" class GenericCommands(GithubTests): def test_description(self): description = self.ogr_project.get_description() assert description.startswith("One Git library to Rule") def test_branches(self): branches = self.ogr_project.get_branches() assert branches assert set(branches) == {"master"} def test_git_urls(self): urls = self.ogr_project.get_git_urls() assert urls assert len(urls) == 2 assert "git" in urls assert "ssh" in urls assert urls["git"] == "https://github.com/packit-service/ogr.git" assert urls["ssh"].endswith("git@github.com:packit-service/ogr.git") def test_username(self): # changed to check just lenght, because it is based who regenerated data files assert len(self.service.user.get_username()) > 3 def test_email(self): test_str = self.service.user.get_email() assert test_str assert len(test_str) > 0 assert "@" in test_str assert "." 
in test_str def test_get_file(self): file_content = self.ogr_project.get_file_content(".git_archival.txt") assert file_content assert isinstance(file_content, str) assert "ref-names:" in file_content def test_nonexisting_file(self): with self.assertRaises(FileNotFoundError): self.ogr_project.get_file_content(".blablabla_nonexisting_file") def test_parent_project(self): assert self.ogr_fork.parent.namespace == "packit-service" assert self.ogr_fork.parent.repo == "ogr" @unittest.skip("get_commit_flags not implemented") def test_commit_flags(self): flags = self.ogr_project.get_commit_flags( commit="29ca3caefc781b4b41245df3e01086ffa4b4639e" ) assert isinstance(flags, list) assert len(flags) == 0 def test_get_sha_from_tag(self): assert ( self.ogr_project.get_sha_from_tag("0.0.1") == "29ca3caefc781b4b41245df3e01086ffa4b4639e" ) with pytest.raises(GithubAPIException) as ex: self.ogr_project.get_sha_from_tag("future") assert "not found" in str(ex.value) def test_get_tag_from_tag_name(self): tag = self.ogr_project.get_tag_from_tag_name("0.0.1") assert tag.name == "0.0.1" assert tag.commit_sha == "29ca3caefc781b4b41245df3e01086ffa4b4639e" def test_get_tag_from_nonexisting_tag_name(self): assert not self.ogr_project.get_tag_from_tag_name("future") def test_get_owners(self): owners = self.ogr_project.get_owners() assert ["packit-service"] == owners def test_issue_permissions(self): users = self.ogr_project.who_can_close_issue() assert "lachmanfrantisek" in users issue = self.ogr_project.get_issue_info(1) assert self.ogr_project.can_close_issue("lachmanfrantisek", issue) assert not self.ogr_project.can_close_issue("marusinm", issue) def test_pr_permissions(self): users = self.ogr_project.who_can_merge_pr() assert "lachmanfrantisek" in users assert self.ogr_project.can_merge_pr("lachmanfrantisek") assert not self.ogr_project.can_merge_pr("marusinm") class Issues(GithubTests): def test_issue_list(self): issue_list = self.ogr_fork.get_issue_list() assert isinstance(issue_list, list) assert not issue_list issue_list_all = self.ogr_project.get_issue_list(status=IssueStatus.all) assert issue_list_all assert len(issue_list_all) >= 45 issue_list_closed = self.ogr_project.get_issue_list(status=IssueStatus.closed) assert issue_list_closed assert len(issue_list_closed) >= 35 issue_list = self.ogr_project.get_issue_list() assert issue_list assert len(issue_list) >= 3 def test_issue_info(self): issue_info = self.ogr_project.get_issue_info(issue_id=4) assert issue_info assert issue_info.title.startswith("Better name") assert issue_info.status == IssueStatus.closed def test_issue_labels(self): labels = self.ogr_project.get_issue_labels(issue_id=4) assert not labels self.ogr_project.add_issue_labels(issue_id=4, labels=["test_lb1", "test_lb2"]) labels = self.ogr_project.get_issue_labels(issue_id=4) assert len(labels) == 2 assert labels[0].name == "test_lb1" assert labels[1].name == "test_lb2" class PullRequests(GithubTests): def test_pr_list(self): pr_list = self.ogr_fork.get_pr_list() assert isinstance(pr_list, list) pr_list_all = self.ogr_project.get_pr_list(status=PRStatus.all) assert pr_list_all assert len(pr_list_all) >= 75 pr_list_closed = self.ogr_project.get_pr_list(status=PRStatus.closed) assert pr_list_closed assert len(pr_list_closed) >= 70 closed_pr_numbers = [] for closed_pr in pr_list_closed: closed_pr_numbers.append(closed_pr.id) assert 93 in closed_pr_numbers pr_list_merged = self.ogr_project.get_pr_list(status=PRStatus.merged) assert pr_list_merged assert len(pr_list_merged) >= 1 closed_pr_numbers = [] 
for closed_pr in pr_list_merged: closed_pr_numbers.append(closed_pr.id) assert 93 not in closed_pr_numbers pr_list = self.ogr_project.get_pr_list() assert pr_list assert len(pr_list) >= 1 def test_pr_info(self): pr_info = self.ogr_project.get_pr_info(pr_id=1) assert pr_info assert pr_info.title == "WIP: API" assert pr_info.status == PRStatus.merged def test_all_pr_commits(self): commits = self.ogr_project.get_all_pr_commits(pr_id=1) assert len(commits) == 3 assert commits[0] == "431f4a7c5cce24c3035b17c5131a3918ab989bd0" assert commits[2] == "5d6cc05d30ef0a0d69bb42bdcaad187408a070b0" def test_update_pr_info(self): pr_info = self.ogr_project.get_pr_info(pr_id=1) orig_title = pr_info.title orig_description = pr_info.description self.ogr_project.update_pr_info( pr_id=1, title="changed", description="changed description" ) pr_info = self.ogr_project.get_pr_info(pr_id=1) assert pr_info.title == "changed" assert pr_info.description == "changed description" self.ogr_project.update_pr_info( pr_id=1, title=orig_title, description=orig_description ) pr_info = self.ogr_project.get_pr_info(pr_id=1) assert pr_info.title == orig_title assert pr_info.description == orig_description def test_pr_labels(self): labels = self.ogr_project.get_pr_labels(pr_id=1) assert not labels self.ogr_project.add_pr_labels(pr_id=1, labels=["test_lb1", "test_lb2"]) labels = self.ogr_project.get_pr_labels(pr_id=1) assert len(labels) == 2 assert labels[0].name == "test_lb1" assert labels[1].name == "test_lb2" class Releases(GithubTests): def test_get_release(self): release = self.hello_world_project.get_release(tag_name="0.4.1") assert release.title == "test" assert release.body == "testing release" def test_get_releases(self): releases = self.ogr_project.get_releases() assert releases assert len(releases) >= 9 def test_create_release(self): count_before = len(self.hello_world_project.get_releases()) release = self.hello_world_project.create_release( tag="0.5.0", name="test", message="testing release" ) count_after = len(self.hello_world_project.get_releases()) assert release.tag_name == "0.5.0" assert release.title == "test" assert release.body == "testing release" assert count_before + 1 == count_after def test_edit_release(self): release = self.hello_world_project.get_release(tag_name="0.1.0") origin_name = release.title origin_message = release.body release.edit_release( name=f"{origin_name}-changed", message=f"{origin_message}-changed" ) assert release.title == f"{origin_name}-changed" assert release.body == f"{origin_message}-changed" def test_latest_release(self): release = self.ogr_project.get_latest_release() assert release.tag_name == "0.5.0" assert release.title == "0.5.0" assert "New Features" in release.body class Forks(GithubTests): def test_fork(self): assert self.ogr_fork.is_fork is True fork_description = self.ogr_fork.get_description() assert fork_description @unittest.skip( "not working with yaml file because it check exception within setup" ) def test_nonexisting_fork(self): self.ogr_nonexisting_fork = self.service.get_project( repo="omfeprkfmwpefmwpefkmwpeofjwepof", is_fork=True ) with self.assertRaises(GithubException) as ex: self.ogr_nonexisting_fork.get_description() s = str(ex.value.args) assert "Not Found" in s assert "404" in s def test_get_fork(self): fork = self.ogr_project.get_fork() assert fork assert fork.get_description() def test_is_fork(self): assert not self.ogr_project.is_fork is_forked = self.ogr_project.is_forked() assert isinstance(is_forked, bool) # `is True` is here on purpose: we want 
to be sure that .is_forked() returns True object # because Tomas had his crazy ideas and wanted to return GitProject directly, # stop that madman assert is_forked is True fork = self.ogr_project.get_fork(create=False) assert fork assert fork.is_fork def test_create_fork(self): not_existing_fork = self.not_forked_project.get_fork(create=False) assert not not_existing_fork assert not self.not_forked_project.is_forked() old_forks = self.not_forked_project.service.user.get_forks() self.not_forked_project.fork_create() assert self.not_forked_project.get_fork().get_description() assert self.not_forked_project.is_forked() new_forks = self.not_forked_project.service.user.get_forks() assert len(old_forks) == len(new_forks) - 1
nilq/baby-python
python
# django
from django import forms

# local django
from exam.models import CustomExam
from exam.validators import CustomExamValidator


class CreateCustomExams(forms.ModelForm):
    """
    Form to create a custom exam.
    """

    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        # Define model to form.
        model = CustomExam
        fields = ('description', 'name',)

    def clean(self):
        """
        Get Custom Exam fields.
        """
        description = self.cleaned_data.get('description')
        name = self.cleaned_data.get('name')

        # Verify validations in form.
        self.validator_all(description, name)

    def validator_all(self, description, name):
        """
        Checks validator in all fields.
        """
        validator = CustomExamValidator()

        # Fields common all users.
        validator.validator_name(name)
        validator.validator_description(description)
nilq/baby-python
python
# Generated by Django 2.2 on 2020-08-09 06:03

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('coupons', '0003_coupon_max_discount'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='max_discount',
            field=models.DecimalField(decimal_places=2, default=100, max_digits=6),
        ),
    ]
nilq/baby-python
python
import threading from Queue import Empty, Full from multiprocessing import Process, Queue, Value import datetime import os import zmq import logging from logging import handlers from platformData import * from BEMOSSThread import BThread, BProcess from commandProcessor import processCommand import cgitb cgitb.enable() #gives more detailed traceback main_logger = logging.getLogger("filelogger") main_logger.level = logging.DEBUG console_logger = logging.getLogger("consolelogger") console_logger.level = logging.INFO fileHandler = handlers.RotatingFileHandler(filename="BEMOSS.log",maxBytes=50000000,backupCount=10) #50 MB limit consoleHandler = logging.StreamHandler() formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S") fileHandler.setFormatter(formatter) main_logger.handlers = [fileHandler] console_logger.handlers = [consoleHandler] main_logger.propagate = False console_logger.propagate = False changeLogFilterQ = Queue(10) def handelLogging(): filterheader = "" while True: source, header, level, message = logQueue.get() message = header +" :" + message try: newfilter = changeLogFilterQ.get(False) except Empty: pass else: filterheader = newfilter main_logger.log(level,message) if filterheader: if header.startswith(filterheader): console_logger.log(level,"filtering:" + filterheader + ": " + message) else: console_logger.log(level, message) def handleCommands(threadLock,stopFlag): while True: # Wait for next request from client print "Creating Socket" context = zmq.Context() rep_socket = context.socket(zmq.REP) rep_socket.bind(address) message = rep_socket.recv() print message if message == "Exit": stopFlag.Value = 1 break splitmessage = message.split(" ") if len(splitmessage) == 2 and splitmessage[0] == "filterlog": #update the console log filtering changeLogFilterQ.put(splitmessage[1]) print("Filter requested:" + splitmessage[1]) rep_socket.send(str("Filter Requested")) continue with threadLock: try: reply = processCommand(message) except Exception as ex: reply = "Problem executing command: " + str(type(ex)) + " " + str(ex) else: print "Command Processed: " + message if not reply: reply = "" rep_socket.send(str(reply)) print "Exiting handle commands Thread" stopFlag = Value('i',0) threadLock = threading.Lock() command_line_thread = BThread(target=handleCommands,args=(threadLock,stopFlag)) command_line_thread.id = -1 command_line_thread.name = "commandHandler" command_line_thread.daemon = True command_line_thread.start() logging_thread = BThread(target=handelLogging) logging_thread.id = -1 logging_thread.name = "loggingHandler" logging_thread.daemon = True logging_thread.start() start_time = datetime.datetime.now() print "****BEMOSS started****" print os.getpid() mainThread = threading.current_thread() mainThread.name = "MainBEMOSSThread" mainThread.id = 0 counter = 0 while not stopFlag.value: #check if there is any new messages in the outQueue of the agents try: source,destinations,topic,message = outQueue.get(True,1) for destination in destinations: if destination in inQueues_dict: try: #for each destination, put the message in the destination's inQueue inQueues_dict[destination].put((source, topic,message), False) except Full: _ = inQueues_dict[destination].get() #if destination inQueue is full, remove old, and put inQueues_dict[destination].put((source, topic, message), False) print(destination + " QueueFull") raise elif destination == "platformmanager": with threadLock: processCommand(topic + ' ' + message) except Empty: #continue # counter +=1 # if 
counter == 10: # counter = 0 # h = hpy() # print "\nPrinting Memory Usage" # info= h.heap() # print info.byvia pass time_diff = datetime.datetime.now() - start_time # if time_diff > datetime.timedelta(minutes=20): # break # time.sleep(0.1) print "BEMOSS exited"
nilq/baby-python
python
from tabulate import tabulate

table = [['one', 'two', 'three'], ['four', 'five', 'six'], ['seven', 'eight', 'nine']]
print(tabulate(table, tablefmt='html'))

"""Generate Report Function"""
with open('example.log') as f:
    lines = f.readlines()

print lines
print(lines[2])

HTML_file = open("Report.html", "w+")
HTML_file.write("<html>\n <table border=1>\n <tr>\n <td>" + lines[2] + "</td>\n </tr> \n </table>\n </html>")
print(tabulate(lines, tablefmt='html'))
nilq/baby-python
python
'''
Environment simulators.
'''

from models.simulator import POMDPSimulator
from models.simulator_momdp import MOMDPSimulator

from models.tiger import TigerPOMDP
from models.rock_sample import RockSamplePOMDP

from models.tools.belief import DiscreteBelief
nilq/baby-python
python
""" Uma matriz de confusão. Não confundir com uma tabela de confusão. A matrix de confusão possui mais do que uma duas linhas e duas colunas, por isso difere da tabela de confusão, que possui duas linhas e duas colunas Para criar a matriz de confusão escolhi o formato de dictionary da seguinte maneira: O dict - O primeiro nível do dictionary uma linha da matriz de confusão. """ class ConfusionMatrix: BETA = 1 def __init__(self, model, test_set): confusion_hash = {} possible_classes = test_set.get_uniq_classes() # { # "sim": { "sim": 3, "nao": 2 }, quando previu sim, 3 realmente eram sims, dois deveriam ser naos # "nao": { "sim": 2, "nao": 1 } # } for klass in possible_classes: confusion_hash[klass] = {} for klass_2 in possible_classes: confusion_hash[klass][klass_2] = 0 for example in test_set.examples: correct_klass = example.get_class() predicted_klass = model.predict(example) confusion_hash[predicted_klass][correct_klass] += 1 self.classes = possible_classes self.confusion_hash = confusion_hash def predictions_for(self, klass): return self.confusion_hash[klass].copy() def possible_classes(self): return self.classes.copy()
nilq/baby-python
python
## process_rootroopnft.py
# first let's just check how many tweets it grabbed.

with open("rootroopnft.txt", "r") as fid:
    line = fid.read()
# end with open

line = line.split("Tweet(url=")
print("line[0]: ", line[0])
print("line[-1]: ", line[-1])

last_date = line[-1].split("date=datetime.datetime(")[1].split(", tzinfo=datetime.timezone.utc),")[0]
print("last_date: ", last_date)  # returned 2021, 11, 23, 23, 32, 3 (also the oldest tweet I was able to fetch)

print("len line: ", len(line))  # returned 1484
nilq/baby-python
python
import cv2
import numpy as np


class ColorTrack():
    def __init__(self):
        pass

    def detect_green(self, frame):
        return self.detect_color(frame, np.array([33, 80, 40]), np.array([102, 255, 255]))

    def detect_red(self, frame):
        return self.detect_color(frame, np.array([78, 43, 46]), np.array([99, 255, 255]))

    def detect_color(self, frame, lower_bound, uper_bound):
        imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(imgHSV, lower_bound, uper_bound)
        kernelOpen = np.ones((5, 5))
        kernelClose = np.ones((20, 20))
        maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
        maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
        maskFinal = maskClose
        conts, h = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(imgHSV, conts, -1, (255, 0, 0), 3)

        # keep the largest bounding box found among the contours
        max_x = 0
        max_y = 0
        max_w = 0
        max_h = 0
        max_area = 0
        for i in range(len(conts)):
            x, y, w, h = cv2.boundingRect(conts[i])
            if w * h > max_area:
                max_x = x
                max_y = y
                max_w = w
                max_h = h
                max_area = w * h
        return max_x, max_y, max_w, max_h, max_area
nilq/baby-python
python
from __future__ import print_function, absolute_import

import sys
import subprocess
from distutils.errors import DistutilsPlatformError

import semantic_version


class Binding:
    """
    Binding Options
    """
    # https://github.com/PyO3/PyO3
    PyO3 = 0
    # https://github.com/dgrunwald/rust-cpython
    RustCPython = 1
    # Bring your own binding
    NoBinding = 2
    # Build executable
    Exec = 3


class Strip:
    """
    Strip Options
    """
    # do not strip symbols
    No = 0
    # strip debug symbols
    Debug = 1
    # strip all symbols
    All = 2


def cpython_feature(ext=True, binding=Binding.PyO3):
    version = sys.version_info

    if binding in (Binding.NoBinding, Binding.Exec):
        return ()
    elif binding is Binding.PyO3:
        if (2, 7) < version < (2, 8):
            if ext:
                return ("pyo3/python2", "pyo3/extension-module")
            else:
                return ("pyo3/python2",)
        elif version > (3, 4):
            if ext:
                return ("pyo3/python3", "pyo3/extension-module")
            else:
                return ("pyo3/python3",)
        else:
            raise DistutilsPlatformError("Unsupported python version: %s" % sys.version)
    elif binding is Binding.RustCPython:
        if (2, 7) < version < (2, 8):
            if ext:
                return ("cpython/python27-sys", "cpython/extension-module-2-7")
            else:
                return ("cpython/python27-sys",)
        elif (3, 3) < version:
            if ext:
                return ("cpython/python3-sys", "cpython/extension-module")
            else:
                return ("cpython/python3-sys",)
        else:
            raise DistutilsPlatformError("Unsupported python version: %s" % sys.version)
    else:
        raise DistutilsPlatformError('Unknown Binding: "{}" '.format(binding))


def get_rust_version():
    try:
        output = subprocess.check_output(["rustc", "-V"])
        if isinstance(output, bytes):
            output = output.decode("latin-1")
        return semantic_version.Version(output.split(" ")[1], partial=True)
    except (subprocess.CalledProcessError, OSError):
        raise DistutilsPlatformError("Can not find Rust compiler")
    except Exception as exc:
        raise DistutilsPlatformError("Can not get rustc version: %s" % str(exc))
nilq/baby-python
python
# imports from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from bs4 import BeautifulSoup import time import re import csv # easy function for viewing list def printlist(list): length=len(list) for i in range(length): print(list[i]) #url for page with links to all sas Viya procs by Viya product base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsproc&docsetTarget=p1o1v16by0iotvn10m0jzzv9i3y8.htm&locale=en#' #retrieve the html from the list of all sas procs by product driver = webdriver.Safari() driver.get(base_url) time.sleep(10) soup = BeautifulSoup(driver.page_source,"lxml") driver.close() #print(soup) # Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link bowl = soup.findAll(['h2','p'],attrs={'class':['xisDoc-title','xisDoc-paragraph']}) vcollect = [] vproduct = [] for spoon in bowl: if spoon.name=='h2' and "SAS Products" not in spoon.text: vproduct.append(spoon.text.strip()) if spoon.name=='p' and vproduct: block = spoon.find('a') if block: link = block.get('href') proc = ' '.join(block.text.split()) proc_short = proc.replace(': ',' ') # template shows up as template: because it has multiple links proc_short = proc_short.split(' ',1)[0] vcollect.append([vproduct[-1], proc, proc_short, link.strip()]) #keep the list of links for products and procedures in vdriver.csv header=["Product","Procedure","Procedure_Short","Procedure_Link"] with open("Projects/PROC overview/vdriver.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(vcollect) f.close #remove the few cases where a product starts by listing another product (not a proc): as in "includes contents of product..." 
#store these separately for linking Viya and 9.4 Product clusters prodlink = [] for idx, item in enumerate(vcollect): if item[1] in product: prodlink.append(vcollect[idx]) del vcollect[idx] #keep the list of links between 9.4 and viya products in prodlink.csv header=["Product","Procedure","Procedure_Short","Procedure_Link"] with open("Projects/PROC overview/prodlink.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(prodlink) f.close # url with viya products with action sets base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsactions&docsetTarget=actionSetsByProduct.htm&locale=en' #retrieve the html from the list of all sas procs by product driver = webdriver.Safari() driver.get(base_url) time.sleep(10) soup = BeautifulSoup(driver.page_source,"lxml") driver.close() #print(soup) # Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link bowl = soup.findAll('div',attrs='xisDoc-toc_1 ng-scope') #printlist(bowl) adriver = [] for spoon in bowl: adriver.append([spoon.text,spoon.find('a').get('href')]) #printlist(adriver) #keep the list of links for actions in adriver.csv header=["Product","Product_Link"] with open("Projects/PROC overview/adriver.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(adriver) f.close # cycle through each product with actions and get list of actions by product - save to acollect.csv driver = webdriver.Safari() acollect = [] # Product | ActionSet | ActionSet_Describe | ActionSet_Link | ActionSet_LinkText for row in adriver: driver.get(row[1]) time.sleep(10) action_soup = BeautifulSoup(driver.page_source,"lxml") bowl = action_soup.findAll('tr') for spoon in bowl: sip = spoon.findAll('td') if len(sip) == 3: acollect.append([row[0],sip[1].text.strip(),' '.join(sip[2].text.split()),sip[0].find('a').get('href').strip(),' '.join(sip[0].text.split())]) #print(' '.join(sip[0].text.split()),sip[0].find('a').get('href').strip(),sip[1].text.strip(),' '.join(sip[2].text.split())) driver.close() #keep the list of links for actions in acollect.csv header=["Product","ActionSet","ActionSet_Describe","ActionSet_Link","ActionSet_LinkText"] with open("Projects/PROC overview/acollect.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(acollect) f.close #url for page with links to all sas procs by product #base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=allprodsproc&docsetTarget=p1vzipzy6l8so0n1gbbh3ae63czb.htm&locale=en' base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsproc&docsetTarget=p1vzipzy6l8so0n1gbbh3ae63czb.htm&locale=en' #retrieve the html from the list of all sas procs by product driver = webdriver.Safari() driver.get(base_url) time.sleep(10) soup = BeautifulSoup(driver.page_source,"lxml") driver.close() #print(soup) # Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link bowl = soup.findAll(['h2','p'],attrs={'class':['xisDoc-title','xisDoc-paragraph']}) collect = [] product = [] for spoon in bowl: #print('line - ', spoon) if spoon.name=='h2' and "SAS Products" not in spoon.text: product.append(spoon.text.strip()) if spoon.name=='p' and product: block = spoon.find('a') if block: link = block.get('href') proc = ' '.join(block.text.split()) proc_short = proc.replace(': ',' ') # template shows up as template: because it has multiple links proc_short = proc_short.split(' ',1)[0] 
collect.append([product[-1], proc, proc_short, link.strip()]) #remove the few cases where a product starts by listing another product (not a proc): as in "includes contents of product..." for idx, item in enumerate(collect): if item[1] in product: del collect[idx] #print(collect) #keep the list of links for products and procedures in driver.csv header=["Product","Procedure","Procedure_Short","Procedure_Link"] with open("Projects/PROC overview/driver.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(collect) f.close # cycle through products, visit pages, look for links to overview and comparisons #build a list of procedures procedures = [] for row in collect: if row[2] not in procedures: procedures.append(row[2]) #printlist(procedures) #function to see check if link is for desired purpose and if it needs stump url def check_addstump(link,stump): link=link.strip() if link.startswith('http'): return link else: return stump + link # cycle through procedure links, check for overview and contrasted links: Collect = Product | Procedure | Procedure_Short | Procedure_Link | Overview_Link | Compared_Link comp_stump='https://documentation.sas.com' driver = webdriver.Safari() #collect = collect[393:397] #subset for testing #collect = collect[290:296] #subset for testing for row in collect: driver.get(row[3]) time.sleep(10) proc_soup = BeautifulSoup(driver.page_source,"lxml") for proc_link in proc_soup.find_all('a'): if ("Overview" in proc_link.text) and proc_link.get('href'): row.append(check_addstump(proc_link.get('href'),comp_stump)) if len(row) != 5: row.append('') for proc_link in proc_soup.find_all('a'): comps=["Contrasted","Compared"] if any(comp in proc_link.text for comp in comps) and proc_link.get('href'): row.append(check_addstump(proc_link.get('href'),comp_stump)) if len(row) !=6: row.append('') #printlist(collect) #keep the incompete collect list to run again from here: header=["Product","Procedure","Procedure_Short","Procedure_Link","Overview_Link","Compared_Link"] with open("Projects/PROC overview/precollect.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(collect) f.close # get list of procs mentioned on overview/compared to pages when they exist: Collect = Product | Procedure | Procecure_Short | Procedure_Link | Overview_Link | Compared_Link | Compared_PROCS (list) header=["Product","Procedure","Procedure_Short","Procedure_Link","Overview_Link","Compared_Link",'Compared_PROCS'] with open("Projects/PROC overview/collect.csv", "w", newline="") as f: writer = csv.writer(f) writer.writerow(header) f.close for row in collect: row.append('') regex = r"\b[A-Z][A-Z]+\b" compared_procs = [] if row[5]: # get compared PROCs driver.get(row[5]) time.sleep(10) comp_soup = BeautifulSoup(driver.page_source,"lxml") for comp_link in comp_soup.find_all('p'): for match in re.finditer(regex, comp_link.text): if (match.group() not in compared_procs) and (match.group() in procedures) and (match.group() != row[2]): #not already found, is in full list, not the current proc compared_procs.append(match.group()) row[6]=match.group() with open("Projects/PROC overview/collect.csv","a") as f: writer = csv.writer(f) writer.writerow(row) if row[4]: # get overview PROCs - only keep ones not already covered in compared driver.get(row[4]) time.sleep(15) comp_soup = BeautifulSoup(driver.page_source,"lxml") for comp_link in comp_soup.find_all('p'): for match in re.finditer(regex, comp_link.text): if (match.group() not in compared_procs) 
and (match.group() in procedures) and (match.group() != row[2]): #not already found, is in full list, not the current proc compared_procs.append(match.group()) row[6]=match.group() with open("Projects/PROC overview/collect.csv","a") as f: writer = csv.writer(f) writer.writerow(row) if not compared_procs: with open("Projects/PROC overview/collect.csv","a") as f: writer = csv.writer(f) writer.writerow(row) driver.quit() #printlist(collect)
nilq/baby-python
python
"""The SquonkServer class handles get, post and delete requests against the squonk base_url using the SquonkAuth class to refresh the authentication token when required. """ import requests import json import logging from email.policy import default from collections import namedtuple try: from .SquonkAuth import SquonkAuth except: from SquonkAuth import SquonkAuth from collections import namedtuple # The search result. # A namedtuple. SearchResult = namedtuple('SearchResult', 'status_code message json') class SquonkException(Exception): """A basic exception used by the Squonk API """ pass class SquonkServer: def __init__(self, auth, base_url): # general settings self._base_url = base_url self._auth = auth logging.debug('SquonkServer created:'+self._base_url) # set a request def send(self,type,request,form_data=None): # Always try to refresh the access token. # The token is only refreshed if it is close to expiry. self._auth.check_token() token = self._auth.get_token() url = str(self._base_url + '/' + request) logging.debug('SEND:' + type + ' ' + url) response = None if type == 'get': headers = {'Authorization': str('bearer ' + token) } response = requests.get(url, headers=headers, verify=True, allow_redirects=True) else: if type == 'post': headers = {'Authorization': str('bearer ' + token), 'Content-Type': 'multipart/form'} response = requests.post(url, files=form_data, headers = headers ) else: if type == 'delete': headers = {'Authorization': str('bearer ' + token) } response = requests.delete(url, headers=headers, verify=True, allow_redirects=True) else: raise SquonkException('type must be get, post or delete') status_code = response.status_code logging.debug('GOT response '+str(status_code)) if not response.status_code in [200, 201]: if response.status_code == 404: print(response.text) else: print(response.content) return response
nilq/baby-python
python
# -*- coding: utf-8 -*- import logging from mathutils import Vector class BlockDef(object): class _BlockItem(object): def __init__(self, name="", color=(0, 0, 0), block_def=(35, None)): self._name = name self._color = color self._block_def = block_def @property def color(self): return self._color @property def block_def(self): return self._block_def BLOCK_LIST = ( _BlockItem( "White Wool", Vector((0.95, 0.95, 0.95)), (35, None) ), _BlockItem( "Orange Wool", Vector((0.92, 0.53, 0.25)), (35, 1) ), _BlockItem( "Magenta Wool", Vector((0.73, 0.31, 0.77)), (35, 2) ), _BlockItem( "Light Blue Wool", Vector((0.43, 0.55, 0.81)), (35, 3) ), _BlockItem( "Yellow Wool", Vector((0.77, 0.71, 0.11)), (35, 4) ), _BlockItem( "Lime Wool", Vector((0.23, 0.75, 0.18)), (35, 5) ), _BlockItem( "Pink Wool", Vector((0.84, 0.54, 0.62)), (35, 6) ), _BlockItem( "Grey Wool", Vector((0.26, 0.26, 0.26)), (35, 7) ), _BlockItem( "Light Grey Wool", Vector((0.62, 0.65, 0.65)), (35, 8) ), _BlockItem( "Cyan Wool", Vector((0.15, 0.46, 0.59)), (35, 9) ), _BlockItem( "Purple Wool", Vector((0.53, 0.23, 0.80)), (35, 10) ), _BlockItem( "Blue Wool", Vector((0.15, 0.20, 0.60)), (35, 11) ), _BlockItem( "Brown Wool", Vector((0.22, 0.30, 0.09)), (35, 12) ), _BlockItem( "Green Wool", Vector((0.22, 0.30, 0.09)), (35, 13) ), _BlockItem( "Red Wool", Vector((0.65, 0.17, 0.16)), (35, 14) ), _BlockItem( "Black Wool", Vector((0, 0, 0)), (35, 15) ), _BlockItem( "White Stained Clay", Vector((0.77, 0.65, 0.60)), (159, None) ), _BlockItem( "Orange Stained Clay", Vector((0.60, 0.31, 0.14)), (159, 1) ), _BlockItem( "Magenta Stained Clay", Vector((0.56, 0.33, 0.40)), (159, 2) ), _BlockItem( "Light Blue Stained Clay", Vector((0.44, 0.42, 0.54)), (159, 3) ), _BlockItem( "Yellow Stained Clay", Vector((0.69, 0.49, 0.13)), (159, 4) ), _BlockItem( "Lime Stained Clay", Vector((0.38, 0.44, 0.20)), (159, 5) ), _BlockItem( "Pink Stained Clay", Vector((0.63, 0.30, 0.31)), (159, 6) ), _BlockItem( "Gray Stained Clay", Vector((0.22, 0.16, 0.14)), (159, 7) ), _BlockItem( "Light Gray Stained Clay", Vector((0.53, 0.42, 0.38)), (159, 8) ), _BlockItem( "Cyan Stained Clay", Vector((0.34, 0.35, 0.36)), (159, 9) ), _BlockItem( "Purple Stained Clay", Vector((0.44, 0.25, 0.31)), (159, 10) ), _BlockItem( "Blue Stained Clay", Vector((0.27, 0.22, 0.33)), (159, 11) ), _BlockItem( "Brown Stained Clay", Vector((0.28, 0.19, 0.13)), (159, 12) ), _BlockItem( "Green Stained Clay", Vector((0.29, 0.32, 0.16)), (159, 13) ), _BlockItem( "Red Stained Clay", Vector((0.56, 0.24, 0.18)), (159, 14) ), _BlockItem( "Black Stained Clay", Vector((0.13, 0.08, 0.06)), (159, 15) ), _BlockItem( "Stone", Vector((0.47, 0.47, 0.47)), (1, None) ), _BlockItem( "Polished Granite", Vector((0.63, 0.44, 0.38)), (1, 2) ), _BlockItem( "Oak Wood Plank", Vector((0.66, 0.53, 0.34)), (5, None) ), _BlockItem( "Spruce Wood Plank", Vector((0.46, 0.34, 0.20)), (5, 1) ), _BlockItem( "Birch Wood Plank", Vector((0.79, 0.73, 0.49)), (5, 2) ), _BlockItem( "Jungle Wood Plank", Vector((0.64, 0.46, 0.31)), (5, 3) ), _BlockItem( "Acacia Wood Plank", Vector((0.59, 0.32, 0.17)), (5, 4) ), _BlockItem( "Sand", Vector((0.83, 0.78, 0.60)), (12, None) ), _BlockItem( "Red Sand", Vector((0.63, 0.32, 0.12)), (12, 1) ), _BlockItem( "Sponge", Vector((0.78, 0.78, 0.31)), (19, None) ), _BlockItem( "Sandstone", Vector((0.88, 0.85, 0.64)), (24, None) ), _BlockItem( "Gold Block", Vector((0.99, 0.99, 0.36)), (41, None) ), _BlockItem( "Iron Block", Vector((0.93, 0.93, 0.93)), (42, None) ), ) @staticmethod def find_nearest_color_block(target_color): 
min_dist = 10 min_index = 0 logging.debug("Target_color: {}".format(target_color.to_tuple())) for i, block in enumerate(BlockDef.BLOCK_LIST): dist = (block.color - target_color).length logging.debug(" i = {}, dist = {}".format(i, dist)) if dist < min_dist: min_index = i min_dist = dist logging.debug(" min_index is '{}'".format(min_index)) return BlockDef.BLOCK_LIST[min_index]
nilq/baby-python
python
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, SubmitField
from wtforms.validators import DataRequired


class QuestionsForm(FlaskForm):
    class Meta:
        csrf = False

    # Example of defining a field. An in-depth description can be found in the wtforms docs.
    # field_name = FieldType(label, description="some description", validators=[])
    question1 = StringField("Question 1",
                            description="This is the form description for question number 1",
                            validators=[DataRequired()])
    question2 = StringField("q2", validators=[DataRequired()])
    question3 = StringField("q3", validators=[DataRequired()])
    question4 = StringField("q4", validators=[DataRequired()])
    question5 = StringField("q5", validators=[DataRequired()])
    question6 = StringField("q6", validators=[DataRequired()])
    question7 = StringField("q7", validators=[DataRequired()])
nilq/baby-python
python
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable from scipy.special import factorial2 class Hermite: def __init__(self, num_pol = 5): self.h = [] def h0(x): return torch.ones_like(x) self.h.append(h0) def h1(x): return x self.h.append(h1) def h2(x): return (x**2 - 1)/np.sqrt(np.math.factorial(2)) self.h.append(h2) def h3(x): return (x**3 - 3*x)/np.sqrt(np.math.factorial(3)) self.h.append(h3) def h4(x): return (x**4 - 6*(x**2) + 3)/np.sqrt(np.math.factorial(4)) self.h.append(h4) def h5(x): return (x**5 - 10*x**3 + 15*x)/np.sqrt(np.math.factorial(5)) self.h.append(h5) def h6(x): return (x**6 - 15*x**4 + 45*x**2 - 15)/np.sqrt(np.math.factorial(6)) self.h.append(h6) def h7(x): return (x**7 - 21*x**5 + 105*x**3 - 105*x)/np.sqrt(np.math.factorial(7)) self.h.append(h7) def h8(x): return (x**8 - 28*x**6 + 210*x**4 - 420*x**2 + 105)/np.sqrt(np.math.factorial(8)) self.h.append(h8) def h9(x): return (x**9 - 36*x**7 + 378*x**5 - 1260*x**3 + 945*x)/np.sqrt(np.math.factorial(9)) self.h.append(h9) def h10(x): return (x**10 - 45*x**8 + 630*x**6 - 3150*x**4 + 4725*x**2 - 945)/np.sqrt(np.math.factorial(10)) self.h.append(h10) self.bn1 = nn.BatchNorm2d(in_planes) def get_initializations(self, num_pol = 5, copy_fun = 'relu'): k = [] if copy_fun == 'relu': for n in range(num_pol): if n == 0: k.append(1.0/np.sqrt(2*np.pi)) #k.append(0.0) #k.append(0.3821) elif n == 1: k.append(1.0/2) #k.append(0.0) #k.append(0.3775) elif n == 2: k.append(1.0/np.sqrt(4*np.pi)) #k.append(0.0) #k.append(0.5535) elif n > 2 and n % 2 == 0: #c = 1.0 * np.math.factorial(np.math.factorial(n-3))**2 / np.sqrt(2*np.pi*np.math.factorial(n)) c = 1.0 * factorial2(n-3)**2 / np.sqrt(2*np.pi*np.math.factorial(n)) k.append(c) #k.append(0.0) #k.append(-0.4244) elif n >= 2 and n % 2 != 0: k.append(0.0) #k.append(0.2126) #k.append(0.0655) return k def get_vars(self, num_pol = 5, copy_fun = 'relu', seed = 1, dtype = torch.float32): torch.manual_seed(seed) if copy_fun == 'relu': k = self.get_initializations(num_pol, copy_fun) p = 0.00001*torch.randn(num_pol, requires_grad=True) + torch.tensor(k) p_param = torch.nn.Parameter(p) return p_param def hermite(self, x, k, num_pol = 5): evals = 0.0 for i in range(num_pol): #print('this', i) #print('a', k[i]) #print('b', self.h[i](x)) #print('a*b', k[i]*self.h[i](x)) #eval_c = k[i]*self.h[i](x) #if np.isnan(eval_c): # eval_c = 0. evals += k[i]*self.h[i](x) return evals def hermitePreBN(self, x, k, num_pol = 5): evals = [] for i in range(num_pol): evals.append(k[i]*self.h[i](x)) #print('this', i) #print('a', k[i]) #print('b', self.h[i](x)) #print('a*b', k[i]*self.h[i](x)) #eval_c = k[i]*self.h[i](x) #if np.isnan(eval_c): # eval_c = 0. return evals
nilq/baby-python
python
import numpy as np
import math
import cv2


class PSNR():
    def __init__(self, range=1):
        self.range = range

    def __call__(self, img1, img2):
        mse = np.mean((img1 - img2) ** 2)
        return 20 * math.log10(self.range / math.sqrt(mse))


class SSIM():
    def __init__(self, range=1):
        self.range = range

    def __call__(self, img1, img2):
        if not img1.shape == img2.shape:
            raise ValueError("Input images must have the same dimensions.")
        if img1.ndim == 2:  # Grey or Y-channel image
            return self._ssim(img1, img2)
        elif img1.ndim == 3:
            if img1.shape[2] == 3:
                ssims = []
                for i in range(3):
                    ssims.append(self._ssim(img1, img2))
                return np.array(ssims).mean()
            elif img1.shape[2] == 1:
                return self._ssim(np.squeeze(img1), np.squeeze(img2))
        else:
            raise ValueError("Wrong input image dimensions.")

    def _ssim(self, img1, img2):
        C1 = (0.01 * self.range) ** 2
        C2 = (0.03 * self.range) ** 2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
            (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
        )
        return ssim_map.mean()
nilq/baby-python
python
from excursion.sampler import * from excursion.models import ExcursionModel, SKLearnGP from excursion.acquisition import * from excursion.excursion import ExcursionProblem, ExcursionResult # # move this into the excursion result, unless we add scikit learn implementation # # def build_result(details: ExcursionProblem, acquisition, **kwargs): if kwargs['device'] == 'skcpu': X_pointsgrid = details.X_pointsgrid true_y = details.functions[0](X_pointsgrid) else: raise NotImplementedError("Only supports device 'SKCPU'") acquisition = acquisition # What if they passed in their own acq, then there is no string here. return ExcursionResult(ndim=details.ndim, thresholds=details.thresholds, true_y=true_y, invalid_region=details.invalid_region, X_pointsgrid=details.X_pointsgrid, X_meshgrid=details.X_meshgrid, rangedef=details.rangedef) def build_sampler(generator: str or SampleGenerator, **kwargs): """Build a default random sample generator. For the special generator called "random" the return value is None. Parameters ---------- generator : "random", "latin_sample", "latin_hypercube" or SampleGenerator instance" Should inherit from `skopt.sampler.SampleGenerator`. kwargs : dict Extra parameters provided to the generator at init time. """ if generator is None: generator = "random" elif isinstance(generator, str): generator = generator.lower() allowed_generator = ["random"] if generator not in allowed_generator: raise ValueError("Valid strings for the generator parameter " " are: 'latin', 'latin_hypercube', or 'random' not " "%s." % generator) elif not isinstance(generator, SampleGenerator): raise ValueError("generator has to be a SampleGenerator or str." "Got %s" % (str(type(generator)))) if isinstance(generator, str): if generator == "random": generator = RandomChoice() generator.set_params(**kwargs) return generator def build_acquisition_func(acq_function: str or AcquisitionFunction, **kwargs): """Build an acquisition function. For the special acq_function called "random" the return value is None. Parameters ---------- function : "MES", "PES", or AcquisitionFunction instance" Should inherit from `skopt.sampler.SampleGenerator`. kwargs : dict Extra parameters provided to the acq_function at init time. """ if acq_function is None: acq_function = "PES" elif isinstance(acq_function, str): acq_function = acq_function.lower() allowed_acq_funcs = ["pes"] if acq_function not in allowed_acq_funcs: raise ValueError("Valid strings for the acq_function parameter " " are: %s, not %s." % (",".join(allowed_acq_funcs), acq_function)) elif not isinstance(acq_function, AcquisitionFunction): raise TypeError("acq_function has to be an AcquisitionFunction. Got %s" % (str(type(acq_function)))) if isinstance(acq_function, str): if acq_function == "pes": acq_function = SKPES() acq_function.set_params(**kwargs) return acq_function def build_model(model: str or ExcursionModel, rangedef, **kwargs): """ Build an acquisition function. For the special acq_function called "random" the return value is None. Parameters ---------- model : "GPyTorchGP", "GridGP", or ExcursionModel instance" Should inherit from `excursion.models.ExcursionModel`. kwargs : dict Extra parameters provided to the acq_function at init time. """ if model is None: model = "sklearngp" elif isinstance(model, str): model = model.lower() allowed_models = ["sklearngp"] if model not in allowed_models: raise ValueError("Valid strings for the model parameter are: 'SKLearnGP' not %s." 
% model) elif not isinstance(model, ExcursionModel): raise TypeError("model has to be an ExcursionModel or str. Got %s" % (str(type(model)))) if isinstance(model, str): if model == "sklearngp": model = SKLearnGP(ndim=len(rangedef)) model.set_params(**kwargs) return model
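# Minimal usage sketch (not part of the original file): resolves the string
# shortcuts accepted by the three builders above. The rangedef value is a
# made-up one-dimensional grid definition used only so that build_model can
# infer ndim; it is not taken from the original project.
if __name__ == "__main__":
    sampler = build_sampler("random")            # -> RandomChoice instance
    acq = build_acquisition_func("PES")          # -> SKPES instance
    model = build_model("SKLearnGP", rangedef=[[0.0, 1.0, 41]])  # -> SKLearnGP(ndim=1)
    print(type(sampler).__name__, type(acq).__name__, type(model).__name__)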
nilq/baby-python
python
def settings():
    # Manually input all extensions and the copy/search settings
    extensions = []
    key = "Y"
    while key != "N":
        extension = str(input("Enter an extension to search and organize: ")).strip().replace(".", "").lower()
        extensions.append(extension)
        key = str(input("Continue? Y/N: ")).strip().upper()

    answer = str(input("Would you like to copy all files? Yes/No: "))
    available_copy = answer.strip().lower() == "yes"

    answer = str(input("Would you like to search recursively? Yes/No: "))
    recursiveSearch = answer.strip().lower() == "yes"

    return extensions, available_copy, recursiveSearch
nilq/baby-python
python
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'resources/treeDialog.ui' # # Created by: PyQt5 UI code generator 5.13.0 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_TreeDialog(object): def setupUi(self, TreeDialog): TreeDialog.setObjectName("TreeDialog") TreeDialog.resize(912, 804) self.gridLayout = QtWidgets.QGridLayout(TreeDialog) self.gridLayout.setObjectName("gridLayout") self.groups = QtWidgets.QComboBox(TreeDialog) self.groups.setObjectName("groups") self.gridLayout.addWidget(self.groups, 0, 0, 1, 1) self.showButton = QtWidgets.QPushButton(TreeDialog) self.showButton.setObjectName("showButton") self.gridLayout.addWidget(self.showButton, 0, 1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(TreeDialog) self.lineEdit.setObjectName("lineEdit") self.gridLayout.addWidget(self.lineEdit, 0, 2, 1, 1) self.label = QtWidgets.QLabel(TreeDialog) self.label.setText("") self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 0, 3, 1, 1) self.treeView = QtWidgets.QTreeView(TreeDialog) self.treeView.setObjectName("treeView") self.gridLayout.addWidget(self.treeView, 1, 0, 1, 3) self.tableWidget = QtWidgets.QTableWidget(TreeDialog) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(0) self.tableWidget.setRowCount(0) self.gridLayout.addWidget(self.tableWidget, 1, 3, 1, 3) self.buttonBox = QtWidgets.QDialogButtonBox(TreeDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 6) self.checkBox = QtWidgets.QCheckBox(TreeDialog) self.checkBox.setObjectName("checkBox") self.gridLayout.addWidget(self.checkBox, 0, 5, 1, 1) self.retranslateUi(TreeDialog) self.buttonBox.accepted.connect(TreeDialog.accept) self.buttonBox.rejected.connect(TreeDialog.reject) QtCore.QMetaObject.connectSlotsByName(TreeDialog) def retranslateUi(self, TreeDialog): _translate = QtCore.QCoreApplication.translate TreeDialog.setWindowTitle(_translate("TreeDialog", "Dialog")) self.showButton.setText(_translate("TreeDialog", "Show")) self.checkBox.setText(_translate("TreeDialog", "Select all"))
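# Minimal usage sketch (not part of the original file): the standard PyQt5
# pattern for attaching a generated Ui_* class to a dialog. Requires a display
# and a running QApplication.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_TreeDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())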
nilq/baby-python
python
# Create a program that reads something and shows its primitive type and all possible information about it.
print('=-'*7, 'CHALLENGE 4', '=-'*7)
n = input('Type something: ')
print('The primitive type of this value is {}.'.format(type(n)))
# Room for improvement!
print('Only spaces? {}'.format(n.isspace()))
print('Is it a number? {}'.format(n.isnumeric()))
print('Is it alphanumeric? {}'.format(n.isalnum()))
print('Is it alphabetic? {}'.format(n.isalpha()))
print('Is it lowercase? {}'.format(n.islower()))
print('Is it uppercase? {}'.format(n.isupper()))
print('Is it capitalized? {}'.format(n.istitle()))
nilq/baby-python
python
import sys import math import json class Point: def __init__(self, x, y, z, index): self.x = x self.y = y self.z = z self.index = index def getDist(a, b): return math.sqrt((a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y) + (a.z - b.z)*(a.z - b.z)) for arg in sys.argv: filename = arg input = open('../problems/input/tsp.txt', 'r') pts = [] for line in input: l = line.split() index = int(l[0]) x = int(l[1]) y = int(l[2]) z = int(l[3]) pts.append(Point(x, y, z, index)) nums = [] with open(filename) as fileIn: for line in fileIn: for w in line.split(' '): if len(w) > 0: try: nums.append(int(w)) except ValueError: print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"})) sys.exit(-1) for a in nums: if a > 500 or a < 1: print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"})) sys.exit(-1) beenTo = [] for a in range(0, 500): beenTo.append(False) dist = 0.0 for a in range(1, len(nums)): if beenTo[nums[a] - 1]: print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"})) sys.exit(-1) beenTo[nums[a] - 1] = True b = a - 1 dist += getDist(pts[nums[b] - 1], pts[nums[a] - 1]) if beenTo[nums[0] - 1]: print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"})) sys.exit(-1) beenTo[nums[0] - 1] = True dist += getDist(pts[nums[0] - 1], pts[nums[-1] - 1]) for a in beenTo: if not(a): print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"})) sys.exit(-1) print(json.dumps({"isError": False, "score": dist, "message": "You got a score of " + str(dist) + "!"}))
nilq/baby-python
python
from scipy import interp import numpy as np from itertools import cycle from sklearn.metrics import roc_curve, auc from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score from itertools import cycle import matplotlib.pyplot as plt def plot_roc_pr( y_pred : np.ndarray, y_test : np.ndarray ) -> None: """ Plots the ROC + Precision recall curves for """ n_classes = y_test.shape[1] # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves # First aggregate all false positive rates lw =2 all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(15,6)) ax1.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), color='gold', linestyle=':', linewidth=4) ax1.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), color='deeppink', linestyle=':', linewidth=4) colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal']) for i, color in zip(range(n_classes), colors): ax1.plot(fpr[i], tpr[i], color=color, lw=lw, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) ax1.plot([0, 1], [0, 1], 'k--', lw=lw) ax1.set_xlim([0.0, 1.0]) ax1.set_ylim([0.0, 1.05]) ax1.set_xlabel('False Positive Rate') ax1.set_ylabel('True Positive Rate') ax1.set_title('Extension of Receiver operating characteristic to multi-class') ax1.legend(loc="lower left") # ax1.show() # setup plot details precision = dict() recall = dict() average_precision = dict() for i in range(n_classes): precision[i], recall[i], _ = precision_recall_curve(y_test[:, i], y_pred[:, i]) average_precision[i] = average_precision_score(y_test[:, i], y_pred[:, i]) # A "micro-average": quantifying score on all classes jointly precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(), y_pred.ravel()) average_precision["micro"] = average_precision_score(y_test, y_pred, average="micro") colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal']) # plt.figure(figsize=(7, 8)) f_scores = np.linspace(0.2, 0.8, num=4) lines = [] labels = [] for f_score in f_scores: x = np.linspace(0.01, 1) y = f_score * x / (2 * x - f_score) l, = ax2.plot(recall["micro"], precision["micro"], color='gold', lw=2) lines.append(l) labels.append('micro-average Precision-recall (area = {0:0.2f})' ''.format(average_precision["micro"])) for i, color in 
zip(range(n_classes), colors): l, = ax2.plot(recall[i], precision[i], color=color, lw=2) lines.append(l) labels.append('Precision-recall for class {0} (area = {1:0.2f})' ''.format(i, average_precision[i])) # fig = plt.gcf() # fig.subplots_adjust(bottom=0.25) ax2.set_xlim([0.0, 1.0]) ax2.set_ylim([0.0, 1.05]) ax2.set_xlabel('Recall') ax2.set_ylabel('Precision') ax2.set_title('Extension of Precision-Recall curve to multi-class') ax2.legend(lines, labels) #, loc=(0, -.38), prop=dict(size=14))
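# Minimal usage sketch (not part of the original file): builds a small
# synthetic 3-class problem with scikit-learn and feeds one-hot labels plus
# predicted probabilities into plot_roc_pr. Dataset size, classifier, and
# random seeds are arbitrary choices.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import label_binarize

    X, y = make_classification(n_samples=600, n_classes=3, n_informative=6, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)

    y_score = clf.predict_proba(X_te)                   # shape (n_samples, 3)
    y_onehot = label_binarize(y_te, classes=[0, 1, 2])  # shape (n_samples, 3)
    plot_roc_pr(y_score, y_onehot)                      # note the (y_pred, y_test) argument order
    plt.show()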
nilq/baby-python
python
class withdelta(object):
    """
    Wraps any object in the `value` property, and adds a `delta` floating
    point property that can be used to store extra information, such as the
    percentage of improvement over a different value.

    All attributes are forwarded to `value`, except for `value` and `delta`.
    This means that any method called on a withdelta instance is delegated to
    the object currently stored in `value`.

    Use val_of to quickly unwrap any object from its withdelta wrapper.
    """
    value = None
    delta = None

    def __getattr__(self, name):
        if name in ['value', 'delta']:
            # Fall back to the normal lookup for the wrapper's own attributes.
            return super(withdelta, self).__getattribute__(name)
        else:
            return getattr(self.value, name)

    def __setattr__(self, name, value):
        if name in ['value', 'delta']:
            super(withdelta, self).__setattr__(name, value)
        else:
            setattr(self.value, name, value)

    def __repr__(self):
        return 'withdelta(' + str(self.value) + ', ' + str(self.delta) + ')'

    def __init__(self, obj, delta = float('NaN')):
        self.value = obj
        self.delta = delta


def val_of(obj):
    """
    Returns `obj.value` if obj is a withdelta instance, otherwise just obj.
    """
    return obj.value if isinstance(obj, withdelta) else obj
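# Minimal usage sketch (not part of the original file): wraps a float score,
# stores a 12% improvement in .delta, and shows attribute forwarding plus
# val_of unwrapping.
if __name__ == "__main__":
    score = withdelta(0.87, delta=0.12)
    print(score)               # withdelta(0.87, 0.12)
    print(score.value + 1)     # 1.87 -- the wrapped object itself
    print(score.is_integer())  # False -- forwarded to float.is_integer
    print(val_of(score))       # 0.87
    print(val_of(0.5))         # 0.5 -- plain values pass through unchanged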
nilq/baby-python
python
class Dummy(object): def purge(self, path): pass
nilq/baby-python
python
""" cluster_graph.py ClusterGraph are a class for tracking all possible smirks decorators in a group (or cluster) of molecular fragments. Moving forward these will be used to find the minimum number of smirks decorators that are required to have a set of smirks patterns that maintain a given clustering of fragments. """ import networkx as nx from functools import total_ordering from chemper.graphs.single_graph import SingleGraph from chemper.graphs.environment import ChemicalEnvironment as CE from chemper.mol_toolkits import mol_toolkit @total_ordering class ClusterGraph(SingleGraph): """ ChemPerGraphs are a graph based class for storing atom and bond information. They use the chemper.mol_toolkits Atoms, Bonds, and Mols """ @total_ordering class AtomStorage: """ AtomStorage tracks information about an atom """ def __init__(self, atoms=None, label=None): """ Parameters ---------- atoms : ChemPer Atom or list of ChemPer Atoms this is one or more atoms whose information should be stored label : int SMIRKS index (:n) for writing SMIRKS if the value is less than zero it is used for storage purposes only as SMIRKS can only be written with positive integer indices """ self.decorators = set() if atoms is not None: # check if this is a single atom if 'Atom' in str(type(atoms)): atoms = [atoms] # otherwise it should be iterable for atom in atoms: self.decorators.add(self.make_atom_decorators(atom)) self.label = label def __lt__(self, other): """ Overrides the default implementation This method was primarily written for making SMIRKS patterns predictable. If atoms are sortable, then the SMIRKS patterns are always the same making tests easier to write. However, the specific sorting was created to also make SMIRKS output as human readable as possible, that is to at least make it easier for a human to see how the indexed atoms are related to each other. It is typically easier for humans to read SMILES/SMARTS/SMIRKS with less branching (indicated with ()). 
For example in: [C:1]([H])([H])~[N:2]([C])~[O:3] it is easier to see that the atoms C~N~O are connected in a "line" instead of: [C:1]([N:2]([O:3])[C])([H])[H] which is equivalent, but with all the () it is hard for a human to read the branching Parameters ---------- other : AtomStorage Returns ------- is_less_than : boolean self is less than other """ # if either smirks index is None, then you can't directly compare # make a temporary index that is negative if it was None self_index = self.label if self.label is not None else -1000 other_index = other.label if other.label is not None else -1000 # if either index is greater than 0, the one that is largest should go at the end of the list if self_index > 0 or other_index > 0: return self_index < other_index # Both SMIRKS indices are not positive or None so compare the SMIRKS patterns instead return self.as_smirks() < other.as_smirks() def __eq__(self, other): return self.as_smirks() == other.as_smirks() and self.label == other.label def __hash__(self): return id(self) def __str__(self): return self.as_smirks() def make_atom_decorators(self, atom): """ extract information from a ChemPer Atom that would be useful in a smirks parameters ---------- atom : ChemPer atom object returns ------- decorators : tuple of str tuple of all possible decorators for this atom """ aromatic = 'a' if atom.is_aromatic() else 'A' charge = atom.formal_charge() if charge >= 0: charge = '+%i' % charge else: charge = '%i' % charge min_ring_size = atom.min_ring_size() if min_ring_size == 0: ring = '!r' else: ring = 'r%i' % min_ring_size return ( '#%i' % atom.atomic_number(), 'H%i' % atom.hydrogen_count(), 'X%i' % atom.connectivity(), 'x%i' % atom.ring_connectivity(), ring, charge, aromatic, ) def as_smirks(self, compress=False): """ Parameters ---------- compress : boolean should decorators common to all sets be combined for example '#6X4,#7X3;+0!r...' 
Returns ------- smirks : str how this atom would be represented in a SMIRKS string with the minimal combination of SMIRKS decorators """ if len(self.decorators) == 0: if self.label is None or self.label <= 0: return '[*]' return '[*:%i]' % self.label if compress and len(self.decorators) > 1: base_smirks = self._compress_smirks() else: base_smirks = ','.join(sorted([''.join(l) for l in self.decorators])) if self.label is None or self.label <= 0: return '[%s]' % base_smirks return '[%s:%i]' % (base_smirks, self.label) def _sort_decs(self, dec_set, wild=True): """ Parameters ---------- dec_set : list like single set of atom decorators wild : boolean insert * for decorator lists with no #n decorator Returns ------- sorted_dec_set : list same set of decorators sorted with atomic number or * first """ temp_dec_set = list(dec_set) atom_num = [i for i in temp_dec_set if '#' in i] if len(atom_num) == 0 and wild: atom_num = ["*"] temp_dec_set = set(temp_dec_set) - set(atom_num) aro = [i for i in temp_dec_set if 'a' in i.lower()] temp_dec_set = set(temp_dec_set) - set(aro) return atom_num + sorted(list(temp_dec_set)) + aro def _compress_smirks(self): """ Returns ------- smirks : str This SMIRKS is compressed with all common decorators and'd to the end of the pattern """ set_decs = [set(d) for d in self.decorators] ands = set_decs[0] for d_set in set_decs: ands = ands & d_set # check for atomic number in the "ands" atomic = [a for a in ands if '#' in a] if len(atomic) == 1: # remove from and ands.remove(atomic[0]) # put in all sets for s in set_decs: s.add(atomic[0]) or_sets = [self._sort_decs(d.difference(ands)) for d in set_decs] ors = [''.join(o) for o in or_sets] # add commas between ors base = ','.join(sorted(ors)) # add and decorators if len(ands) > 0: base += ';'+ ';'.join(self._sort_decs(ands, wild=False)) return base def add_atom(self, atom): """ Expand current AtomStorage by adding information about a new ChemPer Atom Parameters ---------- atom : ChemPer Atom """ self.decorators.add(self.make_atom_decorators(atom)) def compare_atom(self, atom): """ Compares decorators in this AtomStorage with the provided ChemPer atom. The decorators are compared separately and the highest score is returned. For example, if this storage had two sets of decorators - #7H1X3x0!r+0A - #6H1X4x0!r+0A and the input atom would have the decorators: - #6H1X3x2!r+0a The score is calculated by finding the number of decorators in common which would be - #7H1X3x0!r+0A and #6H1X3x2r6+0a have 3 decorators in common (H1,X3,+0) - #6H1X4x0!r+0A and #6H1X3x2r6+0a also have 3 decorators in common (#6, H1, +0) However, we weight atoms with the same atomic number as more similar by dividing the score by 10 if the atomic numbers do not agree. Therefore the final scores will be: - 0.3 for #7H1X3x0!r+0A - 3 for #6H1X4x0!r+0A The highest score for any set of decorators is returned so 3 is the returned score in this example. Parameters ---------- atom : ChemPer Atom Returns ------- score : float A score describing how similar the input atom is to any set of decorators currently in this storage, based on its SMIRKS decorators. This score ranges from 0 to 7. 7 comes from the number of decorators on any atom, if this atom matches perfectly with one of the current decorator sets then 7 decorators agree.However, if the atomic number doesn't agree, then that set of decorators is considered less ideal, thus if the atomic numbers don't agree, then the score is given by the number other decorators divided by 10. 
If the current storage is empty, then the score is given as 7 since any atom matches a wildcard atom. """ # If decorators is empty (no known atom information, return 7 (current max) if len(self.decorators) == 0: return 7 score = 0 decs = self.make_atom_decorators(atom) for ref in self.decorators: # get atomic number for this set of decorators current = len(set(ref) & set(decs)) # if atomic numbers don't agree, get the number of common decorators / 10 # if there are no matching atomic numbers, priority should still be given # when the current atom matches stored decorators most closely if ref[0] != decs[0]: current = current / 10.0 if current > score: score = current return score @total_ordering class BondStorage: """ BondStorage tracks information about a bond """ def __init__(self, bonds=None, label=None): """ Parameters ---------- bonds : list of ChemPer Bonds this is one or more bonds whose information should be stored label : a label for the object, it can be anything unlike atoms, bonds in smirks don't have labels so this is only used for labeling the object if wanted """ self.order = set() self.ring = set() self.order_dict = {1:'-', 1.5:':', 2:'=', 3:'#'} if bonds is not None: if 'Bond' in str(type(bonds)): bonds = [bonds] for bond in bonds: self.order.add(bond.get_order()) self.ring.add(bond.is_ring()) self.label = label def __str__(self): return self.as_smirks() def __lt__(self, other): if self.as_smirks() == other.as_smirks(): return self.label < other.label return self.as_smirks() < other.as_smirks() def __eq__(self, other): return self.label == other.label and self.as_smirks() == other.as__smirks() def __hash__(self): return id(self) def as_smirks(self): """ Returns ------- smirks : str how this bond would be represented in a SMIRKS string using only the required number of """ if len(self.order) == 0: order = '~' else: order = ','.join([self.order_dict.get(o, '~') for o in sorted(list(self.order))]) # the ring set has booleans, if the length of the set is 1 then only ring (@) or non-ring (!@) # bonds haven been added to this storage and we AND that decorator to the end of the bond if len(self.ring) == 1: if list(self.ring)[0]: return order+';@' else: return order+';!@' return order def add_bond(self, bond): """ Expand current BondStorage by adding information about a new ChemPer Bond Parameters ---------- bond : ChemPer Bond """ self.order.add(bond.get_order()) self.ring.add(bond.is_ring()) def compare_bond(self, bond): """ Parameters ---------- bond : ChemPer Bond bond you want to compare to the current storage Returns ------- score : int (0,1,2) A score describing how similar the input bond is to any set of decorators currently in this storage, based on its SMIRKS decorators. 1 for the bond order + 1 base on if this is a ring bond """ score = 0 if bond.get_order() in self.order or len(self.order) == 0: score += 1 # the ring set has booleans, if the length of the set is 1 then only ring or non-ring # bonds haven been added to this storage. That is the only time the ring contributes to the score if len(self.ring) == 1 and list(self.ring)[0] == bond.is_ring(): score += 1 return score # Initiate ClusterGraph def __init__(self, mols=None, smirks_atoms_lists=None, layers=0): """ Initialize a SingleGraph from a molecule and list of indexed atoms For the example, imagine we wanted to get a SMIRKS that would match the carbon-carbon bonds in ethane and propane. The carbon atoms are have indices (0,1) in ethane and (0,1) and (1,2) in propane. 
For this example, we will assume we also want to include the atoms one bond away from the indexed atoms (1 layer away). Parameters ---------- mols : list of molecules (optional) default = None (makes an empty graph) these can be ChemPer Mols or molecule objects from any supported toolkit (currently OpenEye or RDKit) smirks_atoms_lists : list of list of tuples (optional) default = None (must be paired with mols=None) There is a list of tuples for each molecule, where each tuple specifies a molecular fragment using the atoms' indices. In the ethane and propane example, the `smirks_atoms_lists` would be [ [ (0,1) ], [ (0,1), (1,2) ] ] with one carbon-carbon bond in ethane and two carbon-carbon bonds in propane layers : int (optional) default = 0 layers specifies how many bonds away from the indexed atoms should be included in the the SMIRKS patterns. Instead of an int, the string 'all' would lead to all atoms in the molecules being included in the SMIRKS (not recommended) """ SingleGraph.__init__(self) self.mols = list() self.smirks_atoms_lists = list() self.layers = layers self._symmetry_funct = self._no_symmetry if mols is not None: temp_mols = [mol_toolkit.Mol(m) for m in mols] if len(temp_mols) != len(smirks_atoms_lists): raise Exception('Number of molecules and smirks dictionaries should be equal') for idx, mol in enumerate(temp_mols): self.add_mol(mol, smirks_atoms_lists[idx]) def as_smirks(self, compress=False): """ Parameters ---------- compress : boolean returns the shorter version of atom SMIRKS patterns that is atoms have decorators "anded" to the end rather than listed in each set that are OR'd together. For example "[#6AH2X3x0!r+0,#6AH1X3x0!r+0:1]-;!@[#1AH0X1x0!r+0]" compresses to: "[#6H2,#6H1;AX3x0!r+0:1]-;!@[#1AH0X1x0!r+0]" Returns ------- SMIRKS : str a SMIRKS string matching the exact atom and bond information stored """ # The atom compression is different, but otherwise this is the # same function as the parent class (SingleGraph) return SingleGraph.as_smirks(self, compress) def get_symmetry_funct(self, sym_label): """ Determine the symmetry function that should be used when adding atoms to this graph. For example, imagine a user is trying to make a SMIRKS for all of the C-H bonds in methane. In most toolkits the index for the carbon is 0 and the hydrogens are 1,2,3,4. The final SMIRKS should have the form [#6AH4X4x0!r+0:1]-;!@[#1AH0X1x0!r+0] no matter what order the atoms are input into ClusterGraph. So if the user provides (0,1), (0,2), (3,0), (4,0) ClusterGraph should figure out that the carbons in (3,0) and (4,0) should be in the atom index :1 place like they were in the first set of atoms. Bond atoms in (1,2) or (2,1) are symmetric, for angles its (1,2,3) or (3,2,1) for proper torsions (1,2,3,4) or (4,3,2,1) and for improper torsions (1,2,3,4), (3,2,1,4), (4,2,1,3). For any other fragment type the atoms will be added to the graph in the order they are provided since the symmetry function is unknown. # TODO: In theory you could generalize this for generic linear fragments # where those with an odd number of atoms behave like angles and an # even number behave like proper torsions, however I think that is # going to be outside the scope of ChemPer for the foreseeable future. 
Parameters ---------- sym_label : str or None type of symmetry, options which will change the way symmetry is handled in the graph are "bond", "angle", "ProperTorsion", and "ImproperTorsion" Returns ------- symmetry_funct : function returns the function that should be used to handle the appropriate symmetry """ if sym_label is None: return self._no_symmetry if sym_label.lower() == 'bond': return self._bond_symmetry if sym_label.lower() == 'angle': return self._angle_symmetry if sym_label.lower() == 'propertorsion': return self._proper_torsion_symmetry if sym_label.lower() == 'impropertorsion': return self._improper_torsion_symmetry return self._no_symmetry def add_mol(self, input_mol, smirks_atoms_list): """ Expand the information in this graph by adding a new molecule Parameters ---------- input_mol : ChemPer Mol smirks_atoms_list : list of tuples This is a list of tuples with atom indices [ (indices), ... ] """ mol = mol_toolkit.Mol(input_mol) if len(smirks_atoms_list) == 0: return if len(self.mols) == 0: self._add_first_smirks_atoms(mol, smirks_atoms_list[0]) self._symmetry_funct = self.get_symmetry_funct(CE(self.as_smirks()).get_type()) self._add_mol(mol, smirks_atoms_list[1:]) else: self._add_mol(mol, smirks_atoms_list) self.mols.append(mol) self.smirks_atoms_lists.append(smirks_atoms_list) def _add_first_smirks_atoms(self, mol, smirks_atoms): """ private function for adding the first molecule to an empty ClusterGraph add_mol calls this if the graph is empty Parameters ---------- mol : ChemPer Mol smirks_atoms : tuple tuple of atom indices for the first atoms to add to the graph. i.e. (0, 1) """ atom_dict = dict() for key, atom_index in enumerate(smirks_atoms, 1): atom_dict[atom_index] = key atom1 = mol.get_atom_by_index(atom_index) new_atom_storage = self.AtomStorage([atom1], key) self._graph.add_node(new_atom_storage) self.atom_by_label[key] = new_atom_storage # Check for bonded atoms already in the graph for neighbor_key in range(len(smirks_atoms), 0, -1): if neighbor_key not in self.atom_by_label: continue # check if atoms are already connected on the graph neighbor_storage = self.atom_by_label[neighbor_key] if nx.has_path(self._graph, new_atom_storage, neighbor_storage): continue # check if atoms are connected in the molecule atom2 = mol.get_atom_by_index(smirks_atoms[neighbor_key-1]) bond = mol.get_bond_by_atoms(atom1, atom2) if bond is not None: # Atoms are connected add edge bond_smirks = tuple(sorted([neighbor_key, key])) bond_storage = self.BondStorage([bond], bond_smirks) self.bond_by_label[bond_smirks] = bond_storage self._graph.add_edge(new_atom_storage, neighbor_storage, bond=bond_storage) # for each indexed atoms add unindexed atoms for the number of specified layers for atom_label, atom_index in enumerate(smirks_atoms, 1): atom = mol.get_atom_by_index(atom_index) storage = self.atom_by_label[atom_label] self._add_layers(mol, atom, storage, self.layers, atom_dict, is_first=True) def _add_layers(self, mol, atom, storage, layers, idx_dict, is_first=False): """ Parameters ---------- mol : ChemPer Mol molecule containing provided atom atom : ChemPer Atom storage: AtomStorage corresponding to the ChemPer Atom provided layers : int or 'all' number of layers left to add (or all) idx_dict : dict form {atom index: label} for this smirks_list in this molecule """ # if layers is 0 there are no more atoms to add so end the recursion if layers == 0: return # find atom neighbors that are not already included in SMIRKS indexed atoms atom_neighbors = [(a, mol.get_bond_by_atoms(a,atom)) 
for a in atom.get_neighbors() \ if a.get_index() not in idx_dict] # get the smirks indices already added to the storage # This includes all previous layers since the idx_dict is updated as you go storage_labels = [e for k,e in idx_dict.items()] # similar to atoms find neighbors already in the graph that haven't already been used storage_neighbors = [(s, self.get_connecting_bond(s, storage)) for s in self.get_neighbors(storage) \ if s.label not in storage_labels] new_pairs = list() # if this is the first set of atoms added, just make a new # storage for all neighboring atoms if is_first: min_smirks = storage.label * 10 if min_smirks > 0: min_smirks = min_smirks * -1 for a, b in atom_neighbors: new_bond_smirks = tuple(sorted([storage.label, min_smirks])) adding_new_storage = self.add_atom(a,b,storage, min_smirks, new_bond_smirks) idx_dict[a.get_index()] = min_smirks self.atom_by_label[min_smirks] = adding_new_storage min_smirks -= 1 new_pairs.append((a, adding_new_storage)) else: # this isn't the first set of atoms so you need to # pair up the atoms with their storage pairs = self.find_pairs(atom_neighbors, storage_neighbors) for new_atom, new_bond, new_storage_atom, new_storage_bond in pairs: # if no storage is paired to this atom skip it if new_storage_atom is None: continue # if there is no atom paired to a storage remove that branch if new_atom is None: self.remove_atom(new_storage_atom) continue # add atom and bond information to the storage new_storage_atom.add_atom(new_atom) new_storage_bond.add_bond(new_bond) new_pairs.append((new_atom, new_storage_atom)) idx_dict[new_atom.get_index()] = new_storage_atom.label # Repeat for the extra layers if layers == 'all': new_layers = 'all' else: new_layers = layers - 1 if new_layers == 0: return for new_atom, new_storage in new_pairs: self._add_layers(mol, new_atom, new_storage, new_layers, idx_dict, is_first) def find_pairs(self, atoms_and_bonds, storages): """ Find pairs is used to determine which current AtomStorage from storages atoms should be paired with. This function takes advantage of the maximum scoring function in networkx to find the pairing with the highest "score". Scores are determined using functions in the atom and bond storage objects that compare those storages to the new atom or bond. If there are less atoms than storages then the atoms with the lowest pair are assigned a None pairing. Parameters ---------- atoms_and_bonds : list of tuples in form (ChemPer Atom, ChemPer Bond, ...) storages: list of tuples in form (AtomStorage, BondStorage, ...) Tuples can be of any length as long as they are the same, so for example, in a bond you might only care about the outer atoms for comparison so you would compare (atom1,) and (atom2,) with (atom_storage1,) and (atom_storage2,) However, in a torsion, you might want the atoms and bonds for each outer bond so in that case you would compare (atom1, bond1, atom2) and (atom4, bond3, atom3) with the corresponding storage objects. Returns ------- pairs : list of lists pairs of atoms and storage objects that are most similar, these lists always come in the form (all atom/bonds, all storage objects) for the bond example above you might get [ [atom1, storage1], [atom2, storage2] ] for the torsion example you might get [ [atom4, bond4, atom3, atom_storage1, bond_storage1, atom_storage2], [atom1, bond1, atom2, atom_storage4, bond_storage3, atom_storage3] """ # store paired stets of atoms/bonds and corresponding storages pairs = list() # check for odd cases combo = atoms_and_bonds + storages # 1. 
both lists are empty if len(combo) == 0: return pairs nones = [None] * len(combo[0]) # 2. no atom/bond storage if len(atoms_and_bonds) == 0: for storage_set in storages: pairs.append(nones + list(storage_set)) return pairs # 3. no storages if len(storages) == 0: for atom_set in atoms_and_bonds: pairs.append(list(atom_set) + nones) return pairs g = nx.Graph() atom_dict = dict() storage_dict = dict() # create a bipartite graph with atoms/bonds on one side for idx, atom_set in enumerate(atoms_and_bonds): g.add_node(idx+1, bipartite=0) atom_dict[idx+1] = atom_set # and atom/bond storage objects on the other for idx, storage_set in enumerate(storages): g.add_node((idx*-1)-1, bipartite=1) storage_dict[(idx*-1)-1] = storage_set # Fill in the weight on each edge of the graph using the compare_atom/bond functions for a_idx, atom_set in atom_dict.items(): for s_idx, storage_set in storage_dict.items(): # sum up score for every entry in the atom and storage set score = 0 for sa, a in zip(storage_set, atom_set): if isinstance(sa, self.BondStorage): score += sa.compare_bond(a) else: score += sa.compare_atom(a) # score can't be zero so save score+1 g.add_edge(a_idx,s_idx,weight=score+1) # calculate maximum matching, that is the pairing of atoms/bonds to # storage objects that leads the the highest overall score matching = nx.algorithms.max_weight_matching(g,maxcardinality=False) # track the atoms assigned a paired storage object pair_set = set() # store all pairs for idx_1, idx_2 in matching: pair_set.add(idx_1) pair_set.add(idx_2) if idx_1 in atom_dict: atom_set = atom_dict[idx_1] storage_set = storage_dict[idx_2] else: atom_set = atom_dict[idx_2] storage_set = storage_dict[idx_1] pairs.append(list(atom_set) + list(storage_set)) # check for missing atom storages for a_idx, atom_set in atom_dict.items(): if a_idx not in pair_set: pairs.append(list(atom_set) + nones) # check for missing atoms for s_idx, storage_set in storage_dict.items(): if s_idx not in pair_set: pairs.append(nones + list(storage_set)) return pairs def _add_mol(self, mol, smirks_atoms_list): """ private function for adding a new molecule This is used by add_mol if the graph is not empty, allowing the user to not have to track if the graph already has information before adding molecules Parameters ---------- mol : any Mol smirks_atoms_list : list of dicts This is a list of dictionaries of the form [{smirks index: atom index}] each atom (by index) in the dictionary will be added the relevant AtomStorage by smirks index """ for smirks_atoms in smirks_atoms_list: atom_dict = dict() sorted_smirks_atoms = self._symmetry_funct(mol, smirks_atoms) for key, atom_index in enumerate(sorted_smirks_atoms, 1): atom_dict[atom_index] = key atom1 = mol.get_atom_by_index(atom_index) self.atom_by_label[key].add_atom(atom1) for neighbor_key, neighbor_index in enumerate(sorted_smirks_atoms, 1): # check for connecting bond atom2 = mol.get_atom_by_index(neighbor_index) bond = mol.get_bond_by_atoms(atom1, atom2) if bond is not None and (neighbor_key, key) in self.bond_by_label: bond_smirks = tuple(sorted([neighbor_key, key])) self.bond_by_label[bond_smirks].add_bond(bond) for atom_label, atom_index in enumerate(sorted_smirks_atoms, 1): atom = mol.get_atom_by_index(atom_index) storage = self.atom_by_label[atom_label] self._add_layers(mol, atom, storage, self.layers, atom_dict) def _no_symmetry(self, mol, smirks_atoms): """ No change is made to the atom order for this molecule """ return smirks_atoms def _bond_symmetry(self, mol, smirks_atoms): """ Returns a 
tuple of two atom indices in the order that leads to the atoms that match with previously stored atoms. Parameters ----------- mol : ChemPer Mol smirks_atoms : two tuple tuple of atom indices Returns -------- ordered_smirks_atoms : two tuple tuple of atom indices as they should be added to the graph """ # pair atoms and bonds atom1 = mol.get_atom_by_index(smirks_atoms[0]) atom2 = mol.get_atom_by_index(smirks_atoms[1]) # Find potential storages for those atoms and bonds atoms_and_bonds = [(atom1,), (atom2,)] storages = [ (self.atom_by_label[1],), (self.atom_by_label[2],) ] pairs = self.find_pairs(atoms_and_bonds, storages) ordered_smirks_atoms = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[1].label)] return tuple(ordered_smirks_atoms) def _angle_symmetry(self, mol, smirks_atoms): """ Returns a tuple of three atom indices in the order that leads to the atoms that match with previously stored atoms. Parameters ----------- mol : ChemPer Mol smirks_atoms : three tuple tuple of atom indices Returns -------- ordered_smirks_atoms : three tuple tuple of atom indices as they should be added to the graph """ # get all three atoms atom1 = mol.get_atom_by_index(smirks_atoms[0]) atom2 = mol.get_atom_by_index(smirks_atoms[1]) atom3 = mol.get_atom_by_index(smirks_atoms[2]) # get both bonds bond1 = mol.get_bond_by_atoms(atom1, atom2) bond2 = mol.get_bond_by_atoms(atom2, atom3) if None in (bond1, bond2): return smirks_atoms # save atom and bond pairs that could be reordered atoms_and_bonds = [(atom1, bond1), (atom3, bond2)] # find current atom and bond storage storages = [ (self.atom_by_label[1], self.bond_by_label[(1,2)]), (self.atom_by_label[3], self.bond_by_label[(2,3)]) ] pairs = self.find_pairs(atoms_and_bonds, storages) order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[2].label)] return tuple((order[0], smirks_atoms[1], order[1])) def _proper_torsion_symmetry(self, mol, smirks_atoms): """ Returns a tuple of four atom indices for a proper torsion reordered to match with previously stored atoms. Parameters ----------- mol : ChemPer Mol smirks_atoms : four tuple tuple of atom indices Returns -------- ordered_smirks_atoms : four tuple tuple of atom indices as they should be added to the graph """ # get all four atoms atom1 = mol.get_atom_by_index(smirks_atoms[0]) atom2 = mol.get_atom_by_index(smirks_atoms[1]) atom3 = mol.get_atom_by_index(smirks_atoms[2]) atom4 = mol.get_atom_by_index(smirks_atoms[3]) # get two relevant bonds bond1 = mol.get_bond_by_atoms(atom1, atom2) bond3 = mol.get_bond_by_atoms(atom3, atom4) if None in (bond1, bond3): return smirks_atoms # make pairs atoms_and_bonds = [ (atom2, bond1, atom1), (atom3, bond3, atom4) ] # get atom and bond storages storages = [ (self.atom_by_label[2], self.bond_by_label[(1,2)], self.atom_by_label[1]), (self.atom_by_label[3], self.bond_by_label[(3,4)], self.atom_by_label[4]) ] pairs = self.find_pairs(atoms_and_bonds, storages) order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[3].label)] if order[0] == smirks_atoms[1]: return smirks_atoms temp = list(smirks_atoms) temp.reverse() return tuple(temp) def _improper_torsion_symmetry(self, mol, smirks_atoms): """ Returns a tuple of four atom indices for an improper torsion reordered to match with previously stored atoms. 
Parameters ----------- mol : ChemPer Mol smirks_atoms : four tuple tuple of atom indices Returns -------- ordered_smirks_atoms : four tuple tuple of atom indices as they should be added to the graph """ # get all four atoms atom1 = mol.get_atom_by_index(smirks_atoms[0]) atom2 = mol.get_atom_by_index(smirks_atoms[1]) atom3 = mol.get_atom_by_index(smirks_atoms[2]) atom4 = mol.get_atom_by_index(smirks_atoms[3]) # get all three bonds bond1 = mol.get_bond_by_atoms(atom1, atom2) bond2 = mol.get_bond_by_atoms(atom2, atom3) bond3 = mol.get_bond_by_atoms(atom2, atom4) if None in (bond1, bond2, bond3): return smirks_atoms # make pairs of atoms and bonds to be reordered atoms_and_bonds = [ (atom1, bond1), (atom3, bond2), (atom4, bond3) ] # find current atom and bond storages storages = [ (self.atom_by_label[1], self.bond_by_label[(1,2)]), (self.atom_by_label[3], self.bond_by_label[(2,3)]), (self.atom_by_label[4], self.bond_by_label[(2,4)]) ] pairs = self.find_pairs(atoms_and_bonds, storages) order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[2].label)] return tuple((order[0], smirks_atoms[1], order[1], order[2]))
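# Minimal usage sketch (not part of the original file): reproduces the
# ethane/propane carbon-carbon bond example described in the docstrings above.
# It assumes chemper exposes a MolFromSmiles helper in mol_toolkit and that a
# supported toolkit (RDKit or OpenEye) is installed; the atom index tuples
# follow the __init__ documentation and depend on the toolkit's atom ordering.
if __name__ == "__main__":
    ethane = mol_toolkit.MolFromSmiles('CC')
    propane = mol_toolkit.MolFromSmiles('CCC')
    smirks_atoms_lists = [[(0, 1)], [(0, 1), (1, 2)]]
    graph = ClusterGraph([ethane, propane], smirks_atoms_lists, layers=1)
    print(graph.as_smirks())               # fully decorated SMIRKS
    print(graph.as_smirks(compress=True))  # shorter, "and"-compressed form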
nilq/baby-python
python
from abc import ABC from typing import Any class IWord(ABC): command: Any class Word(IWord): def __init__(self, command=None): self.command = command self.address = 0 def dump(self): return self.command.dump() @property def original(self): return self.command.original def set_instance_params(self, **kwargs): self.command.set_instance_params(**kwargs) def execute(self): return self.command.execute()
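# Minimal usage sketch (not part of the original file): Word simply forwards
# its API to whatever command object it wraps. The _NoOpCommand below is a
# made-up stand-in, not a class from the original project.
class _NoOpCommand:
    original = "NOP"

    def dump(self):
        return {"op": "NOP"}

    def set_instance_params(self, **kwargs):
        self.params = kwargs

    def execute(self):
        return None


if __name__ == "__main__":
    word = Word(_NoOpCommand())
    word.set_instance_params(debug=True)
    print(word.original, word.dump(), word.execute())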
nilq/baby-python
python
#!/usr/bin/env python3 import argparse import os def main(dir): with open(os.path.join(dir, 'text'), 'w', encoding='utf-8') as out_f: for line in open(os.path.join(dir, 'text.ort2'), encoding='utf-8'): key, sent = line.strip().split(None, 1) if len(sent) > 0 and sent[0] == "*": sent = sent[1:] sent = sent.replace("[sta]", " ").replace(" ", " ").replace(" ", " ") sent = sent.replace("_", "") print("{} {}".format(key, sent), file=out_f) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('dir') parser.add_argument('lexicon') args = parser.parse_args() main(args.dir)
nilq/baby-python
python
import json import unittest import urllib.request from multiprocessing.dummy import Pool from tests.gunicorn_utils import run_gunicorn def run_code_in_snekbox(code: str) -> tuple[str, int]: body = {"input": code} json_data = json.dumps(body).encode("utf-8") req = urllib.request.Request("http://localhost:8060/eval") req.add_header("Content-Type", "application/json; charset=utf-8") req.add_header("Content-Length", str(len(json_data))) with urllib.request.urlopen(req, json_data, timeout=30) as response: response_data = response.read().decode("utf-8") return response_data, response.status class IntegrationTests(unittest.TestCase): def test_memory_limit_separate_per_process(self): """ Each NsJail process should have its own memory limit. The memory used by one process should not contribute to the memory cap of other processes. See https://github.com/python-discord/snekbox/issues/83 """ with run_gunicorn(): code = "import time; ' ' * 33000000; time.sleep(0.1)" processes = 3 args = [code] * processes with Pool(processes) as p: results = p.map(run_code_in_snekbox, args) responses, statuses = zip(*results) self.assertTrue(all(status == 200 for status in statuses)) self.assertTrue(all(json.loads(response)["returncode"] == 0 for response in responses))
nilq/baby-python
python
import numpy as np import tensorflow as tf import random as rn from keras.layers import multiply,concatenate,Embedding from keras.layers.merge import dot from keras import backend as K from keras.models import Sequential # The below is necessary in Python 3.2.3 onwards to # have reproducible behavior for certain hash-based operations. # See these references for further details: # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED # https://github.com/fchollet/keras/issues/2280#issuecomment-306959926 import os os.environ['PYTHONHASHSEED'] = '0' # The below is necessary for starting Numpy generated random numbers # in a well-defined initial state. np.random.seed(42) # The below is necessary for starting core Python generated random numbers # in a well-defined state. rn.seed(12345) # Force TensorFlow to use single thread. # Multiple threads are a potential source of # non-reproducible results. # For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) from keras import backend as K # The below tf.set_random_seed() will make random number generation # in the TensorFlow backend have a well-defined initial state. # For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed tf.set_random_seed(1234) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) x1 = np.array([]).reshape(0,4) x1 = np.append(x1,np.array([1,2,3,4]).reshape(1,4),axis=0) x1 = np.append(x1,np.array([3,4,5,6]).reshape(1,4),axis=0) x1 = np.append(x1,np.array([5,6,7,8]).reshape(1,4),axis=0) y1 = np.array([]).reshape(0,4) y1 = np.append(y1,np.array([7,8,9,10]).reshape(1,4),axis=0) y1 = np.append(y1,np.array([9,10,11,12]).reshape(1,4),axis=0) y1 = np.append(y1,np.array([11,12,13,14]).reshape(1,4),axis=0) print(x1-y1) x = tf.placeholder(tf.float64, [3, 4]) y = tf.placeholder(tf.float64, [3, 4]) labels = tf.placeholder(tf.float64, [256]) xxx = K.sum(K.square(x-y),1,keepdims=True) yyy = dot([x,K.transpose(y)],(0,1)) zzz = tf.matmul(tf.transpose(x,perm=[0,1]),tf.transpose(y,perm=[1,0])) hhh = multiply([x,y]) labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1)) labels_not_equal = tf.logical_not(labels_equal) with tf.Session() as sess: init = tf.global_variables_initializer() sess.run(init) xxxx = sess.run(xxx, feed_dict={x:x1,y:y1}) print(xxxx) yyyy = sess.run(yyy, feed_dict={x:x1,y:y1}) print(yyyy) zzzz = sess.run(zzz, feed_dict={x:x1,y:y1}) print(zzzz) hhhh = sess.run(hhh, feed_dict={x:x1,y:y1}) print(hhhh) labels_test = sess.run(labels_equal, feed_dict={labels:np.random.randint(256, size=(256))}) labels_test_not_equal = sess.run(labels_not_equal, feed_dict={labels:np.random.randint(256, size=(256))}) print(labels_test) # Rest of code follows ... 
# x = K.variable(value=x1) # y = K.variable(value=y1) # # z = K.dot(x,K.transpose(y)) # # # Here you need to use K.eval() instead of z.eval() because this uses the backend session # print(K.eval(z)) # x_batch = K.ones(shape=(32, 20, 1)) # y_batch = K.ones(shape=(32, 30, 20)) # xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2]) # print(K.int_shape(xy_batch_dot)) #Lambda(lambda x: K.batch_dot(x, x, axes=(2, 2)), output_shape=lambda s: (s[0], s[1], s[1])) # def multiply(x,n): # x_prime = tf.reshape(x, (-1, n, 1)) # x_transpose = tf.transpose(x_prime, perm=[0,2, 1]) # return tf.batch_matmul(x_transpose,x_prime) # Lambda(lambda x: multiply(x, n), output_shape =(n, n)) model = Sequential() model.add(Embedding(1000, 64, input_length=10)) # the model will take as input an integer matrix of size (batch, input_length). # the largest integer (i.e. word index) in the input should be no larger than 999 (vocabulary size). # now model.output_shape == (None, 10, 64), where None is the batch dimension. input_array = np.random.randint(1000, size=(32, 10)) model.compile('rmsprop', 'mse') output_array = model.predict(input_array) assert output_array.shape == (32, 10, 64)
nilq/baby-python
python
from __future__ import print_function import os import sys from burlap import ServiceSatchel from burlap.constants import * from burlap.decorators import task class ApacheSatchel(ServiceSatchel): name = 'apache' post_deploy_command = 'reload' templates = [ '{site_template}', ] @property def packager_system_packages(self): mod_lst = [] if self.env.modevasive_enabled: mod_lst.append('libapache2-mod-evasive') if self.env.modsecurity_enabled: mod_lst.append('libapache2-modsecurity') if self.env.modrpaf_enabled: mod_lst.append('libapache2-mod-rpaf') if self.env.visitors_enabled: #TODO:fix? package removed in Ubuntu 16? mod_lst.append('visitors') return { FEDORA: ['httpd'] + mod_lst, UBUNTU: ['apache2'] + mod_lst, (UBUNTU, '12.04'): ['apache2', 'libapache2-mod-wsgi'] + mod_lst, (UBUNTU, '12.10'): ['apache2', 'libapache2-mod-wsgi'] + mod_lst, (UBUNTU, '14.04'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst, (UBUNTU, '14.10'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst, (UBUNTU, '16.04'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst, (UBUNTU, '16.10'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst, } def set_defaults(self): self.env.service_commands = { # START:{ # UBUNTU: 'service network-manager start', # }, # STOP:{ # UBUNTU: 'service network-manager stop', # }, # DISABLE:{ # UBUNTU: 'chkconfig network-manager off', # }, # ENABLE:{ # UBUNTU: 'chkconfig network-manager on', # }, # RESTART:{ # UBUNTU: 'service network-manager restart', # }, # STATUS:{ # UBUNTU: 'service network-manager status', # }, START:{ FEDORA: 'systemctl start httpd.service', UBUNTU: 'service apache2 start', }, STOP:{ FEDORA: 'systemctl stop httpd.service', UBUNTU: 'service apache2 stop', }, DISABLE:{ FEDORA: 'systemctl disable httpd.service', UBUNTU: 'chkconfig apache2 off', (UBUNTU, '14.04'): 'update-rc.d -f apache2 remove', }, ENABLE:{ FEDORA: 'systemctl enable httpd.service', UBUNTU: 'chkconfig apache2 on', (UBUNTU, '14.04'): 'update-rc.d apache2 defaults', }, RELOAD:{ FEDORA: 'systemctl reload httpd.service', UBUNTU: 'service apache2 reload', }, RESTART:{ FEDORA: 'systemctl restart httpd.service', #UBUNTU: 'service apache2 restart', # Note, the sleep 5 is necessary because the stop/start appears to # happen in the background but gets aborted if Fabric exits before # it completes. UBUNTU: 'service apache2 restart; sleep 3', }, } # An Apache-conf file and filename friendly string that uniquely identifies # your web application. self.env.application_name = None # The Jinja-formatted template file used to render site configurations. self.env.site_template = 'apache/apache_site.template.conf' self.env.error_log = '/var/log/apache2/error.log' self.env.log_level = 'warn' self.env.auth_basic = False self.env.auth_basic_authuserfile = '{apache_docroot}/.htpasswd_{apache_site}' self.env.auth_basic_users = [] # [(user,password)] # If true, activates a rewrite rule that causes domain.com to redirect # to www.domain.com. self.env.enforce_subdomain = True self.env.ssl = True self.env.ssl_chmod = 440 # A list of path patterns that should have HTTPS enforced. self.env.ssl_secure_paths_enforce = True self.env.ssl_secure_paths = ['/admin/(.*)'] self.env.web_user = 'www-data' self.env.web_group = 'www-data' self.env.wsgi_user = 'www-data' self.env.wsgi_group = 'www-data' self.env.chmod = 775 self.env.mods_enabled = ['rewrite', 'wsgi', 'ssl'] # The value of the Apache's ServerName field. Usually should be set # to the domain. 
self.env.server_name = None self.env.server_aliases_template = '' self.env.docroot = '/usr/local/{apache_application_name}' self.env.ports_path = '{apache_root}/ports.conf' self.env.ssl_path = '{apache_root}/ssl' self.env.domain_with_sub_template = '' self.env.domain_without_sub_template = '' self.env.domain_with_sub = None self.env.domain_without_sub = None self.env.wsgi_enabled = False self.env.wsgi_template = 'django/django.template.wsgi' self.env.wsgi_python_path = None self.env.wsgi_scriptalias = None self.env.wsgi_server_memory_gb = 8 self.env.wsgi_processes = 5 self.env.wsgi_threads = 15 self.env.domain_redirect_templates = [] # [(wrong_domain,right_domain)] self.env.domain_redirects = [] # [(wrong_domain,right_domain)] self.env.extra_rewrite_rules = '' self.env.modrpaf_enabled = False self.env.visitors_enabled = False self.env.modevasive_enabled = False self.env.modevasive_DOSEmailNotify = 'admin@localhost' self.env.modevasive_DOSPageInterval = 1 # seconds self.env.modevasive_DOSPageCount = 2 self.env.modevasive_DOSSiteCount = 50 self.env.modevasive_DOSSiteInterval = 1 # seconds self.env.modevasive_DOSBlockingPeriod = 10 # seconds self.env.modsecurity_enabled = False self.env.modsecurity_download_url = 'https://github.com/SpiderLabs/owasp-modsecurity-crs/tarball/master' # OS specific default settings. self.env.specifics = type(self.genv)() self.env.specifics[LINUX] = type(self.genv)() self.env.specifics[LINUX][FEDORA] = type(self.genv)() self.env.specifics[LINUX][FEDORA].root = '/etc/httpd' self.env.specifics[LINUX][FEDORA].conf = '/etc/httpd/conf/httpd.conf' self.env.specifics[LINUX][FEDORA].sites_available = '/etc/httpd/sites-available' self.env.specifics[LINUX][FEDORA].sites_enabled = '/etc/httpd/sites-enabled' self.env.specifics[LINUX][FEDORA].log_dir = '/var/log/httpd' self.env.specifics[LINUX][FEDORA].pid = '/var/run/httpd/httpd.pid' self.env.specifics[LINUX][UBUNTU] = type(self.genv)() self.env.specifics[LINUX][UBUNTU].root = '/etc/apache2' self.env.specifics[LINUX][UBUNTU].conf = '/etc/apache2/httpd.conf' self.env.specifics[LINUX][UBUNTU].sites_available = '/etc/apache2/sites-available' self.env.specifics[LINUX][UBUNTU].sites_enabled = '/etc/apache2/sites-enabled' self.env.specifics[LINUX][UBUNTU].log_dir = '/var/log/apache2' self.env.specifics[LINUX][UBUNTU].pid = '/var/run/apache2/apache2.pid' self.env.delete_site_command = None self.env.manage_httpd_conf = True self.env.manage_ports_conf = True self.env.manage_site_conf = True self.env.ssl_certificates = None self.env.ssl_certificates_templates = [] # Apache site config files use a similar syntax to our template syntax, # so instead of having to escape all of Apache's variables, we list them here so # our templating system knows to not try interpolating them. self.env.ignored_template_variables = [ 'APACHE_LOG_DIR', 'GLOBAL', 'DOCUMENT_ROOT', 'SCRIPT_FILENAME', 'SERVER_NAME', 'REQUEST_URI', 'GROUP', 'Referer', 'User-Agent', 'X-Forwarded-For', 'HTTP:X-Forwarded-Proto', 'HTTPS', 'HTTP', 'HTTP_HOST', 'HTTP_USER_AGENT', 'REMOTE_ADDR', ] # The local and remote relative directory where the SSL certificates are stored. self.env.ssl_dir_local = 'ssl' # An optional segment to insert into the domain, customizable by role. # Useful for easily keying domain-local.com/domain-dev.com/domain-staging.com. self.env.locale = '' self.env.sync_sets = {} # {name:[dict(local_path='static/', remote_path='$AWS_BUCKET:/')]} # This will be appended to the custom Apache configuration file. 
self.env.httpd_conf_append = [] @task def enable_mod(self, name): self.sudo('a2enmod %s' % name) @task def disable_mod(self, name): with self.settings(warn_only=True): self.sudo('a2dismod %s' % name) @task def enable_site(self, name): self.sudo('a2ensite %s' % name) @task def disable_site(self, name): self.sudo('a2dissite %s' % name) @task def optimize_wsgi_processes(self): """ Based on the number of sites per server and the number of resources on the server, calculates the optimal number of processes that should be allocated for each WSGI site. """ r = self.local_renderer #r.env.wsgi_processes = 5 r.env.wsgi_server_memory_gb = 8 verbose = self.verbose all_sites = list(self.iter_sites(site=ALL, setter=self.set_site_specifics)) #(current_mem/current_sites)/current_process = () #(16/x)/(8/16) = y #(16/x)*(16/8) = y #(16*16)/(num_sites*8) = y # @task # def visitors(self, force=0): # """ # Generates an Apache access report using the Visitors command line tool. # Requires the APACHE2_VISITORS service to be enabled for the current host. # """ # if not int(force): # assert ApacheVisitors.name.upper() in self.genv.services or ApacheVisitors.name.lower() in self.genv.services, \ # 'Visitors has not been configured for this host.' # self.run('visitors -o text /var/log/apache2/%(apache_application_name)s-access.log* | less' % self.genv) def create_local_renderer(self): """ Instantiates a new local renderer. Override this to do any additional initialization. """ r = super(ApacheSatchel, self).create_local_renderer() # Dynamically set values based on target operating system. os_version = self.os_version apache_specifics = r.env.specifics[os_version.type][os_version.distro] r.env.update(apache_specifics) return r # def iter_certificates(self): # if self.verbose: # print('apache_ssl_domain:', self.genv.apache_ssl_domain, file=sys.stderr) # for cert_type, cert_file_template in self.genv.apache_ssl_certificates_templates: # if self.verbose: # print('cert_type, cert_file_template:', cert_type, cert_file_template, file=sys.stderr) # _local_cert_file = os.path.join(self.genv.apache_ssl_dir_local, cert_file_template % self.genv) # local_cert_file = self.find_template(_local_cert_file) # assert local_cert_file, 'Unable to find local certificate file: %s' % (_local_cert_file,) # remote_cert_file = os.path.join(self.genv.apache_ssl_dir, cert_file_template % self.genv) # yield cert_type, local_cert_file, remote_cert_file # # @task # def install_ssl(self, site=ALL): # from burlap.common import iter_sites # verbose = self.verbose # # for site, site_data in iter_sites(site=site, setter=self.set_site_specifics): # # site_secure = site+'_secure' # if site_secure not in self.genv.sites: # continue # self.set_site_specifics(site_secure) # # self.sudo_or_dryrun('mkdir -p %(apache_ssl_dir)s' % self.genv) # # if self.genv.apache_ssl: # for cert_type, local_cert_file, remote_cert_file in self.iter_certificates(): # if verbose: # print('='*80) # print('Installing certificate %s...' % (remote_cert_file,)) # self.put_or_dryrun( # local_path=local_cert_file, # remote_path=remote_cert_file, # use_sudo=True) # # self.sudo_or_dryrun('mkdir -p %(apache_ssl_dir)s' % self.genv) # self.sudo_or_dryrun('chown -R %(apache_web_user)s:%(apache_web_group)s %(apache_ssl_dir)s' % self.genv) # self.sudo_or_dryrun('chmod -R %(apache_ssl_chmod)s %(apache_ssl_dir)s' % self.genv) @task def install_auth_basic_user_file(self, site=None): """ Installs users for basic httpd auth. 
""" r = self.local_renderer hostname = self.current_hostname target_sites = self.genv.available_sites_by_host.get(hostname, None) for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics): if self.verbose: print('~'*80, file=sys.stderr) print('Site:', _site, file=sys.stderr) print('env.apache_auth_basic:', r.env.auth_basic, file=sys.stderr) # Only load site configurations that are allowed for this host. if target_sites is not None: assert isinstance(target_sites, (tuple, list)) if _site not in target_sites: continue if not r.env.auth_basic: continue assert r.env.auth_basic_users, 'No apache auth users specified.' for username, password in r.env.auth_basic_users: r.env.auth_basic_username = username r.env.auth_basic_password = password r.env.apache_site = _site r.env.fn = r.format(r.env.auth_basic_authuserfile) if self.files.exists(r.env.fn): r.sudo('htpasswd -b {fn} {auth_basic_username} {auth_basic_password}') else: r.sudo('htpasswd -b -c {fn} {auth_basic_username} {auth_basic_password}') @task def install_auth_basic_user_file_all(self): self.install_auth_basic_user_file(site='all') @task def view_error_log(self): self.run('tail -f {apache_error_log}') @task def sync_media(self, sync_set=None, clean=0, iter_local_paths=0): """ Uploads select media to an Apache accessible directory. """ #from burlap.dj import render_remote_paths # Ensure a site is selected. self.genv.SITE = self.genv.SITE or self.genv.default_site # apache.get_apache_settings() #render_remote_paths() r = self.local_renderer clean = int(clean) self.vprint('Getting site data for %s...' % self.genv.SITE) self.set_site_specifics(self.genv.SITE) #site_data = self.genv.sites[self.genv.SITE] #self.genv.update(site_data) sync_sets = r.env.sync_sets if sync_set: sync_sets = [sync_set] ret_paths = [] for _sync_set in sync_sets: for paths in r.env.sync_sets[_sync_set]: #print 'paths:',paths r.env.sync_local_path = os.path.abspath(paths['local_path'] % self.genv) if paths['local_path'].endswith('/') and not r.env.sync_local_path.endswith('/'): r.env.sync_local_path += '/' if iter_local_paths: ret_paths.append(r.env.sync_local_path) continue r.env.sync_remote_path = paths['remote_path'] % self.genv if clean: r.sudo('rm -Rf {apache_sync_remote_path}') print('Syncing %s to %s...' % (r.env.sync_local_path, r.env.sync_remote_path)) r.env.tmp_chmod = paths.get('chmod', r.env.chmod) #with settings(warn_only=True): r.sudo('mkdir -p {apache_sync_remote_path}') r.sudo('chmod -R {apache_tmp_chmod} {apache_sync_remote_path}') r.local('rsync -rvz --progress --recursive --no-p --no-g ' '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {apache_sync_local_path} {user}@{host_string}:{apache_sync_remote_path}') r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_sync_remote_path}') if iter_local_paths: return ret_paths def get_media_timestamp(self): """ Called after a deployment to record any data necessary to detect changes for a future deployment. """ from burlap.common import get_last_modified_timestamp data = 0 for path in self.sync_media(iter_local_paths=1): data = min(data, get_last_modified_timestamp(path) or data) #TODO:hash media names and content if self.verbose: print('date:', data) return data @task def record_manifest(self): """ Called after a deployment to record any data necessary to detect changes for a future deployment. 
""" manifest = super(ApacheSatchel, self).record_manifest() manifest['available_sites'] = self.genv.available_sites manifest['available_sites_by_host'] = self.genv.available_sites_by_host manifest['media_timestamp'] = self.get_media_timestamp() return manifest @task def configure_modevasive(self): """ Installs the mod-evasive Apache module for combating DDOS attacks. https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache """ r = self.local_renderer if r.env.modevasive_enabled: self.install_packages() # Write conf for each Ubuntu version since they don't conflict. fn = r.render_to_file('apache/apache_modevasive.template.conf') # Ubuntu 12.04 r.put( local_path=fn, remote_path='/etc/apache2/mods-available/mod-evasive.conf', use_sudo=True) # Ubuntu 14.04 r.put( local_path=fn, remote_path='/etc/apache2/mods-available/evasive.conf', use_sudo=True) self.enable_mod('evasive') else: # print('self.last_manifest:', self.last_manifest) # print('a:', self.last_manifest.apache_modevasive_enabled) # print('b:', self.last_manifest.modevasive_enabled) if self.last_manifest.modevasive_enabled: self.disable_mod('evasive') @task def configure_modsecurity(self): """ Installs the mod-security Apache module. https://www.modsecurity.org """ r = self.local_renderer if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled: self.install_packages() # Write modsecurity.conf. fn = self.render_to_file('apache/apache_modsecurity.template.conf') r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True) # Write OWASP rules. r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz' r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}') r.env.modsecurity_download_top = r.sudo( "cd /tmp; " "tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv) r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv) r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv) r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf') r.sudo('rm -f /etc/modsecurity/activated_rules/*') r.sudo('cd /etc/modsecurity/base_rules; ' 'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done') r.sudo('cd /etc/modsecurity/optional_rules; ' 'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done') r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"') self.enable_mod('evasive') self.enable_mod('headers') elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled: self.disable_mod('modsecurity') @task def configure_modrpaf(self): """ Installs the mod-rpaf Apache module. https://github.com/gnif/mod_rpaf """ r = self.local_renderer if r.env.modrpaf_enabled: self.install_packages() self.enable_mod('rpaf') else: if self.last_manifest.modrpaf_enabled: self.disable_mod('mod_rpaf') @task def configure_site(self, full=1, site=None, delete_old=0): """ Configures Apache to host one or more websites. """ from burlap import service r = self.local_renderer print('Configuring Apache...', file=sys.stderr) site = site or self.genv.SITE if int(delete_old) and site == ALL: # Delete all existing enabled and available sites. 
r.sudo('rm -f {sites_available}/*') r.sudo('rm -f {sites_enabled}/*') if r.env.manage_site_conf: # Run an optional customizable command to clear or delete old sites before writing the new ones. if r.env.delete_site_command: r.sudo(r.env.delete_site_command) for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics): r = self.local_renderer #r.env.site = site if self.verbose: print('-'*80, file=sys.stderr) print('Site:', _site, file=sys.stderr) print('-'*80, file=sys.stderr) r.env.apache_site = _site r.env.server_name = r.format(r.env.domain_template) print('r.env.server_name:', r.env.server_name) # Write WSGI template if r.env.wsgi_enabled: r.pc('Writing WSGI template for site %s...' % _site) r.env.wsgi_scriptalias = r.format(r.env.wsgi_scriptalias) fn = self.render_to_file(r.env.wsgi_template) r.env.wsgi_dir = r.env.remote_dir = os.path.split(r.env.wsgi_scriptalias)[0] r.sudo('mkdir -p {remote_dir}') r.put(local_path=fn, remote_path=r.env.wsgi_scriptalias, use_sudo=True) # Write site configuration. r.pc('Writing site configuration for site %s...' % _site) from functools import partial genv = r.collect_genv() genv['current_hostname'] = self.current_hostname print('*'*80) print('apache_wsgi_scriptalias:', genv.apache_wsgi_scriptalias) print('apache_auth_basic_authuserfile:', self.env.auth_basic_authuserfile) r.env.auth_basic_authuserfile = r.format(self.env.auth_basic_authuserfile) fn = self.render_to_file( self.env.site_template, extra=genv, formatter=partial(r.format, ignored_variables=self.env.ignored_template_variables)) r.env.site_conf = _site+'.conf' r.env.site_conf_fqfn = os.path.join(r.env.sites_available, r.env.site_conf) r.put(local_path=fn, remote_path=r.env.site_conf_fqfn, use_sudo=True) self.enable_site(_site) self.clear_local_renderer() # Enable modules. for mod_name in r.env.mods_enabled: with self.settings(warn_only=True): self.enable_mod(mod_name) if int(full): # Write master Apache configuration file. if r.env.manage_httpd_conf: fn = self.render_to_file('apache/apache_httpd.template.conf') r.put(local_path=fn, remote_path=r.env.conf, use_sudo=True) # Write Apache listening ports configuration. if r.env.manage_ports_conf: fn = self.render_to_file('apache/apache_ports.template.conf') r.put(local_path=fn, remote_path=r.env.ports_path, use_sudo=True) r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_root}') @task(precursors=['packager', 'user', 'hostname', 'ip']) def configure(self): self.configure_modevasive() self.configure_modsecurity() self.configure_modrpaf() self.configure_site(full=1, site=ALL) self.install_auth_basic_user_file(site=ALL) self.sync_media() #self.install_ssl(site=ALL) apache = ApacheSatchel()
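

# --------------------------------------------------------------------------
# Illustrative usage sketch (added for reference, not part of the original
# module). Burlap satchels are normally driven through Fabric's "fab" CLI,
# so the exact invocations below are assumptions about the surrounding
# fabfile/role setup rather than a documented interface of this file:
#
#   fab prod apache.configure            # run the full configure() task
#   fab prod apache.enable_site:mysite   # enable a single site
#
# Programmatically, the module-level `apache` instance can also be used
# directly (e.g. apache.enable_mod('headers')), provided a host/role
# context has already been established by burlap.
# --------------------------------------------------------------------------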
import requests import nels_master_api def get_nels_ids(): try: ids = [] response = requests.get(nels_master_api.get_full_url("users/ids" ),auth=(nels_master_api.CLIENT_KEY, nels_master_api.CLIENT_SECRET)) if(response.status_code == requests.codes.ok): json_response = response.json() for uid in json_response: ids.append(uid[u'id']) return ids except: return None def get_user(nels_id): try: response = requests.get(nels_master_api.get_full_url("users/%s" %nels_id ),auth=(nels_master_api.CLIENT_KEY, nels_master_api.CLIENT_SECRET)) if(response.status_code == requests.codes.ok): return response.json() except: return None
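

# --------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module). It only calls the two helpers defined above and assumes that
# nels_master_api provides CLIENT_KEY, CLIENT_SECRET and get_full_url(),
# as implied by the code.
if __name__ == "__main__":
    nels_ids = get_nels_ids()
    if nels_ids is None:
        print("Could not fetch NeLS ids (network or credential problem).")
    else:
        for nels_id in nels_ids:
            user = get_user(nels_id)
            if user is not None:
                print(nels_id, user)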
import pandas as pd import math data = pd.read_csv('data/DATALOG2.CSV', delimiter=",", names=['date', 'time', 'lat', 'lon', 'vgps', 'velocity', 'course', 'heading', 'pitch', 'roll']) # data['vhead'] = data['velocity']*math.cos(math.pi/180*(data['course']-data['heading'])) data['drift'] = data.apply(lambda row: math.fabs(row['velocity'] * math.sin(math.pi / 180 * math.fabs(row['course'] - row['heading']))), axis=1) data['vhead'] = data.apply(lambda row: math.fabs(row['velocity'] * math.cos(math.pi / 180 * (row['course'] - row['heading']))), axis=1) print(data)
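

# --------------------------------------------------------------------------
# Small self-contained sanity check (illustrative addition): the same drift
# and heading-speed formulas applied to a hand-made frame instead of
# DATALOG2.CSV. A craft moving at 10 units with a 30-degree difference
# between course and heading should show a drift of 10*sin(30) = 5 and a
# heading-speed component of 10*cos(30) ~= 8.66.
_example = pd.DataFrame({'velocity': [10.0], 'course': [90.0], 'heading': [60.0]})
_example['drift'] = _example.apply(
    lambda row: math.fabs(row['velocity'] * math.sin(math.pi / 180 * math.fabs(row['course'] - row['heading']))),
    axis=1)
_example['vhead'] = _example.apply(
    lambda row: math.fabs(row['velocity'] * math.cos(math.pi / 180 * (row['course'] - row['heading']))),
    axis=1)
print(_example)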
import logging import multiprocessing import multiprocessing_logging import os log_level_from_env = os.environ.get('LOGLEVEL', '').upper() log_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(funcName)s %(message)s' log_level = logging.DEBUG if log_level_from_env == 'DEBUG' else logging.INFO logging.basicConfig(format=log_format, level=log_level) logger = logging.getLogger(__name__) mp_logger = multiprocessing.get_logger() # mp_handler = logging.StreamHandler() # mp_handler.setLevel(log_level) # mp_handler.setFormatter(logging.Formatter(log_format)) # mp_logger.addHandler(mp_handler) # Handle records from parallel processes to the main process so that they are handled correctly. multiprocessing_logging.install_mp_handler() def _make_debug_record(message): fn, lno, func, sinfo = logger.findCaller() record = logger.makeRecord(logger.name, logging.DEBUG, fn, lno, message, None, None, func=func, extra=None, sinfo=sinfo) return record def debug(message: str): record = _make_debug_record(message) logger.handle(record)
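

# --------------------------------------------------------------------------
# Usage sketch (illustrative addition): how this module is typically
# consumed from worker processes. The worker below is hypothetical; the
# point is that install_mp_handler() above makes records emitted in child
# processes flow through the main-process handlers.
def _example_worker(i):
    """Hypothetical worker used only by the demo block below."""
    logger.info("processing item %s", i)
    return i * i


if __name__ == "__main__":
    debug("starting the demo pool")
    with multiprocessing.Pool(2) as pool:
        results = pool.map(_example_worker, range(4))
    logger.info("results: %s", results)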
## Data and Visual Analytics - Homework 4 ## Georgia Institute of Technology ## Applying ML algorithms to detect seizure import numpy as np import pandas as pd import time from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split from sklearn.metrics import accuracy_score, classification_report from sklearn.svm import SVC from sklearn.linear_model import LinearRegression from sklearn.neural_network import MLPClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler, normalize ######################################### Reading and Splitting the Data ############################################### # XXX # TODO: Read in all the data. Replace the 'xxx' with the path to the data set. # XXX data = pd.read_csv('seizure_dataset.csv') # Separate out the x_data and y_data. x_data = data.loc[:, data.columns != "y"] y_data = data.loc[:, "y"] # The random state to use while splitting the data. random_state = 100 # XXX # TODO: Split 70% of the data into training and 30% into test sets. Call them x_train, x_test, y_train and y_test. # Use the train_test_split method in sklearn with the paramater 'shuffle' set to true and the 'random_state' set to 100. x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size = 0.3, random_state = random_state) # XXX # ############################################### Linear Regression ################################################### # XXX # TODO: Create a LinearRegression classifier and train it. linearReg = LinearRegression().fit(x_train,y_train) # XXX # XXX # TODO: Test its accuracy (on the training set) using the accuracy_score method. print("For Linear Regression:") y_predict_train = linearReg.predict(x_train) y_predict_train_round = [round(k) for k in y_predict_train] train_score = accuracy_score(y_train, y_predict_train_round) print(" Accuracy for training set: " + str(train_score)) # TODO: Test its accuracy (on the testing set) using the accuracy_score method. y_predict_test = linearReg.predict(x_test) y_predict_test_round = [round(k) for k in y_predict_test] test_score = accuracy_score(y_test, y_predict_test_round) print(" Accuracy for testing set: " + str(test_score)) # Note: Use y_predict.round() to get 1 or 0 as the output. # XXX # ############################################### Multi Layer Perceptron ################################################# # XXX # TODO: Create an MLPClassifier and train it. mlpReg = MLPClassifier().fit(x_train,y_train) # XXX # XXX # TODO: Test its accuracy on the training set using the accuracy_score method. print("For Multi Layer Perceptron:") y_predict_train_mlp = mlpReg.predict(x_train) y_predict_train_mlp_round = [round(k) for k in y_predict_train_mlp] train_mlp_score = accuracy_score(y_train, y_predict_train_mlp_round) print(" Accuracy for training set: " + str(train_mlp_score)) # TODO: Test its accuracy on the test set using the accuracy_score method. y_predict_test_mlp = mlpReg.predict(x_test) y_predict_test_mlp_round = [round(k) for k in y_predict_test_mlp] test_mlp_score = accuracy_score(y_test, y_predict_test_mlp_round) print(" Accuracy for testing set: " + str(test_mlp_score)) # XXX # ############################################### Random Forest Classifier ############################################## # XXX # TODO: Create a RandomForestClassifier and train it. rfReg = RandomForestClassifier().fit(x_train, y_train) # XXX # XXX # TODO: Test its accuracy on the training set using the accuracy_score method. 
print("For Random Forest Classifier:") y_predict_train_rf = rfReg.predict(x_train) y_predict_train_rf_round = [round(k) for k in y_predict_train_rf] train_rf_score = accuracy_score(y_train, y_predict_train_rf_round) print(" (Default) Accuracy for training set: " + str(train_rf_score)) # TODO: Test its accuracy on the test set using the accuracy_score method. y_predict_test_rf = rfReg.predict(x_test) y_predict_test_rf_round = [round(k) for k in y_predict_test_rf] test_rf_score = accuracy_score(y_test, y_predict_test_rf_round) print(" (Default) Accuracy for testing set: " + str(test_rf_score)) # ----------------------------------------------------------------------- rfReg_best = RandomForestClassifier(n_estimators=60, max_depth=50).fit(x_train, y_train) y_predict_train_rf_best = rfReg_best.predict(x_train) y_predict_train_rf_round_best = [round(k) for k in y_predict_train_rf_best] train_rf_score_best = accuracy_score(y_train, y_predict_train_rf_round_best) print(" (Best) Accuracy for training set: " + str(train_rf_score_best)) # TODO: Test its accuracy on the test set using the accuracy_score method. y_predict_test_rf_best = rfReg_best.predict(x_test) y_predict_test_rf_round_best = [round(k) for k in y_predict_test_rf_best] test_rf_score_best = accuracy_score(y_test, y_predict_test_rf_round_best) print(" (Best) Accuracy for testing set: " + str(test_rf_score_best)) # XXX # XXX # TODO: Tune the hyper-parameters 'n_estimators' and 'max_depth'. # Print the best params, using .best_params_, and print the best score, using .best_score_. parameters_rf = {'n_estimators':[10, 20, 40, 60, 80, 100, 120, 140], 'max_depth':[6, 8, 10, 30, 50, 75, 100]} rfReg_tune = RandomForestClassifier() rlf = GridSearchCV(rfReg_tune, parameters_rf, cv = 10) rlf.fit(x_train, y_train) print(" Best paramaters after CV:") print(" "+str(rlf.best_params_)) print(" "+str(rlf.best_score_)) # XXX # ############################################ Support Vector Machine ################################################### # XXX # TODO: Pre-process the data to standardize or normalize it, otherwise the grid search will take much longer x_train_nor = normalize(x_train) x_test_nor = normalize(x_test) # TODO: Create a SVC classifier and train it. rfReg = SVC(gamma = 'auto').fit(x_train_nor, y_train) # XXX # XXX # TODO: Test its accuracy on the training set using the accuracy_score method. print("For Support Vector Machine:") y_predict_train_rf = rfReg.predict(x_train_nor) y_predict_train_rf_round = [round(k) for k in y_predict_train_rf] train_rf_score = accuracy_score(y_train, y_predict_train_rf_round) print(" (Default) Accuracy for training set: " + str(train_rf_score)) # TODO: Test its accuracy on the test set using the accuracy_score method. y_predict_test_rf = rfReg.predict(x_test_nor) y_predict_test_rf_round = [round(k) for k in y_predict_test_rf] test_rf_score = accuracy_score(y_test, y_predict_test_rf_round) print(" (Default) Accuracy for testing set: " + str(test_rf_score)) # ----------------------------------------------------------- rfReg_best = SVC(gamma = 'auto', kernel='linear', C=0.001).fit(x_train_nor, y_train) y_predict_train_rf_best = rfReg_best.predict(x_train_nor) y_predict_train_rf_round_best = [round(k) for k in y_predict_train_rf_best] train_rf_score_best = accuracy_score(y_train, y_predict_train_rf_round_best) print(" (Best) Accuracy for training set: " + str(train_rf_score_best)) # TODO: Test its accuracy on the test set using the accuracy_score method. 
y_predict_test_rf_best = rfReg_best.predict(x_test_nor) y_predict_test_rf_round_best = [round(k) for k in y_predict_test_rf_best] test_rf_score_best = accuracy_score(y_test, y_predict_test_rf_round_best) print(" (Best) Accuracy for testing set: " + str(test_rf_score_best)) # XXX # XXX # TODO: Tune the hyper-parameters 'C' and 'kernel' (use rbf and linear). # Print the best params, using .best_params_, and print the best score, using .best_score_. parameters_rf = {'kernel':('linear', 'rbf'), 'C':[0.001, 0.01, 0.1, 1, 10, 100]} rfReg_tune = SVC(gamma = 'auto') clf = GridSearchCV(rfReg_tune, parameters_rf, cv = 10, return_train_score=True) clf.fit(x_train_nor, y_train) print(" Best paramaters after CV:") print(" "+str(clf.best_params_)) print(" "+str(clf.best_score_)) print("mean training score:") print(clf.cv_results_['mean_train_score']) print("mean testing score:") print(clf.cv_results_['mean_test_score']) print("mean fit time:") print(clf.cv_results_['mean_fit_time']) # XXX
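
# --------------------------------------------------------------------------
# Illustrative follow-up (not required by the assignment): GridSearchCV
# refits the best parameter combination on the full training split by
# default, so the tuned model can be evaluated on the held-out test data
# directly via best_estimator_.
best_svc = clf.best_estimator_
y_predict_test_tuned = best_svc.predict(x_test_nor)
print("Tuned SVC accuracy on the test set: " + str(accuracy_score(y_test, y_predict_test_tuned)))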
#!/usr/bin/env python from __future__ import absolute_import import os import shutil import time import datetime from flask.ext.script import Manager from modelconvert import create_app from modelconvert.utils import fs app = create_app() manager = Manager(app) @manager.command def run(): app.run(threaded=True) @manager.command def celeryworker(): """ Runs celery worker within the Flask app context """ from modelconvert.extensions import celery with app.app_context(): if app.config['DEBUG']: celery.worker_main(['worker', '-E', '-l', 'DEBUG']) else: celery.worker_main(['worker', '-E', '-l', 'INFO']) # # FIXME: move this to a celerybeats task # @manager.command def cleanup(longevity=151200, uploads=False): """ Removes generated files. Use cleanup -h for more info """ download_path = os.path.abspath(app.config["DOWNLOAD_PATH"]) upload_path = os.path.abspath(app.config["UPLOAD_PATH"]) # simple protection against dummies. However it is questionable to # give them Unix rm command in this case ;) if not 'tmp/downloads' in download_path or download_path == '/': print("You are using a non-standard location for the download path.") print("Please create your own deletion procedure. If your fs is") print("mounted with mtime support, this command will work fine:\n") print(" find /your/path -mtime +30 -exec rm -rf '{}' \;\n") exit(-1) #longevity = 6300 * 24 longevity = int(longevity) current_time = time.time(); print("Removing files older than {0}".format(datetime.timedelta(seconds=longevity))) def _clean(path, longevity): for root, dirs, files in os.walk(path, topdown=False): for name in files: filepath = os.path.join(root, name) filetime = os.path.getmtime(filepath) if current_time - filetime > longevity: print("Removing file %s" % filepath) os.remove(filepath) for name in dirs: dirpath = os.path.join(root, name) #dirtime = os.path.getmtime(dirpath) #if current_time - dirtime > longevity: if not os.listdir(dirpath): print("Removing directory %s" % dirpath) os.rmdir(dirpath) _clean(download_path, longevity) if uploads: _clean(upload_path, longevity) @manager.command def purge(): """ Kill all files in download paths NOW""" cleanup(0, uploads=True) @manager.command def mkdirs(): """ Create required directories from settings """ dirs = [ app.config['UPLOAD_PATH'], app.config['DOWNLOAD_PATH'], ] for directory in dirs: directory = os.path.abspath(directory) print("Creating directory {0}".format(directory)) fs.mkdir_p(directory) if __name__ == "__main__": manager.run()
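

# --------------------------------------------------------------------------
# Illustrative invocations (added for reference). Flask-Script exposes each
# @manager.command above as a sub-command, and derives option names from
# the function arguments, so the exact flags below are an assumption based
# on that convention rather than captured --help output:
#
#   python manage.py run
#   python manage.py celeryworker
#   python manage.py cleanup --longevity 86400 --uploads
#   python manage.py purge
#   python manage.py mkdirs
# --------------------------------------------------------------------------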
"""Configuration classes for ``varfish-cli case *`` commands.""" import attr import uuid import typing from ..common import CommonConfig @attr.s(frozen=True, auto_attribs=True) class CaseConfig: """Configuration for the ``varfish-cli case`` command.""" #: Global configuration. global_config: CommonConfig @staticmethod def create(args, global_config, toml_config=None): # toml_config = toml_config or {} return CaseConfig(global_config=global_config) @attr.s(frozen=True, auto_attribs=True) class CaseListConfig: """Configuration for the ``varfish-cli case list`` command.""" #: Case configuration. case_config: CaseConfig #: UUID of the case to pull. project_uuid: uuid.UUID @staticmethod def create(args, case_config, toml_config=None): _ = toml_config # toml_config = toml_config or {} return CaseListConfig(case_config=case_config, project_uuid=args.project_uuid) @attr.s(frozen=True, auto_attribs=True) class CaseListImportInfoConfig: """Configuration for the ``varfish-cli case list-import-info`` command.""" #: Case configuration. case_config: CaseConfig #: UUID of the case to pull. project_uuid: uuid.UUID #: Optionally, owner to query for. owner: typing.Optional[str] = None @staticmethod def create(args, case_config, toml_config=None): # toml_config = toml_config or {} return CaseListImportInfoConfig( case_config=case_config, project_uuid=args.project_uuid, owner=args.owner ) @attr.s(frozen=True, auto_attribs=True) class CaseCreateImportInfoConfig: """Configuration for the ``varfish-cli case create-import-info`` command.""" #: Case configuration. case_config: CaseConfig #: Suffix to append to the case name. case_name_suffix: str #: UUID of the case to pull. project_uuid: uuid.UUID #: Path to files to import. paths: typing.List[str] #: Regular expression to use for modifying family. strip_family_regex: str #: Whether to force resubmittal of old resubmit: bool #: Whether to force creation of fresh case import info. force_fresh: bool #: Expected genome build. genomebuild: str @staticmethod def create(args, case_config, strip_family_regex, toml_config=None): _ = toml_config # toml_config = toml_config or {} return CaseCreateImportInfoConfig( case_config=case_config, project_uuid=args.project_uuid, paths=args.paths, strip_family_regex=args.strip_family_regex, case_name_suffix=args.case_name_suffix, resubmit=args.resubmit, force_fresh=args.force_fresh, genomebuild=args.genomebuild, )
import subprocess
import sys
import os
import time
import cProfile


def prepare_io(list_of_files, exe_file, input_path, output_path, job_number):
    """Convert one VCF file (selected by job_number) and archive the result."""
    # Read the list of input file names, one per line.
    with open(list_of_files, "r") as files_to_read:
        list_files = files_to_read.read().split("\n")
    # Job numbers are 1-based (e.g. cluster array task ids).
    job_number = int(job_number) - 1
    input_file = list_files[job_number]
    output_dir = os.path.join(output_path, input_file).replace(".vcf.gz", "/")
    zip_output_path = os.path.join(output_path, input_file).replace(".vcf.gz", ".tar.xz")
    to_read = os.path.join(input_path, input_file)
    logs_path = os.path.join(output_path, "logs")
    profs_path = os.path.join(output_path, "profs")
    if not os.path.isdir(output_dir):
        subprocess.run("mkdir {}".format(output_dir), shell=True, stdout=subprocess.PIPE)
    if not os.path.isdir(logs_path):
        subprocess.run("mkdir {}".format(logs_path), shell=True, stdout=subprocess.PIPE)
    if not os.path.isdir(profs_path):
        subprocess.run("mkdir {}".format(profs_path), shell=True, stdout=subprocess.PIPE)
    log_file = open(os.path.join(logs_path, input_file).replace(".vcf.gz", "_logs.txt"), "a")
    log_file.write("{} \n".format(input_file))
    log_file.flush()
    exe = "{} {} {}".format(exe_file, to_read, output_dir)
    start = time.time()
    if job_number == 0:
        # Run the vcf-to-tensor converter (C++ code) and profile only the first job.
        prof = cProfile.Profile()
        prof.enable()
        subprocess.run(exe, shell=True, stdout=subprocess.PIPE)
        end = time.time()
        prof.disable()
        prof_path = os.path.join(profs_path, input_file).replace(".vcf.gz", "sample.prof")
        prof.dump_stats(prof_path)
        elapsed = (end - start) / 3600  # seconds -> hours
        log_file.write("{} was done in {} hours \n".format(exe, elapsed))
        log_file.flush()
    else:
        subprocess.run(exe, shell=True, stdout=subprocess.PIPE)
        end = time.time()
        elapsed = (end - start) / 3600  # seconds -> hours
        log_file.write("{} was done in {} hours \n".format(exe, elapsed))
        log_file.flush()

    # Archive the output files (xz-compressed, matching the .tar.xz name).
    exe_2 = "tar -cJf {} {}".format(zip_output_path, output_dir)
    start = time.time()
    subprocess.run(exe_2, shell=True, stdout=subprocess.PIPE)
    end = time.time()
    elapsed = (end - start) / 3600  # seconds -> hours
    log_file.write("{} was done in {} hours \n".format(exe_2, elapsed))
    log_file.flush()
    # Remove residual files by syncing the directory against an empty one.
    exe_3 = "rsync -a --delete /home/eniktab/LocalBin/empty/ {}".format(output_dir)
    log_file.write("{} started \n".format(exe_3))
    subprocess.run(exe_3, shell=True, stdout=subprocess.PIPE)
    log_file.write("{} was done \n".format(exe_3))
    log_file.flush()
    log_file.close()


def main(argv):
    prepare_io(list_of_files=argv[0], exe_file=argv[1], input_path=argv[2], output_path=argv[3], job_number=argv[4])


if __name__ == "__main__":
    main(sys.argv[1:])
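
# --------------------------------------------------------------------------
# Example invocation (illustrative): the script expects five positional
# arguments, mirroring main() above. The script name, paths and converter
# binary below are placeholders, and job numbers are 1-based (e.g. one per
# array task in a cluster scheduler):
#
#   python prepare_io.py vcf_list.txt /path/to/vcf2tensor /data/vcf /data/out 3
# --------------------------------------------------------------------------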
import numpy as np # Collection of activation functions # Reference: https://en.wikipedia.org/wiki/Activation_function class Sigmoid(): def __call__(self, x): return 1 / (1 + np.exp(-x)) def gradient(self, x): return self.__call__(x) * (1 - self.__call__(x)) class Softmax(): def __call__(self, x): e_x = np.exp(x - np.max(x, axis=-1, keepdims=True)) return e_x / np.sum(e_x, axis=-1, keepdims=True) def gradient(self, x): p = self.__call__(x) return p * (1 - p) class TanH(): def __call__(self, x): return 2 / (1 + np.exp(-2*x)) - 1 def gradient(self, x): return 1 - np.power(self.__call__(x), 2) class ReLU(): def __call__(self, x): return np.where(x >= 0, x, 0) def gradient(self, x): return np.where(x >= 0, 1, 0) class LeakyReLU(): def __init__(self, alpha=0.2): self.alpha = alpha def __call__(self, x): return np.where(x >= 0, x, self.alpha * x) def gradient(self, x): return np.where(x >= 0, 1, self.alpha) class ELU(): def __init__(self, alpha=0.1): self.alpha = alpha def __call__(self, x): return np.where(x >= 0.0, x, self.alpha * (np.exp(x) - 1)) def gradient(self, x): return np.where(x >= 0.0, 1, self.__call__(x) + self.alpha) class SELU(): # Reference : https://arxiv.org/abs/1706.02515, # https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb def __init__(self): self.alpha = 1.6732632423543772848170429916717 self.scale = 1.0507009873554804934193349852946 def __call__(self, x): return self.scale * np.where(x >= 0.0, x, self.alpha*(np.exp(x)-1)) def gradient(self, x): return self.scale * np.where(x >= 0.0, 1, self.alpha * np.exp(x)) class SoftPlus(): def __call__(self, x): return np.log(1 + np.exp(x)) def gradient(self, x): return 1 / (1 + np.exp(-x))
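

# --------------------------------------------------------------------------
# Quick usage sketch (illustrative addition): the classes above are plain
# callables, so they can be exercised on a small array directly.
if __name__ == "__main__":
    x = np.array([[-2.0, -0.5, 0.0, 0.5, 2.0]])
    relu = ReLU()
    softmax = Softmax()
    print("relu(x)            :", relu(x))
    print("relu gradient      :", relu.gradient(x))
    print("softmax(x) sums to :", np.sum(softmax(x), axis=-1))  # should be ~1.0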
#!/usr/bin/env python3.7 import sys from blist import blist from collections import defaultdict # Solution to the day 9 puzzle from Advent of Code 2018. # https://adventofcode.com/2018/day/9 def parse_data(filename): """ Load the data from FILENAME. """ data = list() with open(filename) as f: elements = f.readline().rstrip().split(' ') data = [int(elements[0]), int(elements[6])] return data if __name__ == "__main__": if len(sys.argv) == 2: players, marbles = parse_data(sys.argv[1]) current_player = 0 board = blist([0]) current_marble = 0 scores = defaultdict(int) for i in range(marbles): marble_value = i + 1 if marble_value % 23 == 0: current_marble = (current_marble - 7) % len(board) scores[current_player] += (marble_value + board.pop(current_marble)) else: current_marble = ((current_marble + 1) % len(board)) + 1 board.insert(current_marble,marble_value) current_player = (current_player + 1) % players print("For " + str(players) + " players with " + str(marbles) + " marbles, the high score is " + str(max(scores.values())) + ".") else: print("Usage: " + sys.argv[0] + " <data-file>")
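
# --------------------------------------------------------------------------
# Example input/run (illustrative): the data file is expected to contain a
# single line such as
#
#   464 players; last marble is worth 71730 points
#
# since parse_data() reads fields 0 and 6 of that sentence. Invocation
# ("day09.py" is a placeholder filename):
#
#   ./day09.py input.txt
#
# With the small example from the puzzle statement (9 players, last marble
# worth 25 points) the reported high score should be 32.
# --------------------------------------------------------------------------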
import time import hashlib import requests import urllib3 from lxml import etree urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) def xdl_proxy(orderno, secret, host, port): host_port = host + ":" + port # get sign timestamp = str(int(time.time())) string = "" string = "orderno=" + orderno + "," + "secret=" + secret + "," + "timestamp=" + timestamp string = string.encode() md5_string = hashlib.md5(string).hexdigest() sign = md5_string.upper() # get auth auth = "sign=" + sign + "&" + "orderno=" + orderno + "&" + "timestamp=" + timestamp proxy = { "http": "http://" + host_port, "https": "https://" + host_port} return proxy, auth
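

# --------------------------------------------------------------------------
# Usage sketch (illustrative, with placeholder credentials and host). The
# order number, secret, host and port come from the proxy provider; the
# returned ``auth`` string is assumed to be sent as the Proxy-Authorization
# header, which is the usual convention for this kind of forwarding proxy.
if __name__ == "__main__":
    proxy, auth = xdl_proxy("ZF2020xxxxxxxxxx", "my-secret", "proxy.example.com", "80")
    headers = {"Proxy-Authorization": auth}
    resp = requests.get("https://httpbin.org/ip", headers=headers,
                        proxies=proxy, verify=False, timeout=10)
    print(resp.text)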
# -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: utils
Author: czh
Create Date: 2021/8/6
--------------------------------------
Change Activity:
======================================
"""
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.nn.parameter import Parameter
import numpy as np


class LayerNorm(nn.Module):
    def __init__(self, input_dim, cond_dim=0, center=True, scale=True, epsilon=None, conditional=False,
                 hidden_units=None, hidden_initializer='xavier'):
        """
        :param input_dim: inputs.shape[-1]
        :param cond_dim: cond.shape[-1]
        :param center:
        :param scale:
        :param epsilon:
        :param conditional: if True, this is a conditional LayerNorm
        :param hidden_units:
        :param hidden_initializer:
        """
        super(LayerNorm, self).__init__()
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_initializer = hidden_initializer
        self.epsilon = epsilon or 1e-12
        self.input_dim = input_dim
        self.cond_dim = cond_dim

        if self.center:
            self.beta = Parameter(torch.zeros(input_dim))
        if self.scale:
            self.gamma = Parameter(torch.ones(input_dim))

        if self.conditional:
            if self.hidden_units is not None:
                self.hidden_dense = nn.Linear(in_features=self.cond_dim, out_features=self.hidden_units, bias=False)
            if self.center:
                self.beta_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)
            if self.scale:
                self.gamma_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)

        self.initialize_weights()

    def initialize_weights(self):
        if self.conditional:
            if self.hidden_units is not None:
                if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
                elif self.hidden_initializer == 'xavier':  # glorot_uniform
                    torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
            # Why are these two initialized to zero?
            # To avoid disturbing the original pre-trained weights, the two transformation matrices can be
            # zero-initialized (a single linear layer may be zero-initialized; only stacked layers should not be),
            # so that at the start of training the model behaves exactly like the original pre-trained model.
            if self.center:
                torch.nn.init.constant_(self.beta_dense.weight, 0)
            if self.scale:
                torch.nn.init.constant_(self.gamma_dense.weight, 0)

    def forward(self, inputs, cond=None):
        """
            For a conditional LayerNorm, cond must not be None.
        """
        gamma = 1
        beta = 0
        if self.conditional:
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # for _ in range(K.ndim(inputs) - K.ndim(cond)):
            # K.ndim: returns the number of axes of a tensor as an integer.
            # TODO: why do the two tensors differ in the number of axes? Why unsqueeze at dim=1?
            # To keep the dimensions consistent, cond may come in as (batch_size, cond_dim).
            for _ in range(len(inputs.shape) - len(cond.shape)):
                cond = cond.unsqueeze(1)  # cond = K.expand_dims(cond, 1)

            # Apply a linear transform to cond before adding it to beta/gamma so that it matches the input dim.
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma

        outputs = inputs
        if self.center:
            mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
            outputs = outputs - mean
        if self.scale:
            variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
            std = (variance + self.epsilon) ** 0.5  # standard deviation, i.e. sqrt(var + eps)
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta

        return outputs


def sequence_masking(x: torch.Tensor, mask: torch.Tensor, value=0.0, axis=None):
    """Apply a conditional mask to a sequence tensor.
    mask: a 0/1 matrix of shape (batch_size, seq_len);
    value: the value masked positions are replaced with, may be '-inf' or 'inf';
    axis: the axis of the sequence dimension, defaults to 1.
    """
    if mask is None:
        return x
    else:
        if mask.dtype != x.dtype:
            mask = mask.to(x.dtype)
        if value == '-inf':
            value = -1e12
        elif value == 'inf':
            value = 1e12
        if axis is None:
            axis = 1
        elif axis < 0:
            axis = x.ndim + axis
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = torch.unsqueeze(mask, 1)
        for _ in range(x.ndim - mask.ndim):
            mask = torch.unsqueeze(mask, mask.ndim)
        return x * mask + value * (1 - mask)


def _generate_relative_positions_matrix(length, max_relative_position, cache=False):
    """Generates matrix of relative positions between inputs."""
    if not cache:
        range_vec = torch.arange(length)
        range_mat = range_vec.repeat(length).view(length, length)
        distance_mat = range_mat - torch.t(range_mat)
    else:
        distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0)
    distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
    final_mat = distance_mat_clipped + max_relative_position
    return final_mat


def _generate_relative_positions_embeddings(seq_length, embed_dim, max_relative_position=127):
    vocab_size = max_relative_position * 2 + 1
    range_vec = torch.arange(seq_length)
    range_mat = range_vec.repeat(seq_length).view(seq_length, seq_length)
    distance_mat = range_mat - torch.t(range_mat)
    distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
    final_mat = distance_mat_clipped + max_relative_position
    embeddings_table = np.zeros([vocab_size, embed_dim])
    for pos in range(vocab_size):
        for i in range(embed_dim // 2):
            embeddings_table[pos, 2 * i] = np.sin(pos / np.power(10000, 2 * i / embed_dim))
            embeddings_table[pos, 2 * i + 1] = np.cos(pos / np.power(10000, 2 * i / embed_dim))

    embeddings_table_tensor = torch.tensor(embeddings_table).float()
    flat_relative_positions_matrix = final_mat.view(-1)
    one_hot_relative_positions_matrix = func.one_hot(flat_relative_positions_matrix, num_classes=vocab_size).float()
    embeddings = torch.matmul(one_hot_relative_positions_matrix, embeddings_table_tensor)
    my_shape = list(final_mat.size())
    my_shape.append(embed_dim)
    embeddings = embeddings.view(my_shape)
    # print(embeddings.shape)
    return embeddings
# Test:
# print(_generate_relative_positions_embeddings(6, 32, 4)[0, 0, :])


class HandshakingKernel(nn.Module):
    """
    The handshaking kernel from the TPLinker method.
    """
    def __init__(self, hidden_size, shaking_type, inner_enc_type):
        super().__init__()
        self.shaking_type = shaking_type
        if shaking_type == "cat":
            self.combine_fc = nn.Linear(hidden_size * 2, hidden_size)
        elif shaking_type == "cat_plus":
            self.combine_fc = nn.Linear(hidden_size * 3, hidden_size)
        elif shaking_type == "cln":
            self.tp_cln = LayerNorm(hidden_size, hidden_size,
conditional=True) elif shaking_type == "cln_plus": self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional=True) self.inner_context_cln = LayerNorm(hidden_size, hidden_size, conditional=True) self.inner_enc_type = inner_enc_type if inner_enc_type == "mix_pooling": self.lamtha = Parameter(torch.rand(hidden_size)) elif inner_enc_type == "lstm": self.inner_context_lstm = nn.LSTM(hidden_size, hidden_size, num_layers=1, bidirectional=False, batch_first=True) def enc_inner_hiddens(self, seq_hiddens, inner_enc_type="lstm"): # seq_hiddens: (batch_size, seq_len, hidden_size) def pool(seqence, pooling_type): if pooling_type == "mean_pooling": pooling = torch.mean(seqence, dim=-2) # (batch_size, hidden_size) elif pooling_type == "max_pooling": pooling, _ = torch.max(seqence, dim=-2) # (batch_size, hidden_size) elif pooling_type == "mix_pooling": pooling = self.lamtha * torch.mean(seqence, dim=-2) + (1 - self.lamtha) * torch.max(seqence, dim=-2)[0] else: raise ValueError("'pooling_type must be one of the list: " "['mean_pooling', 'max_pooling', 'mix_pooling']'") return pooling if "pooling" in inner_enc_type: inner_context = torch.stack( [pool(seq_hiddens[:, :i + 1, :], inner_enc_type) for i in range(seq_hiddens.size()[1])], dim=1) elif inner_enc_type == "lstm": inner_context, _ = self.inner_context_lstm(seq_hiddens) else: raise ValueError("'inner_enc_type' must be one of the list: " "['mean_pooling', 'max_pooling', 'mix_pooling', 'lstm']") return inner_context def forward(self, seq_hiddens): """ seq_hiddens: (batch_size, seq_len, hidden_size) return: shaking_hiddenss: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size) (32, 5+4+3+2+1, 5) """ seq_len = seq_hiddens.size()[-2] shaking_hiddens_list = [] for ind in range(seq_len): hidden_each_step = seq_hiddens[:, ind, :] visible_hiddens = seq_hiddens[:, ind:, :] # ind: only look back repeat_hiddens = hidden_each_step[:, None, :].repeat(1, seq_len - ind, 1) if self.shaking_type == "cat": shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens], dim=-1) shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens)) elif self.shaking_type == "cat_plus": inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type) shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens, inner_context], dim=-1) shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens)) elif self.shaking_type == "cln": shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens) elif self.shaking_type == "cln_plus": inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type) shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens) shaking_hiddens = self.inner_context_cln(shaking_hiddens, inner_context) else: raise ValueError("'shaking_type' must be one of the list: " "['cat', 'cat_plus', 'cln', 'cln_plus']") shaking_hiddens_list.append(shaking_hiddens) long_shaking_hiddens = torch.cat(shaking_hiddens_list, dim=1) return long_shaking_hiddens class MyMaths: @staticmethod def handshaking_len2matrix_size(hsk_len): matrix_size = int((2 * hsk_len + 0.25) ** 0.5 - 0.5) return matrix_size class MyMatrix: @staticmethod def get_shaking_idx2matrix_idx(matrix_size): """ :param matrix_size: :return: a list mapping shaking sequence points to matrix points """ shaking_idx2matrix_idx = [(ind, end_ind) for ind in range(matrix_size) for end_ind in list(range(matrix_size))[ind:]] return shaking_idx2matrix_idx @staticmethod def get_matrix_idx2shaking_idx(matrix_size): """ :param matrix_size: :return: a matrix mapping matrix points to shaking sequence 
points """ matrix_idx2shaking_idx = [[0 for _ in range(matrix_size)] for _ in range(matrix_size)] shaking_idx2matrix_idx = MyMatrix.get_shaking_idx2matrix_idx(matrix_size) for shaking_ind, matrix_ind in enumerate(shaking_idx2matrix_idx): matrix_idx2shaking_idx[matrix_ind[0]][matrix_ind[1]] = shaking_ind return matrix_idx2shaking_idx @staticmethod def mirror(shaking_seq): """ copy upper region to lower region :param shaking_seq: :return: """ batch_size, handshaking_seq_len, hidden_size = shaking_seq.size() matrix_size = MyMaths.handshaking_len2matrix_size(handshaking_seq_len) map_ = MyMatrix.get_matrix_idx2shaking_idx(matrix_size) mirror_select_ids = [map_[i][j] if i <= j else map_[j][i] for i in range(matrix_size) for j in range(matrix_size)] mirror_select_vec = torch.tensor(mirror_select_ids).to(shaking_seq.device) matrix = torch.index_select(shaking_seq, dim=1, index=mirror_select_vec) matrix = matrix.view(batch_size, matrix_size, matrix_size, hidden_size) return matrix @staticmethod def upper_reg2seq(ori_tensor): """ drop lower triangular part and flat upper triangular part to sequence :param ori_tensor: (batch_size, matrix_size, matrix_size, hidden_size) :return: (batch_size, matrix_size + ... + 1, hidden_size) """ tensor = ori_tensor.permute(0, 3, 1, 2).contiguous() uppder_ones = torch.ones([tensor.size()[-2], tensor.size()[-1]]).long().triu().to(ori_tensor.device) upper_diag_ids = torch.nonzero(uppder_ones.view(-1), as_tuple=False).view(-1) # flat_tensor: (batch_size, matrix_size * matrix_size, hidden_size) flat_tensor = tensor.view(tensor.size()[0], tensor.size()[1], -1).permute(0, 2, 1) tensor_upper = torch.index_select(flat_tensor, dim=1, index=upper_diag_ids) return tensor_upper @staticmethod def lower_reg2seq(ori_tensor): """ drop upper triangular part and flat lower triangular part to sequence :param ori_tensor: (batch_size, matrix_size, matrix_size, hidden_size) :return: (batch_size, matrix_size + ... 
+ 1, hidden_size) """ tensor = ori_tensor.permute(0, 3, 1, 2).contiguous() lower_ones = torch.ones([tensor.size()[-2], tensor.size()[-1]]).long().tril().to(ori_tensor.device) lower_diag_ids = torch.nonzero(lower_ones.view(-1), as_tuple=False).view(-1) # flat_tensor: (batch_size, matrix_size * matrix_size, hidden_size) flat_tensor = tensor.view(tensor.size()[0], tensor.size()[1], -1).permute(0, 2, 1) tensor_lower = torch.index_select(flat_tensor, dim=1, index=lower_diag_ids) return tensor_lower @staticmethod def shaking_seq2matrix(sequence): """ map sequence tensor to matrix tensor; only upper region has values, pad 0 to the lower region :param sequence: :return: """ # sequence: (batch_size, seq_len, hidden_size) batch_size, seq_len, hidden_size = sequence.size() matrix_size = MyMaths.handshaking_len2matrix_size(seq_len) map_ = MyMatrix.get_matrix_idx2shaking_idx(matrix_size) index_ids = [map_[i][j] if i <= j else seq_len for i in range(matrix_size) for j in range(matrix_size)] sequence_w_ze = func.pad(sequence, (0, 0, 0, 1), "constant", 0) index_tensor = torch.LongTensor(index_ids).to(sequence.device) long_seq = torch.index_select(sequence_w_ze, dim=1, index=index_tensor) return long_seq.view(batch_size, matrix_size, matrix_size, hidden_size) class SingleSourceHandshakingKernel(nn.Module): def __init__(self, hidden_size, shaking_type, only_look_after=True, distance_emb_dim=-1): super().__init__() self.shaking_types = shaking_type.split("+") self.only_look_after = only_look_after cat_length = 0 if "cat" in self.shaking_types: self.cat_fc = nn.Linear(hidden_size * 2, hidden_size) cat_length += hidden_size if "cmm" in self.shaking_types: self.cat_fc = nn.Linear(hidden_size * 4, hidden_size) self.guide_fc = nn.Linear(hidden_size, hidden_size) self.vis_fc = nn.Linear(hidden_size, hidden_size) cat_length += hidden_size if "mul" in self.shaking_types: self.guide_fc = nn.Linear(hidden_size, hidden_size) self.vis_fc = nn.Linear(hidden_size, hidden_size) self.mul_fc = nn.Linear(hidden_size, hidden_size) if "cln" in self.shaking_types: self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional=True) cat_length += hidden_size if "lstm" in self.shaking_types: assert only_look_after is True self.lstm4span = nn.LSTM(hidden_size, hidden_size, num_layers=1, bidirectional=False, batch_first=True) cat_length += hidden_size elif "gru" in self.shaking_types: assert only_look_after is True self.lstm4span = nn.GRU(hidden_size, hidden_size, num_layers=1, bidirectional=False, batch_first=True) cat_length += hidden_size if "bilstm" in self.shaking_types: assert only_look_after is True self.lstm4span = nn.LSTM(hidden_size, hidden_size // 2, num_layers=1, bidirectional=False, batch_first=True) self.lstm4span_back = nn.LSTM(hidden_size, hidden_size // 2, num_layers=1, bidirectional=False, batch_first=True) cat_length += hidden_size elif "bigru" in self.shaking_types: assert only_look_after is True self.lstm4span = nn.GRU(hidden_size, hidden_size // 2, num_layers=1, bidirectional=False, batch_first=True) self.lstm4span_back = nn.GRU(hidden_size, hidden_size // 2, num_layers=1, bidirectional=False, batch_first=True) cat_length += hidden_size if "biaffine" in self.shaking_types: self.biaffine = nn.Bilinear(hidden_size, hidden_size, hidden_size) cat_length += hidden_size self.distance_emb_dim = distance_emb_dim if distance_emb_dim > 0: self.dist_emb = nn.Embedding(512, distance_emb_dim) self.dist_ids_matrix = None # for cache cat_length += distance_emb_dim self.aggr_fc = nn.Linear(cat_length, hidden_size) def 
forward(self, seq_hiddens): """ seq_hiddens: (batch_size, seq_len, hidden_size_x) return: if only look after: shaking_hiddenss: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size); e.g. (32, 5+4+3+2+1, 5) else: shaking_hiddenss: (batch_size, seq_len * seq_len, hidden_size) """ # seq_len = seq_hiddens.size()[1] batch_size, seq_len, vis_hidden_size = seq_hiddens.size() guide = seq_hiddens[:, :, None, :].repeat(1, 1, seq_len, 1) visible = guide.permute(0, 2, 1, 3) feature_pre_list = [] if self.only_look_after: if len({"lstm", "bilstm", "gru", "bigru"}.intersection(self.shaking_types)) > 0: # batch_size, _, matrix_size, vis_hidden_size = visible.size() # mask lower triangle part upper_visible = visible.permute(0, 3, 1, 2).triu().permute(0, 2, 3, 1).contiguous() # visible4lstm: (batch_size * matrix_size, matrix_size, hidden_size) visible4lstm = upper_visible.view(batch_size * seq_len, seq_len, -1) span_pre, _ = self.lstm4span(visible4lstm) span_pre = span_pre.view(batch_size, seq_len, seq_len, -1) if len({"bilstm", "bigru"}.intersection(self.shaking_types)) > 0: # mask upper triangle part lower_visible = visible.permute(0, 3, 1, 2).tril().permute(0, 2, 3, 1).contiguous() visible4lstm_back = lower_visible.view(batch_size * seq_len, seq_len, -1) visible4lstm_back = torch.flip(visible4lstm_back, [1, ]) span_pre_back, _ = self.lstm4span_back(visible4lstm_back) span_pre_back = torch.flip(span_pre_back, [1, ]) span_pre_back = span_pre_back.view(batch_size, seq_len, seq_len, -1) span_pre_back = span_pre_back.permute(0, 2, 1, 3) span_pre = torch.cat([span_pre, span_pre_back], dim=-1) # drop lower triangle and convert matrix to sequence # span_pre: (batch_size, shaking_seq_len, hidden_size) span_pre = MyMatrix.upper_reg2seq(span_pre) feature_pre_list.append(span_pre) # guide, visible: (batch_size, shaking_seq_len, hidden_size) guide = MyMatrix.upper_reg2seq(guide) visible = MyMatrix.upper_reg2seq(visible) if "cat" in self.shaking_types: tp_cat_pre = torch.cat([guide, visible], dim=-1) tp_cat_pre = torch.relu(self.cat_fc(tp_cat_pre)) feature_pre_list.append(tp_cat_pre) if "cmm" in self.shaking_types: # cat and multiple tp_cat_pre = torch.cat([guide, visible, torch.abs(guide - visible), torch.mul(self.guide_fc(guide), self.vis_fc(visible))], dim=-1) tp_cat_pre = torch.relu(self.cat_fc(tp_cat_pre)) feature_pre_list.append(tp_cat_pre) if "cln" in self.shaking_types: tp_cln_pre = self.tp_cln(visible, guide) feature_pre_list.append(tp_cln_pre) if "biaffine" in self.shaking_types: biaffine_pre = self.biaffine(guide, visible) biaffine_pre = torch.relu(biaffine_pre) feature_pre_list.append(biaffine_pre) if self.distance_emb_dim > 0: if self.dist_ids_matrix is None or \ self.dist_ids_matrix.size()[0] != batch_size or \ self.dist_ids_matrix.size()[1] != seq_len: # need to update cached distance ids t = torch.arange(0, seq_len).to(seq_hiddens.device)[:, None].repeat(1, seq_len) self.dist_ids_matrix = torch.abs(t - t.permute(1, 0)).long()[None, :, :].repeat(batch_size, 1, 1) if self.only_look_after: # matrix to handshaking seq self.dist_ids_matrix = MyMatrix.upper_reg2seq( self.dist_ids_matrix[:, :, :, None]).view(batch_size, -1) dist_embeddings = self.dist_emb(self.dist_ids_matrix) feature_pre_list.append(dist_embeddings) output_hiddens = self.aggr_fc(torch.cat(feature_pre_list, dim=-1)) return output_hiddens class CrossLSTM(nn.Module): def __init__(self, in_feature_dim=None, out_feature_dim=None, num_layers=1, hv_comb_type="cat" ): super().__init__() self.vertical_lstm = nn.LSTM(in_feature_dim, out_feature_dim 
// 2, num_layers=num_layers, bidirectional=True, batch_first=True) self.horizontal_lstm = nn.LSTM(in_feature_dim, out_feature_dim // 2, num_layers=num_layers, bidirectional=True, batch_first=True) self.hv_comb_type = hv_comb_type if hv_comb_type == "cat": self.combine_fc = nn.Linear(out_feature_dim * 2, out_feature_dim) elif hv_comb_type == "add": pass elif hv_comb_type == "interpolate": self.lamtha = Parameter(torch.rand(out_feature_dim)) # [0, 1) def forward(self, matrix): # matrix: (batch_size, matrix_ver_len, matrix_hor_len, hidden_size) batch_size, matrix_ver_len, matrix_hor_len, hidden_size = matrix.size() hor_context, _ = self.horizontal_lstm(matrix.view(-1, matrix_hor_len, hidden_size)) hor_context = hor_context.view(batch_size, matrix_ver_len, matrix_hor_len, hidden_size) ver_context, _ = self.vertical_lstm( matrix.permute(0, 2, 1, 3).contiguous().view(-1, matrix_ver_len, hidden_size)) ver_context = ver_context.view(batch_size, matrix_hor_len, matrix_ver_len, hidden_size) ver_context = ver_context.permute(0, 2, 1, 3) comb_context = None if self.hv_comb_type == "cat": comb_context = torch.relu(self.combine_fc(torch.cat([hor_context, ver_context], dim=-1))) elif self.hv_comb_type == "interpolate": comb_context = self.lamtha * hor_context + (1 - self.lamtha) * ver_context elif self.hv_comb_type == "add": comb_context = (hor_context + ver_context) / 2 return comb_context class CrossConv(nn.Module): def __init__(self, channel_dim, hor_dim, ver_dim ): super(CrossConv, self).__init__() self.alpha = Parameter(torch.randn([channel_dim, hor_dim, 1])) self.beta = Parameter(torch.randn([channel_dim, 1, ver_dim])) def forward(self, matrix_tensor): # matrix_tensor: (batch_size, ver_dim, hor_dim, hidden_size) # hor_cont: (batch_size, hidden_size (channel dim), ver_dim, 1) hor_cont = torch.matmul(matrix_tensor.permute(0, 3, 1, 2), self.alpha) # ver_cont: (batch_size, hidden_size, 1, hor_dim) ver_cont = torch.matmul(self.beta, matrix_tensor.permute(0, 3, 1, 2)) # cross_context: (batch_size, ver_dim, hor_dim, hidden_size) cross_context = torch.matmul(hor_cont, ver_cont).permute(0, 2, 3, 1) return cross_context class CrossPool(nn.Module): def __init__(self, hidden_size): super(CrossPool, self).__init__() self.lamtha = Parameter(torch.rand(hidden_size)) def mix_pool(self, tensor, dim): return self.lamtha * torch.mean(tensor, dim=dim) + (1 - self.lamtha) * torch.max(tensor, dim=dim)[0] def forward(self, matrix_tensor): # matrix_tensor: (batch_size, ver_dim, hor_dim, hidden_size) # hor_cont: (batch_size, hidden_size, ver_dim, 1) hor_cont = self.mix_pool(matrix_tensor, dim=2)[:, :, None, :].permute(0, 3, 1, 2) # ver_cont: (batch_size, hidden_size, 1, hor_dim) ver_cont = self.mix_pool(matrix_tensor, dim=1)[:, None, :, :].permute(0, 3, 1, 2) # cross_context: (batch_size, ver_dim, hor_dim, hidden_size) cross_context = torch.matmul(hor_cont, ver_cont).permute(0, 2, 3, 1) return cross_context class EdgeUpdate(nn.Module): def __init__(self, hidden_dim, dim_e, dropout_ratio=0.5): super(EdgeUpdate, self).__init__() self.hidden_dim = hidden_dim self.dim_e = dim_e self.dropout = dropout_ratio self.W = nn.Linear(self.hidden_dim * 2 + self.dim_e, self.dim_e) def forward(self, edge, node1, node2): """ :param edge: [batch, seq, seq, dim_e] :param node1: [batch, seq, seq, dim] :param node2: [batch, seq, seq, dim] :return: """ node = torch.cat([node1, node2], dim=-1) # [batch, seq, seq, dim * 2] edge = self.W(torch.cat([edge, node], dim=-1)) return edge # [batch, seq, seq, dim_e] class GraphConvLayer(nn.Module): """ A 
GCN module operated on dependency graphs. """ def __init__(self, dep_embed_dim, gcn_dim, pooling='avg'): super(GraphConvLayer, self).__init__() self.gcn_dim = gcn_dim self.dep_embed_dim = dep_embed_dim self.pooling = pooling self.W = nn.Linear(self.gcn_dim, self.gcn_dim) self.highway = EdgeUpdate(gcn_dim, self.dep_embed_dim, dropout_ratio=0.5) def forward(self, weight_adj, node_hiddens): """ :param weight_adj: [batch, seq, seq, dim_e] :param node_hiddens: [batch, seq, dim] :return: """ batch, seq, dim = node_hiddens.shape weight_adj = weight_adj.permute(0, 3, 1, 2) # [batch, dim_e, seq, seq] node_hiddens = node_hiddens.unsqueeze(1).expand(batch, self.dep_embed_dim, seq, dim) ax = torch.matmul(weight_adj, node_hiddens) # [batch, dim_e, seq, dim] if self.pooling == 'avg': ax = ax.mean(dim=1) elif self.pooling == 'max': ax, _ = ax.max(dim=1) elif self.pooling == 'sum': ax = ax.sum(dim=1) # Ax: [batch, seq, dim] gcn_outputs = self.W(ax) weights_gcn_outputs = func.relu(gcn_outputs) node_outputs = weights_gcn_outputs # Edge update weight_adj[batch, dim_e, seq, seq] weight_adj = weight_adj.permute(0, 2, 3, 1).contiguous() # [batch, seq, seq, dim_e] node_outputs1 = node_outputs.unsqueeze(1).expand(batch, seq, seq, dim) node_outputs2 = node_outputs1.permute(0, 2, 1, 3).contiguous() edge_outputs = self.highway(weight_adj, node_outputs1, node_outputs2) return edge_outputs, node_outputs class Indexer: def __init__(self, tag2id, max_seq_len, spe_tag_dict): self.tag2id = tag2id self.max_seq_len = max_seq_len self.spe_tag_dict = spe_tag_dict def index_tag_list_w_matrix_pos(self, tags): """ :param tags: [[pos_i, pos_j, tag1], [pos_i, pos_j, tag2], ...] :return: """ for t in tags: if t[2] in self.tag2id: t[2] = self.tag2id[t[2]] else: t[2] = self.spe_tag_dict["[UNK]"] return tags @staticmethod def pad2length(tags, padding_tag, length): if len(tags) < length: tags.extend([padding_tag] * (length - len(tags))) return tags[:length] def index_tag_list(self, tags): """ tags: [t1, t2, t3, ...] """ tag_ids = [] for t in tags: if t not in self.tag2id: tag_ids.append(self.spe_tag_dict["[UNK]"]) else: tag_ids.append(self.tag2id[t]) if len(tag_ids) < self.max_seq_len: tag_ids.extend([self.spe_tag_dict["[PAD]"]] * (self.max_seq_len - len(tag_ids))) return tag_ids[:self.max_seq_len] @staticmethod def get_shaking_idx2matrix_idx(matrix_size): return MyMatrix.get_shaking_idx2matrix_idx(matrix_size) @staticmethod def get_matrix_idx2shaking_idx(matrix_size): return MyMatrix.get_matrix_idx2shaking_idx(matrix_size) @staticmethod def points2multilabel_shaking_seq(points, matrix_size, tag_size): """ Convert points to a shaking sequence tensor points: [(start_ind, end_ind, tag_id), ] return: shaking_seq: (shaking_seq_len, tag_size) """ matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size) shaking_seq_len = matrix_size * (matrix_size + 1) // 2 shaking_seq = torch.zeros(shaking_seq_len, tag_size).long() for sp in points: shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]] shaking_seq[shaking_idx][sp[2]] = 1 return shaking_seq @staticmethod def points2multilabel_shaking_seq_batch(batch_points, matrix_size, tag_size): """ Convert points to a shaking sequence tensor in batch (for training tags) batch_points: a batch of points, [points1, points2, ...] 
points: [(start_ind, end_ind, tag_id), ] return: batch_shaking_seq: (batch_size_train, shaking_seq_len, tag_size) """ matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size) shaking_seq_len = matrix_size * (matrix_size + 1) // 2 batch_shaking_seq = torch.zeros(len(batch_points), shaking_seq_len, tag_size).long() for batch_id, points in enumerate(batch_points): for sp in points: shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]] batch_shaking_seq[batch_id][shaking_idx][sp[2]] = 1 return batch_shaking_seq @staticmethod def points2shaking_seq_batch(batch_points, matrix_size): """ Convert points to a shaking sequence tensor batch_points: a batch of points, [points1, points2, ...] points: [(start_ind, end_ind, tag_id), ] return: batch_shaking_seq: (batch_size_train, shaking_seq_len) """ matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size) shaking_seq_len = matrix_size * (matrix_size + 1) // 2 batch_shaking_seq = torch.zeros(len(batch_points), shaking_seq_len).long() for batch_id, points in enumerate(batch_points): for sp in points: try: shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]] except Exception as e: raise e else: batch_shaking_seq[batch_id][shaking_idx] = sp[2] return batch_shaking_seq @staticmethod def points2matrix_batch(batch_points, matrix_size): """ Convert points to a matrix tensor batch_points: a batch of points, [points1, points2, ...] points: [(start_ind, end_ind, tag_id), ] return: batch_matrix: (batch_size_train, matrix_size, matrix_size) """ batch_matrix = torch.zeros(len(batch_points), matrix_size, matrix_size).long() for batch_id, points in enumerate(batch_points): for pt in points: batch_matrix[batch_id][pt[0]][pt[1]] = pt[2] return batch_matrix @staticmethod def points2multilabel_matrix_batch(batch_points, matrix_size, tag_size): """ Convert points to a matrix tensor for multi-label tasks batch_points: a batch of points, [points1, points2, ...] points: [(i, j, tag_id), ] return: batch_matrix: shape: (batch_size_train, matrix_size, matrix_size, tag_size) # element 0 or 1 """ batch_matrix = torch.zeros(len(batch_points), matrix_size, matrix_size, tag_size).long() for batch_id, points in enumerate(batch_points): for pt in points: batch_matrix[batch_id][pt[0]][pt[1]][pt[2]] = 1 return batch_matrix @staticmethod def shaking_seq2points(shaking_tag): """ shaking_tag -> points shaking_tag: shape: (shaking_seq_len, tag_size) points: [(start_ind, end_ind, tag_id), ] """ points = [] shaking_seq_len = shaking_tag.size()[0] matrix_size = int((2 * shaking_seq_len + 0.25) ** 0.5 - 0.5) shaking_idx2matrix_idx = Indexer.get_shaking_idx2matrix_idx(matrix_size) nonzero_points = torch.nonzero(shaking_tag, as_tuple=False) for point in nonzero_points: shaking_idx, tag_idx = point[0].item(), point[1].item() pos1, pos2 = shaking_idx2matrix_idx[shaking_idx] point = (pos1, pos2, tag_idx) points.append(point) return points @staticmethod def matrix2points(matrix_tag): """ matrix_tag -> points matrix_tag: shape: (matrix_size, matrix_size, tag_size) points: [(i, j, tag_id), ] """ points = [] nonzero_points = torch.nonzero(matrix_tag, as_tuple=False) for point in nonzero_points: i, j, tag_idx = point[0].item(), point[1].item(), point[2].item() point = (i, j, tag_idx) points.append(point) return points
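# --- Illustrative sketch (not part of the original module) -------------------
# The "shaking" helpers above flatten an n x n matrix into its upper triangle
# (cells (i, j) with i <= j), giving a sequence of length n * (n + 1) // 2.
# MyMatrix is assumed to provide exactly this mapping; the standalone function
# below just demonstrates the scheme and the inverse size formula used in
# Indexer.shaking_seq2points.
def _demo_shaking_index(matrix_size=4):
    shaking_idx2matrix_idx = []  # shaking index -> (i, j)
    matrix_idx2shaking_idx = [[-1] * matrix_size for _ in range(matrix_size)]
    for i in range(matrix_size):
        for j in range(i, matrix_size):
            matrix_idx2shaking_idx[i][j] = len(shaking_idx2matrix_idx)
            shaking_idx2matrix_idx.append((i, j))

    shaking_seq_len = matrix_size * (matrix_size + 1) // 2
    assert len(shaking_idx2matrix_idx) == shaking_seq_len
    # Inverse of len = n(n+1)/2, as used when decoding a shaking tag:
    assert int((2 * shaking_seq_len + 0.25) ** 0.5 - 0.5) == matrix_size
    return shaking_idx2matrix_idx, matrix_idx2shaking_idx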
nilq/baby-python
python
''' Author: Mario Liu Description: Module to detect faces with R200 camera. Adapted from https://docs.opencv.org/3.4.3/d7/d8b/tutorial_py_face_detection.html ''' import logging logging.basicConfig(level=logging.INFO) import time import numpy as np import cv2 import pyrealsense as pyrs face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml') with pyrs.Service() as serv: with serv.Device() as dev: dev.apply_ivcam_preset(0) cnt = 0 last = time.time() smoothing = 0.9 fps_smooth = 30 while True: cnt += 1 if (cnt % 10) == 0: now = time.time() dt = now - last fps = 10/dt fps_smooth = (fps_smooth * smoothing) + (fps * (1.0-smoothing)) last = now dev.wait_for_frames() # color c = dev.color c = cv2.cvtColor(c, cv2.COLOR_RGB2BGR) gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY) # detect face faces = face_cascade.detectMultiScale(c, 1.3, 5) for (x,y,w,h) in faces: cv2.rectangle(c,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = c[y:y+h, x:x+w] # find distance to center cx = int(round(x+(w/2))) cy = int(round(y+(h/2))) depth = dev.depth[cy][cx] print("Face found at distance: " + str(depth/10.0) + " cm") # depth d = dev.depth * dev.depth_scale * 1000 d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW) # join color and depth cd = np.concatenate((c, d), axis=1) cv2.putText(cd, str(fps_smooth)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0)) cv2.imshow('', cd) if cv2.waitKey(1) & 0xFF == ord('q'): break
nilq/baby-python
python
import cv2 import numpy as np from matplotlib import pyplot as plt img = cv2.imread('sud2.jpeg',0) img = cv2.medianBlur(img,5) ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY) th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\ cv2.THRESH_BINARY,11,2) th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\ cv2.THRESH_BINARY,11,2) titles = ['Original Image', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding'] images = [img, th1, th2, th3] for i in range(4): plt.subplot(2,2,i+1),plt.imshow(images[i],'gray') plt.title(titles[i]) plt.xticks([]),plt.yticks([]) plt.show()
nilq/baby-python
python
# # Imported module functions # #https://camo.githubusercontent.com/582226b9ba41bcbc13eaa81d2764092abb443bd416578c175bc2c1c5742d0647/68747470733a2f2f692e696d6775722e636f6d2f6b7a6978316a492e706e67 # Use our SimpleRequests module for this experimental version. from SimpleRequests import SimpleRequest from SimpleRequests.SimpleRequest import error # Use the datetime module for generating timestamps and snowflakes. from datetime import datetime, timedelta,timezone # Use the time module for generating timestamps that are backwards compatible with Python 2. from time import mktime # Use the os module for creating directories and writing files. from os import makedirs, getcwd, path # Use the mimetypes module to determine the mimetype of a file. from mimetypes import MimeTypes # Use the sqlite3 module to access SQLite databases. from sqlite3 import connect, Row, IntegrityError # Use the random module to choose from a list at random. from random import choice # Convert JSON to a Python dictionary for ease of traversal. from json import loads import dateutil.parser import textmine as tx from concurrent.futures import ThreadPoolExecutor as pool import logging import asyncio from contextlib import suppress # # Lambda functions # logging.basicConfig(filename='./output.log', filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) # Return a random string of a specified length. random_str = lambda length: ''.join([choice('0123456789ABCDEF') for i in range(length)]) # Get the mimetype string from an input filename. mimetype = lambda name: MimeTypes().guess_type(name)[0] \ if MimeTypes().guess_type(name)[0] is not None \ else 'application/octet-stream' # Return a Discord snowflake from a timestamp. snowflake = lambda timestamp_s: (timestamp_s * 1000 - 1420070400000) << 22 # Return a timestamp from a Discord snowflake. timestamp = lambda snowflake_t: ((snowflake_t >> 22) + 1420070400000) / 1000.0 time_dff = 4 # # Global functions # class Message(object): def __init__(self, id, user_id, timestamp, content): self.id = id self.user_id = user_id self.timestamp = timestamp self.content = content def snowtodatetime(snowflake_value): ts = ((snowflake_value / 4194304) + 1420070400000)/1000 timestamp = datetime.utcfromtimestamp(ts) return timestamp def utctosnow(timestamp): return((timestamp*1000) - 1420070400000) * 4194304 def get_day(day, month, year): """Get the timestamps from 00:00 to 23:59 of the given day. :param day: The target day. :param month: The target month. :param year: The target year. """ min_time = mktime((year, month, day, 0, 0, 0, -1, -1, -1)) max_time = mktime((year, month, day, 23, 59, 59, -1, -1, -1)) return { '00:00': snowflake(int(min_time)), '23:59': snowflake(int(max_time)) } def safe_name(name): """Convert name to a *nix/Windows compliant name. :param name: The filename to convert. """ output = "" for char in name: if char not in '\\/<>:"|?*': output += char return output def create_query_body(**kwargs): """Generate a search query string for Discord.""" query = "" for key, value in kwargs.items(): if value is True and key != 'nsfw': query += '&has=%s' % key[:-1] if key == 'nsfw': query += '&include_nsfw=%s' % str(value).lower() return query class DiscordConfig(object): """Just a class used to store configs as objects.""" class Discord: """Experimental Discord scraper class.""" def __init__(self, config='config.json', apiver='v6'): """Discord constructor. :param config: The configuration JSON file. :param apiver: The current Discord API version. 
""" with open(config, 'r') as configfile: configdata = loads(configfile.read()) cfg = type('DiscordConfig', (object,), configdata)() if cfg.token == "" or cfg.token is None: error('You must have an authorization token set in %s' % config) exit(-1) self.api = apiver self.buffer = cfg.buffer self.headers = { 'user-agent': cfg.agent, 'authorization': cfg.token } self.types = cfg.types self.query = create_query_body( images=cfg.query['images'], files=cfg.query['files'], embeds=cfg.query['embeds'], links=cfg.query['links'], videos=cfg.query['videos'], nsfw=cfg.query['nsfw'] ) self.directs = cfg.directs if len(cfg.directs) > 0 else {} self.servers = cfg.servers if len(cfg.servers) > 0 else {} # Save us the time by exiting out when there's nothing to scrape. if len(cfg.directs) == 0 and len(cfg.servers) == 0: error('No servers or DMs were set to be grabbed, exiting.') exit(0) ''' dbdir = path.join(getcwd(), 'data') if not path.exists(dbdir): makedirs(dbdir) dbfile = path.join(dbdir, 'users.db') self.db = connect(dbfile) self.c = self.db.cursor() self.c.row_factory = Row ''' self.tx_obj = tx.NLPstock() self.start_time = None self.end_time = None self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def get_server_name(self, serverid, isdm=False): """Get the server name by its ID. :param serverid: The server ID. :param isdm: A flag to check whether we're in a DM or not. """ if isdm: return serverid request = SimpleRequest(self.headers).request server = request.grab_page('https://discordapp.com/api/%s/guilds/%s' % (self.api, serverid)) if server is not None and len(server) > 0: return '%s_%s' % (serverid, safe_name(server['name'])) else: error('Unable to fetch server name from id, generating one instead.') return '%s_%s' % (serverid, random_str(12)) def get_channel_name(self, channelid, isdm=False): """Get the channel name by its ID. :param channelid: The channel ID. :param isdm: A flag to check whether we're in a DM or not. """ if isdm: return channelid request = SimpleRequest(self.headers).request channel = request.grab_page('https://discordapp.com/api/%s/channels/%s' % (self.api, channelid)) if channel is not None and len(channel) > 0: return '%s_%s' % (channelid, safe_name(channel['name'])) else: error('Unable to fetch channel name from id, generating one instead.') return '%s_%s' % (channelid, random_str(12)) @staticmethod def create_folders(server, channel): """Create the folder structure. :param server: The server name. :param channel: The channel name. """ folder = path.join(getcwd(), 'data', server, channel) if not path.exists(folder): makedirs(folder) return folder def download(self, url, folder): """Download the contents of a URL. :param url: The target URL. :param folder: The target folder. """ request = SimpleRequest(self.headers).request request.set_header('user-agent', 'Mozilla/5.0 (X11; Linux x86_64) Chrome/78.0.3904.87 Safari/537.36') filename = safe_name('%s_%s' % (url.split('/')[-2], url.split('/')[-1])) if not path.exists(filename): request.stream_file(url, folder, filename, self.buffer) def check_config_mimetypes(self, source, folder): """Check the config settings against the source mimetype. :param source: Response from Discord search. :param folder: Folder where the data will be stored. 
""" for attachment in source['attachments']: if self.types['images'] is True: if mimetype(attachment['proxy_url']).split('/')[0] == 'image': self.download(attachment['proxy_url'], folder) if self.types['videos'] is True: if mimetype(attachment['proxy_url']).split('/')[0] == 'video': self.download(attachment['proxy_url'], folder) if self.types['files'] is True: if mimetype(attachment['proxy_url']).split('/')[0] not in ['image', 'video']: self.download(attachment['proxy_url'], folder) @staticmethod def insert_text(server, channel, message): """Insert the text data into our SQLite database file. :param server: The server name. :param channel: The channel name. :param message: Our message object. """ dbdir = path.join(getcwd(), 'data') if not path.exists(dbdir): makedirs(dbdir) dbfile = path.join(dbdir, 'text.db') db = connect(dbfile) c = db.cursor() c.execute('''CREATE TABLE IF NOT EXISTS text_%s_%s ( id TEXT, name TEXT, content TEXT, timestamp TEXT )''' % (server, channel)) c.execute('INSERT INTO text_%s_%s VALUES (?,?,?,?)' % (server, channel), ( message['author']['id'], '%s#%s' % (message['author']['username'], message['author']['discriminator']), message['content'], message['timestamp'] )) #print(message.keys()) #print(f"{message['author']['id']} {message['author']['username']} {message['author']['discriminator']} {message['timestamp']}") #dt_time = dateutil.parser.isoparse(message['timestamp']) #ts_comp = dt_time.replace(tzinfo=timezone.utc).timestamp() print(f"{message['content']} {message['timestamp']}") db.commit() db.close() def check_AH(self, dt): start = dt.replace(hour=9, minute=30, second=0, microsecond=0) end = dt.replace(hour=16, minute=0, second=0, microsecond=0) if dt > start: if dt > end: return True else: return False else: return True def insert_text_player(self, server, channel, message, message_hour): """Insert the text data into our SQLite database file. :param server: The server name. :param channel: The channel name. :param message: Our message object. """ global time_dff dbdir = path.join(getcwd(), 'data') if not path.exists(dbdir): makedirs(dbdir) dbfile = path.join(dbdir, 'user.db') db = connect(dbfile) c = db.cursor() ''' if self.check_AH(message_hour+timedelta(hours= -time_dff)): self.tx_obj.AH = True logging.info(f"staring after hours for the day {message_hour+timedelta(hours= -time_dff)}") else: self.tx_obj.AH = False ''' self.tx_obj.current_time = message_hour #try: stock_string = self.tx_obj.get_stocks(message) #except Exception as e: #logging.error(f"getting stocks error {e} {message}") mentions = message["mentions"] if mentions: try: reference = message['message_reference'] try: c.execute("SELECT * FROM text_%s_%s WHERE id = ?" 
% (server, mentions[0]['id']) , (reference['message_id'],)) #rows = self.c.fetchall() #mention_stock_string = rows[-1] #print("EXECUTING finding message from refered user: ", mentions[0]['id']) except Exception as e: #print("cant find token table from user ", mentions[0]['id']) pass except KeyError: #print("not reply simply pin acess last topics org") try: c.execute('SELECT * FROM text_%s_%s ORDER BY id DESC LIMIT 1' % (server, mentions[0]['id'])) #print("EXECUTING finding last message from pinned user: ", mentions[0]['id']) except Exception: pass result = c.fetchone() if result: #print(f"ORG from {mentions[0]['id']} is {result[-1]} {result[2]}") stocks_temp = result[-1].split() stock_string += stocks_temp stock_string = set(stock_string) #stock_string += mention_stock_string stock_string = ' '.join(stock_string) c.execute('''CREATE TABLE IF NOT EXISTS text_%s_%s ( id TEXT NOT NULL PRIMARY KEY, name TEXT, content TEXT, timestamp TEXT, stocks TEXT )''' % (server, message['author']['id'])) c.execute('INSERT INTO text_%s_%s VALUES (?,?,?,?,?)' % (server, message['author']['id']), ( message['id'], channel, message['content'], message['timestamp'], stock_string )) #print(message.keys()) #print(f"{message['author']['id']} {message['author']['username']} {message['author']['discriminator']} {message['timestamp']}") #dt_time = dateutil.parser.isoparse(message['timestamp']) #ts_comp = dt_time.replace(tzinfo=timezone.utc).timestamp() print(f"{message['content']} - stocks: {stock_string}") db.commit() db.close() def grab_data_test(self, folder, server, channel, isdm=False, inter=30): """Scan and grab the attachments. :param folder: The folder name. :param server: The server name. :param channel: The channel name. :param isdm: A flag to check whether we're in a DM or not. 
:param inter: interval of scrape in seconds """ date = datetime.now() target_day = date + timedelta(days=-200) while target_day.day <= date.day: print(f"getting data for {date} target is {target_day}") #start_snow = int(utctosnow(date.replace(day = date.day-1, hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc).timestamp())) #end_snow = int(utctosnow(date.replace(hour=23, minute=59, second=59, microsecond=59, tzinfo=timezone.utc).timestamp())) today = get_day(target_day.day, target_day.month, target_day.year) start_snow = today["00:00"] end_snow = today['23:59'] print(f"{start_snow}-{end_snow}") print() request = SimpleRequest(self.headers).request request.set_header('referer', 'https://discordapp.com/channels/@me/%s' % channel) content = request.grab_page( 'https://discordapp.com/api/%s/channels/%s/messages/search?min_id=%s&max_id=%s&%s' % (self.api, channel, start_snow, end_snow, self.query) ) try: if content['messages'] is not None: for messages in content['messages'][::-1]: for message in messages[::-1]: #self.check_config_mimetypes(message, folder) if self.types['text']: if len(message['content']) > 0: try: self.insert_text_player(server, channel, message) except IntegrityError: pass except TypeError as e: print("type error on getting message ", e) #break target_day += timedelta(days=1) def grab_server_data(self): """Scan and grab the attachments within a server.""" for server, channels in self.servers.items(): for channel in channels: folder = self.create_folders( self.get_server_name(server), self.get_channel_name(channel) ) self.grab_data_current(folder, server, channel) def grab_dm_data(self): """Scan and grab the attachments within a direct message.""" for alias, channel in self.directs.items(): folder = self.create_folders( path.join('Direct Messages', alias), channel ) self.grab_data(folder, alias, channel, True) async def grab_data_current(self, server, channel, isdm=False, inter=30): #the end time """Scan and grab the attachments. :param folder: The folder name. :param server: The server name. :param channel: The channel name. :param isdm: A flag to check whether we're in a DM or not. 
:param inter: interval of scrape in seconds """ global time_dff inter_before = datetime.now() + timedelta(hours=time_dff) print("current time is ", inter_before) inter_after = inter_before + timedelta(seconds=inter) #ts_value_now = dt_time.replace(tzinfo=timezone.utc).timestamp() while True: current_time = datetime.now() + timedelta(hours=time_dff) #print(f"waiting for {inter_after}, current {current_time}") if current_time >= inter_after: #inter_before -= timedelta(seconds=5) #offset to get the overlap message start_snow_dt = inter_before.replace(tzinfo=timezone.utc) + timedelta(seconds=-2) start_snow = int(utctosnow(start_snow_dt.timestamp())) end_snow_dt = inter_after.replace(tzinfo=timezone.utc) + timedelta(seconds=2) end_snow = int(utctosnow(end_snow_dt.timestamp())) print(f"Processing time interval {inter_before} to {current_time}") request = SimpleRequest(self.headers).request request.set_header('referer', 'https://discordapp.com/channels/%s/%s' % (server, channel)) content = request.grab_page( 'https://discordapp.com/api/%s/guilds/%s/messages/search?channel_id=%s&min_id=%s&max_id=%s&%s' % (self.api, server, channel, start_snow, end_snow, self.query) ) if content: if content['messages'] is not None: for messages in content['messages'][::-1]: for message in messages[::-1]: #self.check_config_mimetypes(message, folder) #print(message['id']) if self.types['text'] is True: if len(message['content']) > 0: try: self.insert_text_player(server, channel, message, start_snow_dt) except IntegrityError: logging.error(f"{message['id']} exists by {message['author']['id']} {message['content']} {message['author']['username']}") else: logging.info(f"{start_snow_dt}-{end_snow_dt} no content {content}") inter_before = current_time inter_after = inter_before + timedelta(seconds=inter) print() await asyncio.sleep(0.5) def grab_data(self, folder, server, channel, isdm=False): """Scan and grab the attachments. :param folder: The folder name. :param server: The server name. :param channel: The channel name. :param isdm: A flag to check whether we're in a DM or not. 
""" date = datetime.today() while date.year >= 2021: request = SimpleRequest(self.headers).request today = get_day(date.day, date.month, date.year) if not isdm: request.set_header('referer', 'https://discordapp.com/channels/%s/%s' % (server, channel)) content = request.grab_page( 'https://discordapp.com/api/%s/guilds/%s/messages/search?channel_id=%s&min_id=%s&max_id=%s&%s' % (self.api, server, channel, today['00:00'], today['23:59'], self.query) ) else: request.set_header('referer', 'https://discordapp.com/channels/@me/%s' % channel) content = request.grab_page( 'https://discordapp.com/api/%s/channels/%s/messages/search?min_id=%s&max_id=%s&%s' % (self.api, channel, today['00:00'], today['23:59'], self.query) ) try: if content['messages'] is not None: for messages in content['messages']: for message in messages: #self.check_config_mimetypes(message, folder) if self.types['text'] is True: if len(message['content']) > 0: self.insert_text(server, channel, message) except TypeError: continue break date += timedelta(days=-1) def grab_server_data(self): """Scan and grab the attachments within a server.""" for server, channels in self.servers.items(): for channel in channels: print(f'Scraping data from {self.get_server_name(server)} {self.get_channel_name(channel)}') self.loop.create_task(self.grab_data_current(server, channel)) self.loop.run_forever() def grab_dm_data(self): """Scan and grab the attachments within a direct message.""" for alias, channel in self.directs.items(): folder = self.create_folders( path.join('Direct Messages', alias), channel ) self.grab_data(folder, alias, channel, True) # # Initializer # if __name__ == '__main__': ds = Discord() ds.grab_server_data() #ds.grab_dm_data()
nilq/baby-python
python
from __future__ import annotations from spark_auto_mapper_fhir.fhir_types.uri import FhirUri from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType # This file is auto-generated by generate_classes so do not edit manually # noinspection PyPep8Naming class SpecimenContainerTypeCode(GenericTypeCode): """ SpecimenContainerType From: http://hl7.org/fhir/ValueSet/specimen-container-type in valuesets.xml Checks on the patient prior specimen collection. All SNOMED CT concepts descendants of 706041008 |Device for body fluid and tissue collection/transfer/processing (physical object)| """ def __init__(self, value: AutoMapperTextInputType): super().__init__(value=value) """ http://snomed.info/sct """ codeset: FhirUri = "http://snomed.info/sct"
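# --- Minimal usage sketch (not part of the original, generated file) ---------
# The docstring above restricts values to SNOMED CT descendants of 706041008;
# wrapping the root concept itself here is purely illustrative.
# container_type = SpecimenContainerTypeCode("706041008")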
nilq/baby-python
python
import json from itertools import groupby from operator import itemgetter import django from django import forms from django.conf import settings from django.contrib.admin.views.decorators import staff_member_required from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import Paginator from django.http import HttpResponse from django.shortcuts import render from django.templatetags.static import static from django.urls import reverse, NoReverseMatch from django.views.decorators.csrf import csrf_exempt from linkcheck import update_lock from linkcheck.linkcheck_settings import RESULTS_PER_PAGE from linkcheck.models import Link from linkcheck.utils import get_coverage_data @staff_member_required def coverage(request): coverage_data = get_coverage_data() if request.GET.get('config', False): # Just render the suggested linklist code template = 'linkcheck/suggested_configs.html' context = {'coverage_data': [x['suggested_config'] for x in coverage_data]} else: # Render a nice report template = 'linkcheck/coverage.html' context = {'coverage_data': coverage_data} return render(request, template, context) @staff_member_required @csrf_exempt def report(request): outerkeyfunc = itemgetter('content_type_id') content_types_list = [] if request.method == 'POST': ignore_link_id = request.GET.get('ignore', None) if ignore_link_id is not None: link = Link.objects.get(id=ignore_link_id) link.ignore = True link.save() if request.is_ajax(): json_data = json.dumps({'link': ignore_link_id}) return HttpResponse(json_data, content_type='application/javascript') unignore_link_id = request.GET.get('unignore', None) if unignore_link_id is not None: link = Link.objects.get(id=unignore_link_id) link.ignore = False link.save() if request.is_ajax(): json_data = json.dumps({'link': unignore_link_id}) return HttpResponse(json_data, content_type='application/javascript') recheck_link_id = request.GET.get('recheck', None) if recheck_link_id is not None: link = Link.objects.get(id=recheck_link_id) url = link.url url.check_url(external_recheck_interval=0) links = [x[0] for x in url.links.values_list('id')] if request.is_ajax(): json_data = json.dumps({ 'links': links, 'message': url.message, 'colour': url.colour, }) return HttpResponse(json_data, content_type='application/javascript') link_filter = request.GET.get('filters', 'show_invalid') qset = Link.objects.order_by('-url__last_checked') if link_filter == 'show_valid': qset = qset.filter(ignore=False, url__status__exact=True) report_type = 'Good Links' elif link_filter == 'show_unchecked': qset = qset.filter(ignore=False, url__last_checked__exact=None) report_type = 'Untested Links' elif link_filter == 'ignored': qset = qset.filter(ignore=True) report_type = 'Ignored Links' else: qset = qset.filter(ignore=False, url__status__exact=False) report_type = 'Broken Links' paginated_links = Paginator(qset, RESULTS_PER_PAGE, 0, True) try: page = int(request.GET.get('page', '1')) except: page = 0 # offset = (page - 1) * RESULTS_PER_PAGE links = paginated_links.page(page) # This code groups links into nested lists by content type and object id # It's a bit nasty but we can't use groupby unless be get values() # instead of a queryset because of the 'Object is not subscriptable' error t = sorted(links.object_list.values(), key=outerkeyfunc) for tk, tg in groupby(t, outerkeyfunc): innerkeyfunc = itemgetter('object_id') objects = [] tg = sorted(tg, key=innerkeyfunc) for ok, og in groupby(tg, 
innerkeyfunc): content_type = ContentType.objects.get(pk=tk) og = list(og) try: object = None if content_type.model_class(): object = content_type.model_class().objects.get(pk=ok) except ObjectDoesNotExist: pass try: admin_url = object.get_admin_url() # TODO allow method name to be configurable except AttributeError: try: admin_url = reverse('admin:%s_%s_change' % (content_type.app_label, content_type.model), args=[ok]) except NoReverseMatch: admin_url = None objects.append({ 'object': object, 'link_list': Link.objects.in_bulk([x['id'] for x in og]).values(), # Convert values_list back to queryset. Do we need to get values() or do we just need a list of ids? 'admin_url': admin_url, }) content_types_list.append({ 'content_type': content_type, 'object_list': objects }) # Pass any querystring data back to the form minus page rqst = request.GET.copy() if 'page' in rqst: del rqst['page'] return render(request, 'linkcheck/report.html', { 'content_types_list': content_types_list, 'pages': links, 'filter': link_filter, 'media': forms.Media(js=[static(get_jquery_min_js())]), 'qry_data': rqst.urlencode(), 'report_type': report_type, 'ignored_count': Link.objects.filter(ignore=True).count(), }, ) def get_jquery_min_js(): """ Return the location of jquery.min.js. It's an entry point to adapt the path when it changes in Django. """ return 'admin/js/vendor/jquery/jquery.min.js' def get_status_message(): if update_lock.locked(): return "Still checking. Please refresh this page in a short while. " else: broken_links = Link.objects.filter(ignore=False, url__status=False).count() if broken_links: return ( "<span style='color: red;'>We've found {} broken link{}.</span><br>" "<a href='{}'>View/fix broken links</a>".format( broken_links, "s" if broken_links > 1 else "", reverse('linkcheck_report'), ) ) else: return ''
nilq/baby-python
python
# Sum Compare # Get 3 numbers from the user. Find the # biggest number and add them all together. # If the sum is bigger than 2 times the # biggest of the 3 numbers, then print the sum. # If it's smaller, multiply the sum by 3 and print the product. # write code here
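# --- One possible solution sketch (the exercise above leaves this blank) -----
# Assumes the three numbers are typed one per prompt; the exercise does not say
# what to do when the sum equals twice the biggest, so that case is folded into
# the "smaller" branch here.
a = int(input("First number: "))
b = int(input("Second number: "))
c = int(input("Third number: "))

biggest = max(a, b, c)
total = a + b + c

if total > 2 * biggest:
    print(total)
else:
    print(total * 3)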
nilq/baby-python
python
import tensorflow as tf import math class BatchNormalization(tf.keras.layers.BatchNormalization): """Make trainable=False freeze BN for real (the og version is sad). ref: https://github.com/zzh8829/yolov3-tf2 """ def call(self, x, training=False): if training is None: training = tf.constant(False) training = tf.logical_and(training, self.trainable) return super().call(x, training) def safe_norm(x, epsilon=1e-12, axis=None, keep_dims=False): return tf.sqrt(tf.reduce_sum(x ** 2, axis=axis, keepdims=keep_dims) + epsilon) class ArcMarginPenaltyLogists(tf.keras.layers.Layer): """ArcMarginPenaltyLogists""" def __init__(self, num_classes, margin=0.5, logist_scale=64, **kwargs): super(ArcMarginPenaltyLogists, self).__init__(**kwargs) self.num_classes = num_classes self.margin = margin self.logist_scale = logist_scale def build(self, input_shape): self.w = self.add_variable( "weights", shape=[int(input_shape[-1]), self.num_classes]) self.cos_m = tf.identity(math.cos(self.margin), name='cos_m') self.sin_m = tf.identity(math.sin(self.margin), name='sin_m') self.th = tf.identity(math.cos(math.pi - self.margin), name='th') self.mm = tf.multiply(self.sin_m, self.margin, name='mm') def call(self, embds, labels): # normed_embds = tf.nn.l2_normalize(embds, axis=1, name='normed_embd') # normed_w = tf.nn.l2_normalize(self.w, axis=0, name='normed_weights') embedding_norm = safe_norm(embds, axis=1, keep_dims=True) normed_embds = tf.divide(embds, embedding_norm, name='normed_embd') weights_norm = safe_norm(self.w, axis=0, keep_dims=True) normed_w = tf.divide(self.w, weights_norm, name='normed_weights') cos_t = tf.matmul(normed_embds, normed_w, name='cos_t') sin_t = tf.sqrt(1. - cos_t ** 2, name='sin_t') cos_mt = tf.subtract( cos_t * self.cos_m, sin_t * self.sin_m, name='cos_mt') cos_mt = tf.where(cos_t > self.th, cos_mt, cos_t - self.mm) mask = tf.one_hot(tf.cast(labels, tf.int32), depth=self.num_classes, name='one_hot_mask') logists = tf.where(mask == 1., cos_mt, cos_t) logists = tf.multiply(logists, self.logist_scale, 'arcface_logist') return logists
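# --- Minimal usage sketch (not part of the original module) ------------------
# Shows the expected inputs: float embeddings of shape (batch, embedding_dim)
# and integer class labels of shape (batch,).  The sizes below are arbitrary
# choices for illustration, and the sketch assumes a TensorFlow version where
# the deprecated add_variable call used in build() is still available.
if __name__ == "__main__":
    layer = ArcMarginPenaltyLogists(num_classes=10, margin=0.5, logist_scale=64)
    dummy_embds = tf.random.normal([8, 128])
    dummy_labels = tf.constant([0, 1, 2, 3, 4, 5, 6, 7])
    logits = layer(dummy_embds, dummy_labels)  # -> shape (8, 10)
    print(logits.shape)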
nilq/baby-python
python
""" Some simple code to make particle flux spectrograms with matplotlib @author: Liam M. Kilcommons (minor modifications R. Redmon, A.G. Burrell) """ import numpy as np import matplotlib.pyplot as pp import datetime as dt def dmsp_spectrogram(times, flux, channel_energies=None, lat=None, lt=None, fluxunits='eV/cm$^2$-s-sr-eV', logy=True, datalabel=None, cblims=None, title=None, ax=None, ax_cb=None, label_it=True, color_map="Spectral_r"): """ Plot the DMSP spectrogram Parameters ---------- times : numpy.ndarray (dtype=object)(shape=(n,1)) Array of datetimes corresponding to the timestamps of the rows of the flux array flux : numpy.ndarray (shape=(n,len(channel_energies))) Array of fluxes, 1 per channel, per timestamp channel_energies - numpy.ndarray Array of particle detector channel center energies in eV, if None uses default DMSP energies channel_energies = [ 30000., 20400., 13900., 9450., 6460., 4400., 3000., 2040., 1392., 949., 646., 440., 300., 204., 139., 95., 65., 44., 30.] fluxunits : str, optional Units of flux for labeling the spectrogram (title and colorbar) Defaults to eV/cm$^2$-s-sr-eV logy : boolean, optional Flag to make the y axis log scale (useful for log-spaced channel_energies) lat : numpy.ndarray (shape=(n,1)), optional If lat is not None, then it must be the latitude (magnetic or otherwise) of the spacecraft at every timestamp in times. Setting this value will cause the latitude to be added to the x axis labels lt : numpy.ndarray (shape=(n,1)), optional If lat is not None, then it must be the localtime (magnetic or otherwise) of the spacecraft at every timestamp in times. Setting this value will cause the localtime to be added to the x axis labels datalabel : str, optional Some text to add to the title of the graphic goes on a line above 'Flux [units of flux]' cblims : None or 2-element list, optional The limits for the colorbar. If None, then the colorbar range is set to [flux.min(),flux.max()] ax : None or axis reference, optional Allows caller to specify axis for spectrogram; helpful for stackplot. If 'ax' is specified then so should 'ax_cb'. ax_cb : None or colorbar axis reference, optional Allows caller to specify axis for spectrogram color bar; helpful for stackplot. If 'ax' is specified then so should 'ax_cb'. """ #Module for logrithmic colorbar spacing from matplotlib.colors import LogNorm #Module for locating dates on the x axis import matplotlib.dates as mpldates #Module for getting colormaps import matplotlib.cm as cm if channel_energies is None: channel_energies = np.array([ 30000., 20400., 13900., 9450., 6460., 4400., 3000., 2040., 1392., 949., 646., 440., 300., 204., 139., 95., 65., 44., 30.]) # if Axis not specified then create one if ax is None: f = pp.figure(figsize=(12,6),dpi=300) ax = pp.axes() if datalabel is not None: ax.set_title(datalabel+'\n Flux [%s]' %(fluxunits)) else: pass #ax.set_title('Flux [%s]' % (fluxunits)) if isinstance(times,np.ndarray): times = times.flatten() if isinstance(times[0], dt.datetime): mpl_times = mpldates.date2num(times) else: mpl_times = times #-------------------------------------------------------------------------- # Channel center energies to bin starts # Since DMSP SSJ channels are log-linearly spaced, the bins widths are taken # to be log-constant and the bins are placed symmetric about the channel # center energies. This is probably not exactly correct since the actual # instrument response/sensitivity functions are likely more linear than # log linear. 
Recall that channels are listed as [30,000 eV to 30 eV] in # reverse order. #-------------------------------------------------------------------------- # Hard coded start/end bins taken from SSDP; not sure how they are derived, # though this does result in bins visually centered correctly on their # central energies bin_edges = np.logspace(np.log10(36340.), np.log10(24.76), len(channel_energies) + 1) # add one for endpoint T,CH_E = np.meshgrid(mpl_times, bin_edges) # Infinite, and Negative fluxes => NaN inds = np.nonzero((~np.isfinite(flux)) | (flux < 0.)) flux[inds] = np.nan # Mask nan fluxes so that pcolor knows to use the cmap bad value masked_flux = np.ma.masked_where(np.isnan(flux),flux) if cblims is None: z_min = np.nanmin(flux) z_max = np.nanmax(flux) else: z_min = cblims[0] z_max = cblims[1] #Set the over and under-range colors for the colorbar cmap = cm.get_cmap(color_map) cmap.set_bad('white',.1) cmap.set_over('black') cmap.set_under('grey') mappable = ax.pcolormesh(T, CH_E, masked_flux.transpose(), cmap=cmap, norm=LogNorm(vmin=z_min, vmax=z_max)) #mappable.set_rasterized( True ) if ax_cb is None: pp.colorbar(mappable,label=fluxunits,ax=ax) else: pp.colorbar(mappable,label=fluxunits,cax=ax_cb) # if Axis not specified then add x-axis tick marks if label_it and isinstance(times[0], dt.datetime): plotwidth_h = (times[-1]-times[0]).total_seconds()/3600. plotwidth_m = (times[-1]-times[0]).total_seconds()/60. if plotwidth_m <= 10.: # if the plot width is less than 10 minutes tick mark every minute majloc = mpldates.MinuteLocator(interval=1) elif plotwidth_m <= 30.: # if the plot width is less than 1/2 hour tick mark every 5 minutes majloc = mpldates.MinuteLocator(interval=5) elif plotwidth_h <= 1: # if the plot width is less than 1 hour, but more than 30 minutes, # tick mark every 10 minutes majloc = mpldates.MinuteLocator(interval=10) elif plotwidth_h <= 3: # if less than 3 hours, but more than 1 use every 15 minutes majloc = mpldates.MinuteLocator(interval=15) elif plotwidth_h <= 5: # if less than 5 hours, but more than 3 use every half hour majloc = mpldates.MinuteLocator(interval=30) else: majloc = mpldates.HourLocator() #tick mark every hour #Set the date locator ax.xaxis.set_major_locator(majloc) #This is slow and throws errors if used with pcolor, used pcolormesh # instead #ax.set_yscale('log') #Manually create the tick labels #There is probably a better way to do this with FuncFormatter, but I # couldn't figure out how to get all of the relavent lat and LT # information into it #Get the tick marks xticks = ax.get_xticks() xlabels = [] for tick in xticks: ind = np.nonzero(mpl_times==tick)[0] #Nonzero returns array ARG! if len(ind)>0: #Sometimes tick is not found if it wants to tickmark outside of # data range. Have to put additional index to get datetime # instead of array of length 1 with datetime in it tickstr = "%.2d:%.2d" % (times[ind[0]].hour, times[ind[0]].minute) if lat is not None: tickstr+="\n%.2f" % (lat[ind]) if lt is not None: tickstr+="\n%.2f" % (lt[ind]) xlabels.append(tickstr) else: # Convert the tick position to a time dtime = mpldates.num2date(tick) xlabels.append('%.2d:%.2d' % (dtime.hour, dtime.minute)) ax.set_xticklabels(xlabels) ax.set_yscale('log') ax.set_ylim([channel_energies.min(),channel_energies.max()]) ax.set_ylabel('Channel E \n(log)[eV]') # In the case that caller didn't specify the axis to use return new figure if 'f' in locals(): # f.savefig('/home/liamk/test.png',dpi=300,figsize=(12,6)) return f
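# --- Minimal usage sketch (not part of the original module) ------------------
# Builds a couple of minutes of synthetic SSJ-like data just to exercise the
# plotting call with the default 19-channel energy grid; flux magnitudes are
# arbitrary, not physical, and the sketch assumes a matplotlib version whose
# pcolormesh accepts the grid shapes constructed above.
if __name__ == "__main__":
    n_times = 120
    start = dt.datetime(2014, 1, 1, 12, 0, 0)
    times = np.array([start + dt.timedelta(seconds=i) for i in range(n_times)])
    flux = 10.0 ** np.random.uniform(4, 8, size=(n_times, 19))
    fig = dmsp_spectrogram(times, flux, datalabel='Synthetic example')
    fig.savefig('dmsp_spectrogram_example.png')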
nilq/baby-python
python
from django.apps import apps from django.forms.models import ModelChoiceField, ModelMultipleChoiceField from django.forms import ChoiceField from smart_selects.widgets import ChainedSelect, ChainedSelectMultiple try: from django.utils.encoding import force_text except ImportError: from django.utils.encoding import force_str as force_text get_model = apps.get_model class ChainedModelChoiceField(ModelChoiceField): def __init__( self, to_app_name, to_model_name, chained_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, show_all, auto_choose, sort=True, manager=None, initial=None, view_name=None, *args, **kwargs ): defaults = { "widget": ChainedSelect( to_app_name, to_model_name, chained_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, show_all, auto_choose, sort, manager, view_name, ), } defaults.update(kwargs) if "queryset" not in kwargs: queryset = get_model(to_app_name, to_model_name).objects.all() super(ChainedModelChoiceField, self).__init__( queryset=queryset, initial=initial, *args, **defaults ) else: super(ChainedModelChoiceField, self).__init__( initial=initial, *args, **defaults ) def _get_choices(self): self.widget.queryset = self.queryset choices = super(ChainedModelChoiceField, self)._get_choices() return choices choices = property(_get_choices, ChoiceField._set_choices) class ChainedManyToManyField(ModelMultipleChoiceField): def __init__( self, to_app_name, to_model_name, chain_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, auto_choose, horizontal, verbose_name="", manager=None, initial=None, *args, **kwargs ): defaults = { "widget": ChainedSelectMultiple( to_app_name, to_model_name, chain_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, auto_choose, horizontal, verbose_name, manager, ), } defaults.update(kwargs) if "queryset" not in kwargs: queryset = get_model(to_app_name, to_model_name).objects.all() super(ChainedManyToManyField, self).__init__( queryset=queryset, initial=initial, *args, **defaults ) else: super(ChainedManyToManyField, self).__init__( initial=initial, *args, **defaults ) class GroupedModelSelect(ModelChoiceField): def __init__(self, queryset, order_field, *args, **kwargs): self.order_field = order_field super(GroupedModelSelect, self).__init__(queryset, *args, **kwargs) def _get_choices(self): # If self._choices is set, then somebody must have manually set # the property self.choices. In this case, just return self._choices. if hasattr(self, "_choices"): return self._choices # Otherwise, execute the QuerySet in self.queryset to determine the # choices dynamically. Return a fresh QuerySetIterator that has not been # consumed. Note that we're instantiating a new QuerySetIterator *each* # time _get_choices() is called (and, thus, each time self.choices is # accessed) so that we can ensure the QuerySet has not been consumed. This # construct might look complicated but it allows for lazy evaluation of # the queryset. 
        group_indexes = {}
        choices = [("", self.empty_label or "---------")]
        i = len(choices)
        for item in self.queryset:
            order_field = getattr(item, self.order_field)
            group_index = order_field.pk
            if group_index not in group_indexes:
                group_indexes[group_index] = i
                # force_text is the name bound by the try/except import at the
                # top of this module (force_str is never defined directly).
                choices.append([force_text(order_field), []])
                i += 1
            choice_index = group_indexes[group_index]
            choices[choice_index][1].append(self.make_choice(item))
        return choices

    def make_choice(self, obj):
        return (obj.pk, "   " + self.label_from_instance(obj))

    choices = property(_get_choices, ChoiceField._set_choices)
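# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical Country/City pair showing how the positional arguments of
# ChainedModelChoiceField map onto a chained relationship; the "locations" app
# and its models are invented for the example, not something this module
# defines.
#
# class CityChoiceForm(forms.Form):
#     country = forms.ModelChoiceField(queryset=Country.objects.all())
#     city = ChainedModelChoiceField(
#         to_app_name="locations",            # app holding the City model
#         to_model_name="City",
#         chained_field="country",            # form field this one depends on
#         chained_model_field="country",      # FK on City pointing at Country
#         foreign_key_app_name="locations",
#         foreign_key_model_name="Country",
#         foreign_key_field_name="country",
#         show_all=False,
#         auto_choose=True,
#     )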
nilq/baby-python
python
class Person(object): def demo(self): print('888')
nilq/baby-python
python
import datetime import json import argparse from typing import Any, Dict import pytz from astral import LocationInfo, Observer, sun options = argparse.ArgumentParser() options.add_argument( "-n", "--name", dest="name", default="Somewhere", help="Location name (free-form text)", ) options.add_argument( "-r", "--region", dest="region", default="On Earth", help="Region (free-form text)" ) options.add_argument( "-d", "--date", dest="date", help="Date to compute times for (yyyy-mm-dd)" ) options.add_argument("-t", "--tzname", help="Timezone name") options.add_argument("latitude", type=float, help="Location latitude (float)") options.add_argument("longitude", type=float, help="Location longitude (float)") options.add_argument( "elevation", nargs="?", type=float, default=0.0, help="Elevation in metres (float)" ) args = options.parse_args() loc = LocationInfo( args.name, args.region, args.tzname, args.latitude, args.longitude ) obs = Observer(args.latitude, args.longitude, args.elevation) kwargs: Dict[str, Any] = {} kwargs["observer"] = obs if args.date is not None: try: kwargs["date"] = datetime.datetime.strptime(args.date, "%Y-%m-%d").date() except: # noqa: E722 kwargs["date"] = datetime.date.today() sun_as_str = {} format_str = "%Y-%m-%dT%H:%M:%S" if args.tzname is None: tzinfo = pytz.utc format_str += "Z" else: tzinfo = pytz.timezone(loc.timezone) # type: ignore format_str += "%z" kwargs["tzinfo"] = tzinfo s = sun.sun(**kwargs) for key, value in s.items(): sun_as_str[key] = s[key].strftime(format_str) sun_as_str["timezone"] = tzinfo.zone sun_as_str["location"] = f"{loc.name}, {loc.region}" print(json.dumps(sun_as_str))
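# --- Example invocation (illustrative; the script filename is an assumption) --
#   $ python sun_cli.py -n London -r England -t Europe/London 51.5 -0.12 25
# Prints a JSON object whose keys are the astral sun events ("dawn", "sunrise",
# "noon", "sunset", "dusk") formatted with format_str, plus the added
# "timezone" and "location" entries.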
nilq/baby-python
python
n, m = map(int, input().split()) if n == 1: if m == 0: print(1, 2) else: print(-1) exit() if m < 0 or m + 2 > n: print(-1) else: print(1, 2 * (m + 2)) for i in range(1, m + 2): print(2 * i, 2 * i + 1) for j in range(m + 2, n): print(2 * j + 1, 2 * j + 2)
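# --- Worked trace (illustrative; the original problem statement is not shown) -
# For input "4 1" the program prints:
#   1 6
#   2 3
#   4 5
#   7 8
# i.e. the pair (1, 2*(m+2)) spans the m + 1 pairs produced by the first loop
# ((2,3) and (4,5) here), while the second loop emits disjoint pairs such as
# (7,8) for the remaining n - (m + 2) lines.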
nilq/baby-python
python
from sklearn import preprocessing from tqdm import tqdm import time import pandas as pd import numpy as np import torch import torch.nn as nn from sklearn.metrics import accuracy_score, recall_score from sklearn.metrics import precision_score, f1_score from sklearn.metrics import classification_report from core.utils import AverageMeter from itertools import cycle from core.utils import save_checkpoint from sklearn.metrics import roc_auc_score, roc_curve, auc # from .grad_cam_log import record_output_gradcam import matplotlib.pyplot as plt target_names_dict = {"Non": 0, "Venous": 1, "Aterial": 2, "Others": 3} map_id_name = {0: "Non Contrast", 1: "Venous", 2: "Aterial", 3: "Others"} def valid_model( cfg, mode, epoch, model, dataloader, criterion, writer=None, save_prediction=True, best_metric=None, visual=False ): """Evaluate model performance on Validating dataset Args: cfg (CfgNode): Config object containing running configuration mode (str): Model running mode (valid/test) model (nn.Module): Model that need to have performance evaluated dataloader (data.DataLoader): Dataloader object to load data batch-wise criterion: Loss function writer (Summarywriter): Logger that log validation loss and plot it on Tensorboard save_prediction (Boolean): Whether to save prediction output or not (for bootstraping) best_metric (float, optional): Best performance result of loaded model. Defaults to None. """ # Declare variables gpu = cfg.SYSTEM.GPU output_log_dir = cfg.DIRS.OUTPUTS model.eval() losses = AverageMeter() tbar = tqdm(dataloader) targets, preds, filenames, study_IDs, seriesNumbers = ( list(), list(), list(), list(), list(), ) data = dict() total_time = 0 all_probs = [] for i, (filename, study_ID, seriesNumber, image, target) in enumerate(tbar): with torch.no_grad(): image = image.float() if gpu: image, target = image.cuda(), target.cuda() start = time.time() output = model(image) end = time.time() # Output prediction sigmoid = nn.Sigmoid() probs = sigmoid(output) pred = torch.argmax(probs, 1) probs = probs.cpu().numpy() all_probs.append(probs) # print(probs.shape) # print(pred.shape) # print("_--------------_") total_time += end - start # Compute loss loss = criterion(output, target) # Record loss losses.update(loss.item() * cfg.SOLVER.GD_STEPS, target.size(0)) tbar.set_description("Valid loss: %.9f" % (losses.avg)) # Convert target, prediction to numpy target = list(target.detach().cpu().numpy()) pred = list(pred.detach().cpu().numpy()) # print(pred) filename = list(filename) targets += target preds += pred filenames += filename study_IDs += study_ID seriesNumbers += list(np.array(seriesNumber)) # print(f"Inference time =", (total_time / len(tbar)) / 100) all_targets = [] for idx in range(len(targets)): cur = [0] * 4 cur[targets[idx]] = 1 all_targets.append([cur]) all_probs = np.concatenate(all_probs, axis=0) all_target = np.concatenate(all_targets, axis=0) # print(all_target.shape) # print(all_probs.shape) np.save("target.npy", all_target) np.save("probs.npy", all_probs) # print(type(targets), len(targets)) # print(all_probs.shape) if visual == True: fpr = dict() tpr = dict() roc_auc = dict() for i in range(4): fpr[i], tpr[i], _ = roc_curve(all_target[:, i], all_probs[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) colors = cycle(["aqua", "darkorange", "cornflowerblue", "red"]) lw = 2 plt.figure() for i, color in zip(range(4), colors): plt.plot( fpr[i], tpr[i], color=color, lw=lw, label=f"ROC curve of class {map_id_name[i]} (area = {roc_auc[i]})" ) plt.plot([0, 1], [0, 1], "k--", lw=lw) 
plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Some extension of Receiver operating characteristic to multiclass") plt.legend(loc="lower right") plt.show() # Record wrongly predicted sample and save confusion matrix # record_output(cfg, mode, output_log_dir, study_IDs, seriesNumbers, # targets, preds, filenames) # record_output_gradcam(cfg, mode, output_log_dir, targets, preds, filenames, model) # Calculate Metrics accuracy = accuracy_score(targets, preds) recall = recall_score(targets, preds, average="macro") precision = precision_score(targets, preds, average="macro") f1 = f1_score(targets, preds, average="macro") print( "ACCURACY: %.9f, RECALL: %.9f, PRECISION: %.9f, F1: %.9f" % (accuracy, recall, precision, f1) ) if len(np.unique(preds)) == cfg.MODEL.NUM_CLASSES: report = classification_report( targets, preds, target_names=["Non", "Venous", "Aterial", "Others"], digits=4, ) print(report) # else: # from core.utils import print_report, classification_report_ # report = classification_report_(targets, preds, target_names_dict) # print_report(report) data["Study_ID"] = study_IDs data["Filename"] = filenames data["SeriesNumber"] = seriesNumbers data["Prediction"] = preds data["Label"] = targets data = pd.DataFrame(data) all_series = [] for (studyuid, seriesuid), tmp_df in data.groupby(['Study_ID', 'SeriesNumber']): preds = tmp_df['Prediction'].tolist() labels = tmp_df['Label'].tolist() f1_series = f1_score(labels, preds, average='macro') all_series.append(f1_series) all_series = np.array(all_series) f1_series = np.mean(all_series) print("series", f1_series) save_dict = { "epoch": epoch + 1, "arch": cfg.NAME, "state_dict": model.state_dict(), "best_metric": best_metric, } save_filename = f"{cfg.NAME}_{str(f1)}_{str(f1_series)}.pth" save_checkpoint(save_dict, root=cfg.DIRS.WEIGHTS, filename=save_filename) # print(studyuid, seriesuid, f1) if mode == "train": # writer.add_scalars( # f"Metrics", # { # "F1_SCORE": f1, # "ACCURACY": accuracy, # "RECALL": recall, # "PRECISION": precision, # }, # epoch, # ) # CHECKPOINT is_best = f1 > best_metric best_metric = max(f1, best_metric) # Save All slices prediction for scan prediction and bootstraping if save_prediction: data.to_csv(f"eval_{mode}.csv", index=False) return best_metric
nilq/baby-python
python
# Updated: Monday, May 28, 2018
# Author: Rosnel Alejandro Leyva-Cortes
import os
import re
import sys
import struct
import socket
import urllib
import time
from subprocess import Popen, PIPE
import json as m_json
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse
try:
    import urllib.request  # Python3.x
except ImportError:
    import urllib2  # Python2.x
# End of imports #


def q():
    print('''
You are a horrible
''')
    exit()


# initial ping
# for the hostname
def ping():
    welcome = input('''\nIn order to perform a test, we must determine if the host is up.''')
    hostname = input("\nInput Hostname: ")
    response = os.system("ping -c 10 " + hostname)
    # ...and then check the response
    if response == 0:
        print(hostname + ' is up!')  # End result is self explanatory
    mainMenu()


def Cloudflare():
    print('Not ready yet')
    mainMenu()


def traceroute(url=None, *arg):
    print('''This function uses ICMP to trace a host and give an IP.
Please run as root and don't include HTTPS in the url.''')
    url = input("\nPlease type in a url to traceroute: ")
    while True:
        if 'http' not in url:
            url = "http://" + url
        elif "www" not in url:
            url = "www." + url[7:]
        else:
            break
    url = urlparse(url)
    url = url.netloc
    print(url)
    p = Popen(['tracert', url], stdout=PIPE)
    while True:
        line = p.stdout.readline()
        line2 = str(line).replace('\\r', '').replace('\\n', '')
        if len(arg) > 0:
            file = open(arg[0], "a")
            file.write(line2)
            file.close()
        print(line2)
        if not line:
            break


def mainMenu():
    print('''
 _______    ______   _______
/       \\  /      \\ /       \\
$$$$$$$  |/$$$$$$  |$$$$$$$  |
$$ |__$$ |$$ |  $$/ $$ |__$$ |
$$    $$< $$ |      $$    $$/
$$$$$$$  |$$ |   __ $$$$$$$/
$$ |  $$ |$$ \\__/  |$$ |
$$ |  $$ |$$    $$/ $$ |
$$/   $$/  $$$$$$/  $$/   net

https://sourceforge.net/projects/rcpnet/
https://twitter.com/PotatoSkins16

Choose one''')
    print('1. Ping host')
    print('2. Cloudflare check')
    print('3. tracert')
    print('4. Quit')
    sel = int(input("\nEnter choice: "))
    if sel == 1:
        ping()
    elif sel == 2:
        Cloudflare()
    elif sel == 3:
        traceroute()
    elif sel == 4:
        q()
    else:
        print('That is not a valid choice!!!')
        mainMenu()


mainMenu()
nilq/baby-python
python
import pymysql from sshtunnel import SSHTunnelForwarder class Database: def initialize(self, server_name): self.server = SSHTunnelForwarder( '51.75.163.1', ssh_username='karthik', ssh_password='btm56Vy.3', remote_bind_address=('127.0.0.1', 3306) ) self.server.start() self.cnx = pymysql.connect( host='localhost', port=self.server.local_bind_port, user='discordb0t', password='d1sCORDb()t!', db='discordbot' ) print("Connection Successful!") self.cur = self.cnx.cursor() self.server_name = server_name self.cur.execute("SHOW TABLES") self.tables = [table_name for (table_name,) in self.cur] if self.server_name not in self.tables: self.create_table() def create_table(self): SQL = "CREATE TABLE `{0}` LIKE `{1}`".format(self.server_name, "Default_Table") self.cur.execute(SQL) self.cnx.commit() def add_member(self, *member): if not self.check_mem('Main', member[0]): SQL = "INSERT INTO `Main`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)" self.cur.execute(SQL, member) self.cnx.commit() SQL = "INSERT INTO `{0}`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)".format( self.server_name) self.cur.execute(SQL, member) self.cnx.commit() elif self.check_mem('Main', member[0]): SQL = "INSERT INTO `{0}`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) " \ "SELECT `UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP` " \ "FROM `Main` WHERE `Main`.`UID` = {1}".format( self.server_name, member[0]) self.cur.execute(SQL) self.cnx.commit() def remove_member(self, member_id): SQL = "DELETE FROM `{0}` WHERE `{0}`.`UID` = {1}".format(self.server_name, member_id) self.cur.execute(SQL) self.cnx.commit() def check_mem(self, server_name, member_id): SQL = "SELECT 1 FROM `{0}` WHERE `{0}`.`UID` = {1}".format(server_name, member_id) self.cur.execute(SQL) x = self.cur.fetchone() if isinstance(x, type(None)): return False return True def reset_credits(self, member_id, amount): for table in self.tables: if self.check_mem(table, member_id): SQL = "UPDATE `{0}` SET `Credits` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, amount, member_id) self.cur.execute(SQL) self.cnx.commit() def reset_xp(self, member_id): for table in self.tables: if self.check_mem(table, member_id): SQL = "UPDATE `{0}` SET `XP` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, 0, member_id) self.cur.execute(SQL) self.cnx.commit() def update_pfp(self, member_id, avatar_url): for table in self.tables: if self.check_mem(table, member_id): SQL = "UPDATE `{0}` SET `Avatar` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, avatar_url, member_id) self.cur.execute(SQL) self.cnx.commit() def update_name(self, new_name, member_id): for table in self.tables: if self.check_mem(table, member_id): SQL = "UPDATE `{0}` SET `Name` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, new_name, member_id) self.cur.execute(SQL) self.cnx.commit() def update_table(self, current_name): SQL = "ALTER TABLE `{0}` RENAME TO `{1}`".format(self.server_name, current_name) self.cur.execute(SQL) self.cnx.commit() def update_xp(self, member_id, xp_gain): SQL = "" def fetch_profile(self, member_id): SQL = "SELECT * FROM `Main` WHERE `Main`.`UID` = %s" self.cur.execute(SQL, member_id) elements = [element for element in self.cur.fetchone()] return elements def terminate(self): print("terminated") self.cur.close() self.cnx.close() self.server.close()
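# --- Sketch of one possible update_xp body (an assumption, not the original
# author's implementation); it mirrors the loop-over-tables pattern of the
# other update_* methods but uses a parameterised query for the values.
#
#     def update_xp(self, member_id, xp_gain):
#         for table in self.tables:
#             if self.check_mem(table, member_id):
#                 SQL = "UPDATE `{0}` SET `XP` = `XP` + %s WHERE `{0}`.`UID` = %s".format(table)
#                 self.cur.execute(SQL, (xp_gain, member_id))
#                 self.cnx.commit()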
nilq/baby-python
python
class Station: def __init__(self, station_id, direction, stop_name, station_name, accessible, red, blue, green, brown, purple, purple_exp, yellow, pink, orange, latitude, longitude): self.station_id = station_id self.direction = direction self.stop_name = stop_name self.station_name = station_name self.accessible = accessible self.red = red self.blue = blue self.green = green self.brown = brown self.purple = purple self.purple_exp = purple_exp self.yellow = yellow self.pink = pink self.orange = orange self.latitude = latitude self.longitude = longitude
nilq/baby-python
python
meses = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7,
         'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}

# input
mes = int(input())

# processing & output
for k, v in meses.items():
    if v == mes:
        print(k)
        break
nilq/baby-python
python
from gruffy import AccumulatorBar g = AccumulatorBar() g.title = "Gruffy's Graph" g.data("add", [10, 50, 150, 20]) g.hide_legend = True g.labels = {0: '2003', 1: '2004', 2: '2005', 3: '2006'} g.transparent = 0.7 g.y_axis_increment = 50 g.maximum_value = 300 g.write('gruffy-accumulatorbar.png')
nilq/baby-python
python
from sym_lis3 import GlobalEnv import pytest def test_dyn(): g = GlobalEnv() g.eval_str('(define "foo" (lambda (x y) (if (in? dyn_env x) y 0)))') assert not g.eval_str('(in? root_env "x")') assert g.eval_str('(foo "x" 1)') == 1 assert g.eval_str('(foo "+" 1)') == 0 assert g.eval_str('(foo "y" 55)') == 55
nilq/baby-python
python
class BaseEngine: def __init__(self, world): self.world = world self._cull_method = self.default_cull_method def check_collision(self, entity, collider): raise NotImplementedError('Nope.') def resolve_collision(self, entity, collider): raise NotImplementedError('Nope.') def handle_collision(self, entity): raise NotImplementedError('Nope.') def set_cull_method(self, cull_method): self._cull_method = cull_method def cull_chunks(self, chunks): return self._cull_method(chunks) def default_cull_method(self, chunks): return [shape for chunk in chunks for shape in chunk.shapes]
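# BaseEngine above is an abstract base: check_collision / resolve_collision /
# handle_collision are meant to be overridden. A minimal sketch of a concrete engine
# follows, assuming entities and colliders expose a hypothetical axis-aligned bounding
# box as an (x, y, w, h) tuple and that self.world.chunks exists; neither attribute
# name comes from the original code.
class AABBEngine(BaseEngine):
    def check_collision(self, entity, collider):
        ex, ey, ew, eh = entity.aabb
        cx, cy, cw, ch = collider.aabb
        # Standard AABB overlap test.
        return ex < cx + cw and ex + ew > cx and ey < cy + ch and ey + eh > cy

    def handle_collision(self, entity):
        # Test the entity only against shapes from chunks that survive culling.
        for collider in self.cull_chunks(self.world.chunks):
            if collider is not entity and self.check_collision(entity, collider):
                self.resolve_collision(entity, collider)

    def resolve_collision(self, entity, collider):
        pass  # e.g. push the entity out of the collider; left as a stub here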
nilq/baby-python
python
from ..typecheck import * from . layout import Layout from . image import Image from . css import css, div_inline_css, icon_css, none_css class element: def __init__(self, is_inline: bool, width: Optional[float], height: Optional[float], css: Optional[css]) -> None: super().__init__() self.layout = None #type: Optional[Layout] self.children = [] #type: Sequence[element] self.requires_render = True self._height = height self._width = width self.is_inline = is_inline if css: self.css = css self.className = css.class_name self.padding_height = css.padding_height self.padding_width = css.padding_width else: self.css = none_css self.className = none_css.class_name self.padding_height = 0 self.padding_width = 0 def height(self, layout: Layout) -> float: if self._height is not None: return self._height + self.padding_height height = 0.0 height_max = 0.0 for item in self.children: height += item.height(layout) if item.is_inline and height > height_max: height_max = max(height_max, height) height = 0.0 return max(height_max, height) + self.padding_height def width(self, layout: Layout) -> float: if self._width is not None: return self._width + self.padding_width width = 0.0 width_max = 0.0 for item in self.children: width += item.width(layout) if not item.is_inline and width > width_max: width_max = max(width_max, width) width = 0.0 return max(width_max, width) + self.padding_width def add_class(self, name: str) -> None: self.className += ' ' self.className += name def dirty(self): if self.layout: self.layout.dirty() self.requires_render = True def html_inner(self, layout: Layout) -> str: html = [] for child in self.children: html.append(child.html(layout)) return ''.join(html) def html(self, layout: Layout) -> str: ... def added(self, layout: Layout) -> None: ... def removed(self) -> None: ... def render(self) -> Optional[Union[Sequence['element'], 'element']]: ... 
class span (element): Children = Optional[Union[Sequence['span'], 'span']] def __init__(self, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None: super().__init__(True, width, height, css) self._items = None #type: span.Children def render(self) -> 'span.Children': return self._items def __getitem__(self, values: 'span.Children'): self._items = values return self def html(self, layout: Layout) -> str: inner = self.html_inner(layout) h = self.height(layout) w = self.width(layout) html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, inner) return html class div (element): Children = Optional[Union[Sequence['div'], Sequence['span'], 'div', 'span']] def __init__(self, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None: super().__init__(False, width, height, css) self._items = None #type: div.Children def render(self) -> 'div.Children': return self._items def __getitem__(self, values: 'div.Children'): self._items = values return self def html(self, layout: Layout) -> str: inner = self.html_inner(layout) h = self.height(layout) - self.padding_height w = self.width(layout) - self.padding_width if self.children and self.children[0].is_inline: html = '<div class= "{} {}" style="height:{}rem;width:{}rem;line-height:{}rem"><img style="height:2.5rem;">{}</div>'.format(div_inline_css.class_name, self.className, h, w, h, inner) else: html = '<div class="{}" style="height:{}rem;width:{}rem;">{}</div>'.format(self.className, h, w, inner) return html # uses an img tag to force the width of the phantom to be the width of the item being rendered class phantom_sizer (div): def __init__(self, item: Union[div, span]) -> None: super().__init__() self.item = item def render(self) -> div.Children: return self.item def html(self, layout: Layout) -> str: inner = self.html_inner(layout) h = self.height(layout) w = self.width(layout) html = '<div class="{}" style="height:{}rem;"><img style="width:{}rem;">{}</div>'.format(self.className, h, w, inner) return html html_escape_table = { "&": "&amp;", ">": "&gt;", "<": "&lt;", " ": "\u00A0" # HACK spaces inside <a> tags are not clickable. 
We replaces spaces with no break spaces } def html_escape(text: str) -> str: return "".join(html_escape_table.get(c, c) for c in text) class text (span): def __init__(self, text: str, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None: super().__init__(width, height, css) self.text = text.replace("\u0000", "\\u0000") @property def text(self) -> str: return self._text @text.setter def text(self, text: str): self._text = text.replace("\u0000", "\\u0000") self.text_html = html_escape(self._text) def width(self, layout: Layout) -> float: return len(self.text) + self.padding_width def html(self, layout: Layout) -> str: h = self.height(layout) html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, self.text_html) return html class click (span): def __init__(self, on_click: Callable[[], None]) -> None: super().__init__() self.on_click = on_click def html(self, layout: Layout) -> str: href = layout.register_on_click_handler(self.on_click) html = '<a href={}>{}</a>'.format(href, self.html_inner(layout)) return html class icon (span): def __init__(self, image: Image) -> None: super().__init__(width=2.5, height=2.5, css=icon_css) self.image = image def html(self, layout: Layout) -> str: return '''<span class="{}"><img style="width:2.5rem;height:2.5rem;" src="{}"></span>'''.format(self.className, self.image.data(layout)) class code(span): def __init__(self, text: str, language: str = 'c++') -> None: super().__init__() self.text = text.replace("\n", "") self.text_html = html_escape(self.text) self.language = language def added(self, layout: Layout) -> None: self.highlight = layout.syntax_highlight(self.text, self.language) def width(self, layout: Layout) -> float: return len(self.text) + self.padding_width def html(self, layout: Layout) -> str: h = self.height(layout) text_html = self.highlight.html or self.text_html html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, text_html) return html
nilq/baby-python
python
from django.core.mail import send_mail, EmailMessage
from django.forms import modelformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic

from .models import Question, Choice, FilePathFieldForm


class IndexView(generic.ListView):
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]


class DetailView(generic.DetailView):
    model = Question
    template_name = 'polls/detail.html'


class ResultsView(generic.DetailView):
    model = Question
    template_name = 'polls/results.html'


def vote(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))


def send(request):
    # subject = 'Subject'           # subject line
    # message = 'sssssss'           # body text
    # sender = 'my@qq.com'          # sending address, already configured in settings.py, import it directly
    # receiver = ['target@qq.com']  # recipient addresses
    # html_message = '<h1>%s</h1>' % 'testtesttest'  # send an HTML body
    # send_mail(subject, message, sender, receiver, html_message=html_message)
    email = EmailMessage(
        'Hello',
        'Body goes here',
        'from@example.com',
        ['to1@example.com', 'to2@example.com'],
        ['bcc@example.com'],
        reply_to=['another@example.com'],
        headers={'Message-ID': 'foo'},
    )
    email.send()
    # A Django view must return an HttpResponse.
    return HttpResponse('Email sent.')


# Save a record submitted through a form component
def manage_FilePathForm(request):
    form_obj = FilePathFieldForm()  # instantiate an empty form object
    if request.method == "POST":
        # Instantiate the form object directly with the POSTed data
        form_obj = FilePathFieldForm(request.POST)
        # Call the form object's validation method
        if form_obj.is_valid():
            form_obj.save()
    return render(request, 'polls/manage_authors.html', {'form_obj': form_obj})
nilq/baby-python
python
from ...utils.IndexedRect import IndexedRect class IndexedRectBuilder(object): def __init__(self): self.last_rect = None self.initial_point = None self.reset() def set_initial_point(self, x, y): self.initial_point = (x,y) def get_initial_point(self): return self.initial_point def reset(self): self.last_rect = None self.initial_point = None def has_initial_point(self): return self.initial_point is not None def to_rect(self, i, x, y): self.last_rect = IndexedRect(i, self.initial_point[0], self.initial_point[1], x, y) self.initial_point = None return self.last_rect class BoundingBoxInputManager(object): MAX_KEPT = 20 def __init__(self): self.curr_inputs = [] self.reset() def add(self, ir: IndexedRect): self.curr_inputs.append(ir) self.curr_inputs = self.curr_inputs[-self.MAX_KEPT:] def get_n(self): return min(len(self.curr_inputs), 2) def has_n(self, n): return len(self.curr_inputs) >= n def reset(self): self.curr_inputs = [] def __getitem__(self, key): return self.curr_inputs[-2:][key] def get_2_sorted(self): return sorted(self.curr_inputs[-2:], key=lambda r: r.i) def get_last(self): if len(self.curr_inputs) == 0: return None return self.curr_inputs[-1] def remove_last(self): if self.has_n(1): last = self.curr_inputs[-1] else: last = None self.curr_inputs = self.curr_inputs[:-1] return last
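# Sketch of how the two helpers above might be driven from mouse events: the builder
# turns a press/release pair into an IndexedRect, and the input manager keeps the most
# recent rectangles. The coordinates and frame index below are illustrative only.
builder = IndexedRectBuilder()
manager = BoundingBoxInputManager()

frame_index = 7
builder.set_initial_point(10, 20)                    # e.g. on mouse press
if builder.has_initial_point():
    rect = builder.to_rect(frame_index, 110, 220)    # e.g. on mouse release
    manager.add(rect)

if manager.has_n(2):
    first, second = manager.get_2_sorted()           # ordered by frame index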
nilq/baby-python
python
# -*- coding: utf-8 -*-

import binarybrain      as bb
import binarybrain.core as core
import numpy as np
from typing import List


class Optimizer(bb.Object):
    """Base class for optimizers."""

    def __init__(self, core_optimizer=None):
        super(Optimizer, self).__init__(core_object=core_optimizer)

    def set_variables(self, params, grads):
        """Set the variables to optimize.

        Args:
            params (Variables): parameter variables to be trained
            grads (Variables): gradient variables corresponding to params
        """
        self.get_core().set_variables(params.get_core(), grads.get_core())

    def update(self):
        """Update parameters and zero-clear gradients.

        Applies an update based on the gradient variables registered with
        set_variables to the parameter variables registered with
        set_variables, then zero-clears the gradients.
        """
        return self.get_core().update()

    def zero_grad(self):
        """Zero-clear gradients.

        Zero-clears the gradient variables registered with set_variables.
        """
        return self.get_core().zero_grad()

    def step(self):
        """Update parameters.

        Applies an update based on the gradient variables registered with
        set_variables to the parameter variables registered with
        set_variables.
        """
        return self.get_core().step()

    def set_learning_rate(self, learning_rate):
        """Set the learning rate."""
        self.get_core().set_learning_rate(learning_rate)


class OptimizerSgd(Optimizer):
    """SGD optimizer.

    Args:
        learning_rate (float): learning rate
    """

    def __init__(self, learning_rate=0.001, dtype=bb.DType.FP32):
        core_optimizer = bb.search_core_object('OptimizerSgd', [dtype]).create(learning_rate=learning_rate)
        super(OptimizerSgd, self).__init__(core_optimizer=core_optimizer)


class OptimizerAdaGrad(Optimizer):
    """AdaGrad optimizer.

    Args:
        learning_rate (float): learning rate
    """

    def __init__(self, learning_rate=0.01, dtype=bb.DType.FP32):
        core_optimizer = bb.search_core_object('OptimizerAdaGrad', [dtype]).create(learning_rate=learning_rate)
        super(OptimizerAdaGrad, self).__init__(core_optimizer=core_optimizer)


class OptimizerAdam(Optimizer):
    """Adam optimizer.

    Args:
        learning_rate (float): learning rate
        beta1 (float): exponential decay rate for the first moment estimates
        beta2 (float): exponential decay rate for the second moment estimates
    """

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, dtype=bb.DType.FP32):
        core_optimizer = bb.search_core_object('OptimizerAdam', [dtype]).create(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
        super(OptimizerAdam, self).__init__(core_optimizer=core_optimizer)
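# Hedged usage sketch for the wrappers above: set_variables() expects the parameter and
# gradient Variables of a network. The accessors on the model object ("net" below, with
# get_parameters()/get_gradients()) are assumptions about the surrounding BinaryBrain
# API and are not defined in this module, so the example is left commented out.
# opt = OptimizerAdam(learning_rate=0.001)
# opt.set_variables(net.get_parameters(), net.get_gradients())
# ...compute gradients for a mini-batch, then:
# opt.update()   # applies the step and zero-clears the gradients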
nilq/baby-python
python
#coding:utf8 # Author : tuxpy # Email : q8886888@qq.com # Last modified : 2015-03-26 13:14:11 # Filename : gale/utils.py # Description : from __future__ import unicode_literals try: # py2 from urlparse import urlsplit from urllib import unquote_plus from urllib import quote_plus except ImportError: # py3 from urllib.parse import urlsplit # py3 from urllib.parse import unquote_plus from urllib.parse import quote_plus import email.utils import time import urllib from gale import escape from gale.config import CRLF import mimetypes import uuid import fcntl import gevent from gevent import (Greenlet, socket) from functools import wraps import sys def set_close_exec(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC) # 设置close exec标志,这样在reload时会关闭socket def get_gale_socket(raw_socket = None): _socket = raw_socket or socket.socket(socket.AF_INET, socket.SOCK_STREAM) _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) set_close_exec(_socket.fileno()) return _socket def parse_request_range(_range): if not _range: return 0, 0 if 'bytes=' not in _range: return 0, 0 bytes_range = _range.split('bytes=')[-1].strip() start, stop = bytes_range.split('-') start = start and int(start) or 0 stop = stop and int(stop) or 0 return start, stop def urldecode(params_url): if not params_url: # 如果没有东西的话,就返回{} return {} params_url = escape.param_decode(params_url) _d = {} # 存的是请求参数的字典形式,值是参数值列表 for _name, _value in map(lambda x: x.split('=', 1), filter(lambda k_v: '=' in k_v, params_url.split('&'))): # filter 是为了把不带有=号的参数去掉 # 对用户提交的url参数和body进行解码成unicode _d.setdefault(_name, []).append(urlunquote(_value)) return _d def urlunquote(param): if param == None: return param param = unquote_plus(escape.native_str(param)) return escape.param_decode(param) def urlquote(param): return quote_plus(escape.utf8(param)) code_mess_map = { 100: 'Continue', 101: 'Switching Protocols', 200: 'OK', 201: 'Created', 202: 'Accepted', 203: 'Non-Authoritative Information', 204: 'No Content', 205: 'Reset Content', 206: 'Partial Content', 300: 'Multiple Choices', 301: 'Moved Permanently', 302: 'Found', 303: 'See Other', 304: 'Not Modified', 305: 'Use Proxy', 307: 'Temporary Redirect', 400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', 403: 'Forbidden', 404: 'Not Found', 405: 'Method Not Allowed', 406: 'Not Acceptable', 407: 'Proxy Authentication Required', 408: 'Request Timeout', 409: 'Conflict', 410: 'Gone', 411: 'Length Required', 412: 'Precondition Failed', 413: 'Request Entity Too Large', 414: 'Request-URI Too Long', 415: 'Unsupported Media Type', 416: 'Requested Range Not Satisfiable', 417: 'Expectation Failed', 500: 'Internal Server Error', 501: 'Not Implemented', 502: 'Bad Gateway', 503: 'Service Unavailable', 504: 'Gateway Timeout', 505: 'HTTP Version Not Supported' } def format_timestamp(ts = None): if not ts: ts = time.time() return email.utils.formatdate(ts, usegmt = True) def get_mime_type(file_path): return mimetypes.guess_type(file_path)[0] or 'application/octet-stream' def made_uuid(): return uuid.uuid4().hex from multiprocessing import Manager __mgr = Manager() def ShareDict(*args, **kwargs): return __mgr.dict(*args, **kwargs) def stop_share_dict(): __mgr.shutdown() from gale.py_ver import is_py3 unicode_type = is_py3 and str or unicode def is_string(s): if is_py3: return isinstance(s, str) else: return isinstance(s, (str, unicode)) if is_py3: exec(""" def raise_exc_info(exc_info): raise 
exc_info[1].with_traceback(exc_info[2]) def exec_in(code, glob, loc=None): if isinstance(code, str): code = compile(code, '<string>', 'exec', dont_inherit=True) exec(code, glob, loc) """) else: exec(""" def raise_exc_info(exc_info): raise exc_info[0], exc_info[1], exc_info[2] def exec_in(code, glob, loc=None): if isinstance(code, basestring): # exec(string) inherits the caller's future imports; compile # the string first to prevent that. code = compile(code, '<string>', 'exec', dont_inherit=True) exec code in glob, loc """) class ObjectDict(dict): def __setattr__(self, key, value): self[key] = value def __getattr__(self, key): return self[key] def single_pattern(obj): @wraps(obj) def wrap(*args, **kwargs): if hasattr(obj, '_instance'): return obj._instance _instance = obj.__new__(obj, *args, **kwargs) obj.__init__(_instance, *args, **kwargs) obj._instance = _instance return _instance return wrap
nilq/baby-python
python
import cv2
import numpy as np
from matplotlib import pyplot as plt


# Compute the difference between two adjacent optical-flow histograms
def diffimage(lastframe, nextframe, size):
    diff_frame = nextframe - lastframe
    ABS = abs(diff_frame)
    diff_value = (ABS.sum(axis=0)).sum(axis=0) / size
    return diff_frame, diff_value


if __name__ == '__main__':
    cap = cv2.VideoCapture("../TP2_Videos/Extrait1-Cosmos_Laundromat1(340p).m4v")

    ret, lastframe = cap.read()
    lastgray = cv2.cvtColor(lastframe, cv2.COLOR_BGR2GRAY)

    ret, nextframe = cap.read()
    nextgray = cv2.cvtColor(nextframe, cv2.COLOR_BGR2GRAY)

    index = 1
    last_diff_value = 0
    lasthist = np.zeros([100, 100])
    a = np.array([0])  # keeps the two most recent differences between adjacent flow histograms

    while ret:
        index += 1
        size = nextframe.size
        flow = cv2.calcOpticalFlowFarneback(lastgray, nextgray, None,
                                            pyr_scale = 0.5,   # pyramid downscaling ratio between levels
                                            levels = 3,        # number of pyramid levels
                                            winsize = 15,      # averaging window size for smoothing the polynomial coefficients
                                            iterations = 3,    # number of iterations per level
                                            poly_n = 7,        # neighbourhood size for the polynomial approximation
                                            poly_sigma = 1.5,  # Gaussian standard deviation used to compute derivatives
                                            flags = 0)
        # flow is an optical-flow field with two channels
        nexthist = cv2.calcHist([flow], [0, 1], None, [100, 100], [-100, 100, -100, 100])
        nexthist[nexthist > 255] = 255

        diff_frame, next_diff_value = diffimage(lasthist, nexthist, size)
        a = np.append(a, next_diff_value)

        cv2.imshow('frame', nextframe)
        if (next_diff_value > 0.05 and abs(a[1] - a[0]) < 0.002) or next_diff_value > 0.1:
            cv2.imwrite('Frame_%04d.png' % index, nextframe)  # save the key frame
        a = np.delete(a, [0])

        k = cv2.waitKey(15)
        if k == 27:
            break

        lastgray = nextgray
        lasthist = nexthist
        ret, nextframe = cap.read()
        if ret:
            nextgray = cv2.cvtColor(nextframe, cv2.COLOR_BGR2GRAY)

    cap.release()
    cv2.destroyAllWindows()
nilq/baby-python
python
import pytest from mcanitexgen.animation.parser import Duration, ParserError, Time, Timeframe, Weight class Test_Timeframe_init: @pytest.mark.parametrize( "start, end, duration, expected_timeframe", [ # Deduce end and duration (0, None, None, Timeframe(0, 1, 1)), (10, None, None, Timeframe(10, 11, 1)), # Deduce duration (0, 20, None, Timeframe(0, 20, 20)), (11, 22, None, Timeframe(11, 22, 11)), # Deduce end (0, None, 5, Timeframe(0, 5, 5)), (15, None, 5, Timeframe(15, 20, 5)), # All set (0, 10, 10, Timeframe(0, 10, 10)), ], ) def test_args(self, start, end, duration, expected_timeframe): assert Timeframe(start, end, duration) == expected_timeframe @pytest.mark.parametrize( "start, end, duration, match", [ (None, None, None, "Timeframe must have at least one of start, end, duration set"), (None, 2, 20, "Timeframes without start can't have end and duration"), (0, 5, 20, "Start, end and duration of timeframe don't match: 0, 5, 20"), ], ) def test_illegal_args(self, start, end, duration, match): with pytest.raises(ParserError, match=match): Timeframe(start, end, duration) class Test_Time_from_args: @pytest.mark.parametrize( "start, end, duration, weight, expected_time", [ (None, None, None, None, None), # Weight (None, None, None, 12, Weight(12)), # Duration (None, None, 10, None, Duration(10)), # Timeframe (0, None, None, None, Timeframe(0, 1, 1)), (1, 20, None, None, Timeframe(1, 20, 19)), (1, 20, 19, None, Timeframe(1, 20, 19)), (1, None, 19, None, Timeframe(1, 20, 19)), (None, 10, None, None, Timeframe(None, 10, None)), ], ) def test_args(self, start, end, duration, weight, expected_time): assert Time.from_args(start, end, duration, weight) == expected_time @pytest.mark.parametrize( "start, end, duration, weight, match", [ # Weight (None, None, None, 0, "Weight of time must be at least 1"), (None, None, 1, 1, "Weighted time can't have start, end or duration"), (None, 1, None, 1, "Weighted time can't have start, end or duration"), (1, None, None, 1, "Weighted time can't have start, end or duration"), # Duration (None, None, 0, None, "Duration must be at least 1"), (None, None, -10, None, "Duration must be at least 1"), ], ) def test_illegal_args(self, start, end, duration, weight, match): with pytest.raises(ParserError, match=match): Time.from_args(start, end, duration, weight)
nilq/baby-python
python
#!/usr/bin/env python3 from yaml import load class ComposePlantuml: def __init__(self): pass def parse(self, data): return load(data) def link_graph(self, compose, notes=False): result = 'skinparam componentStyle uml2\n' for component in sorted(self.components(compose)): result += '[{0}]\n'.format(component) for source, destination in sorted(self.links(compose)): result += '[{0}] --> [{1}]\n'.format(source, destination) for source, destination in sorted(self.dependencies(compose)): result += '[{0}] ..> [{1}] : depends on\n'.format(source, destination) if notes: for component_name in sorted(self.components(compose)): component = self.component(compose, component_name) if 'labels' in component: labels = [ '{0}={1}'.format(key, value) for key, value in component['labels'].items() ] result += 'note top of [{0}]\n {1}\nend note\n'.format(component_name, '\n '.join(labels)) return result.strip() def boundaries(self, compose, group=False, notes=False): result = 'skinparam componentStyle uml2\n' result += 'cloud system {\n' for component in sorted(self.components(compose)): if self.has_service_external_ports(compose, component) or self.has_service_volumes(compose, component): result += ' [{0}]\n'.format(component) result += '}\n' volume_registry = {} volume_uml = '' for volume in sorted(self.volumes(compose)): if not self.is_volume_used(compose, volume): continue volume_uml += 'database {0}'.format(volume) + ' {\n' for path in sorted(self.volume_usage(compose, volume)): id = self.volume_identifier(volume, path) if id in volume_registry: continue volume_registry[id] = 'volume_{0}'.format(len(volume_registry.keys()) + 1) volume_uml += ' [{0}] as {1}\n'.format(path, volume_registry[id]) volume_uml += '}\n' result += self.group('volumes', volume_uml) if group else volume_uml port_uml = '' port_links = '' for service, host, container in sorted(self.ports(compose)): port = host if container is None else '{0} : {1}'.format(host, container) port_links += '[{0}] --> {1}\n'.format(service, port) port_uml += 'interface {0}\n'.format(host) result += self.group('ports', port_uml) if group else '' result += port_links for volume in sorted(self.volumes(compose)): for service, volume_path in sorted(self.service_using_path(compose, volume)): name = volume_path if '{0}.{1}'.format(volume, volume_path) in volume_registry: name = volume_registry['{0}.{1}'.format(volume, volume_path)] result += '[{0}] --> {1}\n'.format(service, name) if notes: for component_name in sorted(self.components(compose)): if not (self.has_service_external_ports(compose, component_name) or self.has_service_volumes(compose, component_name)): continue if not self.labels(compose, component_name): continue labels = [ '{0}={1}'.format(key, value) for key, value in self.labels(compose, component_name).items() ] result += 'note top of [{0}]\n {1}\nend note\n'.format(component_name, '\n '.join(labels)) return result.strip() @staticmethod def labels(compose, service): service = ComposePlantuml.component(compose, service) if 'labels' not in service: return None if type(service['labels']) is str: key, value = service['labels'].split(':') return {key: value} return service['labels'] @staticmethod def group(name, content): if len(content) == 0: return '' return 'package {0} '.format(name) + '{\n ' + '\n '.join(content.split('\n')).strip() + '\n}\n' @staticmethod def is_volume_used(compose, volume): components = compose if 'version' not in compose else compose.get('services', {}) for _, component in components.items(): for volume_name in 
component.get('volumes', {}): if volume_name.startswith('{0}:'.format(volume)): return True return False @staticmethod def is_service_used(compose, service): components = compose if 'version' not in compose else compose.get('services', {}) for _, component in components.items(): for link in component.get('links', []): link = link if ':' not in link else link.split(':')[0] if link == service: return True for dependency in component.get('depends_on', []): if dependency == service: return True return False @staticmethod def has_service_external_ports(compose, service): components = compose if 'version' not in compose else compose.get('services', {}) for name, component in components.items(): if service != name: continue return 'ports' in component return False @staticmethod def has_service_volumes(compose, service): components = compose if 'version' not in compose else compose.get('services', {}) for name, component in components.items(): if service != name: continue if 'volumes' not in component: return False for volume in component['volumes']: if volume.startswith('/'): continue if ':' in volume: return True return False @staticmethod def volume_identifier(volume, path): return '{0}.{1}'.format(volume, path) @staticmethod def components(compose): if 'version' not in compose: return [component for component in compose] return [component for component in compose.get('services', {})] @staticmethod def component(compose, name): root = compose if 'version' not in compose else compose['services'] assert name in root return root[name] @staticmethod def links(compose): result = [] components = compose if 'version' not in compose else compose.get('services', {}) for component_name, component in components.items(): for link in component.get('links', []): link = link if ':' not in link else link.split(':')[0] result.append((component_name, link)) return result @staticmethod def dependencies(compose): result = [] components = compose if 'version' not in compose else compose.get('services', {}) for component_name, component in components.items(): for dependency in component.get('depends_on', []): result.append((component_name, dependency)) return result @staticmethod def ports(compose): result = [] components = compose if 'version' not in compose else compose.get('services', {}) for component_name, component in components.items(): for port in component.get('ports', []): port = str(port) host, container = (port, None) if ':' in port: host, container = port.split(':') result.append((component_name, host, container)) return result @staticmethod def volumes(compose): if 'version' not in compose: return [] # TODO: support for version 1 volumes = compose.get('volumes', {}) return list(volumes.keys()) @staticmethod def volume_usage(compose, volume): result = [] components = compose if 'version' not in compose else compose.get('services', {}) for component_name, component in components.items(): for volume_name in component.get('volumes', {}): if not volume_name.startswith('{0}:'.format(volume)): continue result.append(volume_name.split(':')[1]) return result @staticmethod def service_using_path(compose, volume): result = [] components = compose if 'version' not in compose else compose.get('services', {}) for component_name, component in components.items(): for volume_name in component.get('volumes', {}): if not volume_name.startswith('{0}:'.format(volume)): continue result.append((component_name, volume_name.split(':')[1])) return result
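# Quick usage sketch for the class above: parse a small docker-compose document and
# render its link graph as PlantUML. The compose snippet and expected output shown in
# the comments are illustrative.
if __name__ == '__main__':
    sample = (
        "version: '2'\n"
        "services:\n"
        "  web:\n"
        "    links:\n"
        "      - db\n"
        "  db:\n"
        "    image: postgres\n"
    )
    plantuml = ComposePlantuml()
    compose = plantuml.parse(sample)
    print(plantuml.link_graph(compose))
    # skinparam componentStyle uml2
    # [db]
    # [web]
    # [web] --> [db]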
nilq/baby-python
python
# Generated by Django 3.2.7 on 2021-09-09 18:17 import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('auctions', '0008_add_expiry_alter_category_on_listing'), ] operations = [ migrations.AlterField( model_name='listing', name='expiry_date', field=models.DateTimeField(default=datetime.datetime(2021, 10, 7, 18, 17, 0, 930064, tzinfo=utc), verbose_name='expiry date'), ), ]
nilq/baby-python
python
import pytest from astropy.io import fits import numpy as np from numpy.testing import assert_array_equal from lightkurve import search_lightcurve from lightkurve.io.qlp import read_qlp_lightcurve from lightkurve.io.detect import detect_filetype @pytest.mark.remote_data def test_qlp(): """Can we read in QLP light curves?""" url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits" with fits.open(url, mode="readonly") as hdulist: # Can we auto-detect a QLP file? assert detect_filetype(hdulist) == "QLP" # Are the correct fluxes read in? lc = read_qlp_lightcurve(url, quality_bitmask=0) assert lc.meta["FLUX_ORIGIN"] == "sap_flux" assert_array_equal(lc.flux.value, hdulist[1].data["SAP_FLUX"]) @pytest.mark.remote_data def test_search_qlp(): """Can we search and download QLP light curves from MAST?""" search = search_lightcurve("TIC 277554109", author="QLP", sector=11) assert len(search) == 1 assert search.table["author"][0] == "QLP" lc = search.download() assert type(lc).__name__ == "TessLightCurve" assert lc.sector == 11 assert lc.author == "QLP"
nilq/baby-python
python
__version__ = 0.1 import os import logging import configparser import daiquiri import daiquiri.formatter _ROOT = os.path.dirname(os.path.abspath(__file__)) _CONFIG = os.path.join(_ROOT, 'config.ini') FORMAT = ( "%(asctime)s :: %(color)s%(levelname)s :: %(name)s :: %(funcName)s :" "%(message)s%(color_stop)s" ) daiquiri.setup(level=logging.INFO, outputs=( daiquiri.output.Stream(formatter=daiquiri.formatter.ColorFormatter( fmt=FORMAT)), )) logger = daiquiri.getLogger("root") if not os.path.isfile(_CONFIG): logger.error("Configuration file '%s' not found", _CONFIG) config = None else: config = configparser.ConfigParser(allow_no_value=True) with open(_CONFIG) as fobj: config.read_file(fobj)
nilq/baby-python
python
import numpy as np from ._CFunctions import _Cgcpm import DateTimeTools as TT def GCPM(x,y,z,Date,ut,Kp=1.0,Verbose=False): ''' Calculates the Global Core Plasma Model at some given position(s) and time(s). Inputs ====== x : float scalar or array of x_SM (Solar Magnetic coordinates) component of the position, where units are in R_E. y : float scalar or array of y_SM z : float scalar or array of z_SM Date : int Date(s) in format yyyymmdd ut : float Time(s) in hours from beginning of day where ut = hh + mm/60.0 + ss/3600.0 Kp : float Kp index (or indices) Verbose : bool If True, model calculation progress will be displayed Returns ======= ne : float32 Array of electron densities in 1/cm^3 nH : float32 Array of proton densities in 1/cm^3 nHe : float32 Array of helium ion densities in 1/cm^3 nO : float 32 Array of Oxygen densities in 1/cm^3 ''' #reformat the positions _x = np.array([x]).flatten().astype('float32') _y = np.array([y]).flatten().astype('float32') _z = np.array([z]).flatten().astype('float32') _n = np.int32(_x.size) #sort out the dates dates = np.zeros(_n,dtype='int32') + Date _years = np.int32(dates//10000) _dayno = np.int32(TT.DayNo(dates)) #times _ut = np.zeros(_n,dtype='float32') + ut #Kp indices _kp = np.zeros(_n,dtype='float32') + Kp #Verbose flag _verb = np.int32(Verbose) #output arrays ne = np.zeros(_n,dtype='float32') nH = np.zeros(_n,dtype='float32') nHe = np.zeros(_n,dtype='float32') nO = np.zeros(_n,dtype='float32') #call the C wrapper _Cgcpm(_x,_y,_z,_years,_dayno,_ut,_kp,_n,ne,nH,nHe,nO,_verb) return ne,nH,nHe,nO
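# Usage sketch for the wrapper above. It requires the compiled GCPM C library that
# _Cgcpm binds to, so the call is left commented out; the position, date and Kp values
# are illustrative only.
# ne, nH, nHe, nO = GCPM(3.0, 0.0, 0.0, 20120101, 12.0, Kp=2.0)
# print(ne[0])   # electron density in cm^-3 at x = 3 R_E on the SM x-axis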
nilq/baby-python
python
# Generated by Django 3.1.5 on 2021-01-23 02:13 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Cliente', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')), ('nombre', models.CharField(max_length=50, verbose_name='nombre cliente')), ('apellido', models.CharField(max_length=50, verbose_name='apellido cliente')), ('dpi', models.CharField(max_length=13, unique=True)), ('telefono', models.CharField(max_length=12, verbose_name='telefono cliente')), ('direccion', models.CharField(max_length=100, verbose_name='direccion cliente')), ], options={ 'abstract': False, }, ), ]
nilq/baby-python
python
# Natural Language Processing # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('googleplaystoreuserreviews.csv') dataset.dropna(inplace=True) X = dataset.iloc[:,0].values # Cleaning the texts import re import nltk nltk.download('stopwords') from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer corpus = [] for i in range(0, 37427): review = re.sub('[^a-zA-Z]', ' ', str(X[i])) review = review.lower() review = review.split() ps = PorterStemmer() review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))] review = ' '.join(review) corpus.append(review) # Creating the Bag of Words model from sklearn.feature_extraction.text import CountVectorizer cv = CountVectorizer() x = cv.fit_transform(corpus).toarray() y = dataset.iloc[:, 1].values from sklearn.preprocessing import LabelEncoder, OneHotEncoder labelencoder_X = LabelEncoder() y = labelencoder_X.fit_transform(y) # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 0) from sklearn.metrics import r2_score # Fitting Logistic regression to the Training set from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) r2_score(y_test, y_pred) # Fitting Naive Bayes to the Training set from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(X_train, y_train) y_pred1 = classifier.predict(X_test) r2_score(y_test, y_pred1) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix,accuracy_score cm = confusion_matrix(y_test, y_pred) accuracy_score(y_test, y_pred) from xgboost import XGBClassifier classifier = XGBClassifier() classifier.fit(X_train, y_train) # Predicting the Test set results y_pred = classifier.predict(X_test) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) # Applying k-Fold Cross Validation from sklearn.model_selection import cross_val_score accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10) accuracies.mean() accuracies.std()
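# Note: above, CountVectorizer is fit on the full corpus before train_test_split, so the
# test documents influence the vocabulary (a mild form of data leakage). A sketch that
# keeps the vectorizer inside a pipeline and fits it on the training texts only; the
# variable names are illustrative and reuse the imports already made above.
from sklearn.pipeline import make_pipeline

texts_train, texts_test, y_train2, y_test2 = train_test_split(
    corpus, y, test_size=0.20, random_state=0)
pipe = make_pipeline(CountVectorizer(), LogisticRegression(random_state=0))
pipe.fit(texts_train, y_train2)
print(pipe.score(texts_test, y_test2))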
nilq/baby-python
python
from linebot.models import TextSendMessage, FlexSendMessage from app.config import CELEBRATING_TARGET from app.crud.leaderboard import update_amount, get_list_of_amount from . import line_bot_api, exception_handler @exception_handler def celebrating_birthday(line_event): group_id = line_event.source.group_id user_id = line_event.source.user_id update_amount(group_id, user_id) line_bot_api.reply_message(line_event.reply_token, TextSendMessage("🎉")) @exception_handler def send_leaderboard(line_event): group_id = line_event.source.group_id line_bot_api.push_message( group_id, [TextSendMessage("집계중입니다...")], notification_disabled=True ) response = get_list_of_amount(group_id) contents = { "type": "bubble", "styles": {"header": {"backgroundColor": "#E3D3A3"}}, "header": { "type": "box", "layout": "vertical", "contents": [ { "type": "text", "text": "생일 축하 리더보드", "size": "xl", "align": "center", "weight": "bold", } ], }, "body": {"type": "box", "layout": "vertical", "spacing": "md", "contents": []}, "footer": { "type": "box", "layout": "vertical", "contents": [ { "type": "button", "action": { "type": "message", "label": "생일 축하하기", "text": f"{CELEBRATING_TARGET}아 생일 축하해!", }, "style": "primary", } ], }, } count = 1 rank = 1 last_amount = 0 for item in response["Items"]: if int(item["amount"]) != last_amount: rank = count last_amount = int(item["amount"]) user_id = item["user_id"] user_profile = line_bot_api.get_group_member_profile(group_id, user_id) user_name = user_profile.display_name leaderboard_item = { "type": "box", "layout": "horizontal", "contents": [ {"type": "text", "text": f"{rank}위", "flex": 3, "weight": "bold"}, {"type": "text", "text": user_name, "flex": 6, "weight": "bold"}, { "type": "text", "text": str(item["amount"]), "flex": 2, "align": "end", "gravity": "center", }, ], } if rank is 1: leaderboard_item["contents"][0]["size"] = "xxl" leaderboard_item["contents"][0]["color"] = "#A4B60F" leaderboard_item["contents"][1]["size"] = "xxl" elif rank is 2: leaderboard_item["contents"][0]["size"] = "xl" leaderboard_item["contents"][0]["color"] = "#878787" leaderboard_item["contents"][1]["size"] = "xl" elif rank is 3: leaderboard_item["contents"][0]["size"] = "lg" leaderboard_item["contents"][0]["color"] = "#8A6200" leaderboard_item["contents"][1]["size"] = "lg" else: pass contents["body"]["contents"].append(leaderboard_item) count += 1 line_bot_api.reply_message( line_event.reply_token, FlexSendMessage(alt_text="Leaderboard", contents=contents), )
nilq/baby-python
python
#!/usr/bin/env python3 import unittest import timeout_decorator from challenges.codility.lessons.q019.stone_wall_v001 import * MAX_N = 100000 MIN_ELEMENT = 1 MAX_ELEMENT = 1000000000 class StoneWallTestCase(unittest.TestCase): def test_description_examples(self): self.assertEqual(7, solution([8, 8, 5, 7, 9, 8, 7, 4, 8])) # Correctness def test_simple_1(self): self.assertEqual(1, solution([888])) def test_simple_2(self): self.assertEqual(1, solution([888, 888])) def test_simple_3(self): self.assertEqual(3, solution([888, 1, 888])) def test_simple_4(self): self.assertEqual(3, solution([5, 5, 4, 5])) self.assertEqual(3, solution([5, 5, 4, 4, 5])) def test_boundary_cases(self): n = 1000 self.assertEqual(n - MIN_ELEMENT + 1, solution((range(MIN_ELEMENT, n + 1)))) # Performance @timeout_decorator.timeout(0.015) def test_medium1(self): self.assertEqual(8, solution([4, 5, 6, 7, 7, 7, 8, 1, 3, 2])) @timeout_decorator.timeout(0.015) def test_medium2(self): self.assertEqual(3, solution([1, 2, 2, 1, 1, 1, 1, 1, 1, 2])) @timeout_decorator.timeout(0.015) def test_medium3(self): self.assertEqual(6, solution([17, 1, 17, 2, 2, 5, 5, 2, 5, 5])) @timeout_decorator.timeout(0.015) def test_medium4(self): self.assertEqual(15, solution([17, 5, 19, 69, 5, 10, 19, 92, 24, 11, 19, 95, 16, 8, 19, 68])) @timeout_decorator.timeout(0.350) def test_large_pyramid(self): start = 1 end = 17000 array = list(range(start, end + 1)) + list(range(end, start - 1, -1)) self.assertEqual(end - start + 1, solution(array)) @timeout_decorator.timeout(0.650) def test_large_increasing_decreasing(self): start = 2 end = 20000 array = list(range(start, end + 1, 2)) + list(range(end, start - 1, -2)) self.assertEqual((end - start) // 2 + 1, solution(array)) start = 3 end = 21000 array = list(range(start, end + 1, 3)) + list(range(end, start - 1, -3)) self.assertEqual((end - start) // 3 + 1, solution(array)) @timeout_decorator.timeout(0.350) def test_large_up_to_20(self): self.__test_sequence(200) @timeout_decorator.timeout(0.350) def test_large_up_to_100(self): self.__test_sequence(1000) @timeout_decorator.timeout(0.350) def test_large_max(self): self.__test_sequence(10000) def __test_sequence(self, n, start=MIN_ELEMENT): self.assertEqual(n, solution(range(start, start + n))) if __name__ == '__main__': unittest.main()
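# The module under test (stone_wall_v001) is not included in this dump. A minimal
# stack-based solution consistent with the tests above might look like the sketch below
# (O(N) time, O(N) extra space); it is kept as a comment so it does not shadow the
# imported solution().
#
# def solution(H):
#     stack = []
#     blocks = 0
#     for h in H:
#         # Drop wall segments taller than the current height.
#         while stack and stack[-1] > h:
#             stack.pop()
#         # Start a new block only if the current height is not already open.
#         if not stack or stack[-1] < h:
#             stack.append(h)
#             blocks += 1
#     return blocks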
nilq/baby-python
python
# Create CSS using GitHub's colour scheme from a JSON source like https://github.com/doda/github-language-colors
import json

with open('github_colors.json') as colors:
    with open('github_colors.css', 'w') as css:
        m = json.loads(colors.read())
        for lang in m:
            color = m[lang]
            # CSS class names cannot contain '+', '#' or spaces, so sanitise the language name
            lang_safe = lang.replace('+', 'plus').replace('#', 'sharp').replace(' ', '')
            css.write('.project-{0} {{ border-bottom: 5px solid {1}; }}\n'.format(lang_safe, color))
nilq/baby-python
python
TRAINING_DATA = [ ( "i went to amsterdem last year and the canals were beautiful", {"entities": [(10, 19, "TOURIST_DESTINATION")]}, ), ( "You should visit Paris once in your life, but the Eiffel Tower is kinda boring", {"entities": [(17, 22, "TOURIST_DESTINATION")]}, ), ("There's also a Paris in Arkansas, lol", {"entities": []}), ( "Berlin is perfect for summer holiday: lots of parks, great nightlife, cheap beer!", {"entities": [(0, 6, "TOURIST_DESTINATION")]}, ), ]
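# TRAINING_DATA above is in spaCy's (text, {"entities": [...]}) format. A short sketch
# of feeding it to a blank English pipeline, assuming spaCy v3 is available (the API
# names differ in v2); the epoch count is illustrative.
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
ner = nlp.add_pipe("ner")
ner.add_label("TOURIST_DESTINATION")

optimizer = nlp.initialize()
for _ in range(10):
    losses = {}
    for text, annotations in TRAINING_DATA:
        example = Example.from_dict(nlp.make_doc(text), annotations)
        nlp.update([example], sgd=optimizer, losses=losses)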
nilq/baby-python
python
""" @leofansq Basic function: show_img(name, img): Show the image find_files(directory, pattern): Method to find target files in one directory, including subdirectory Load function: load_calib_cam2cam(filename, debug=False): Only load R_rect & P_rect for need load_calib_lidar2cam(filename, debug=False): Load calib parameters for LiDAR2Cam load_calib(filename, debug=False): Load the calib parameters which has R_rect & P_rect & Tr in the same file load_img(filename, debug=False): Load the image load_lidar(filename, debug=False): Load the PointCloud Process function: cal_proj_matrix_raw(filename_c2c, filename_l2c, camera_id, debug=False): Compute the projection matrix from LiDAR to Img cal_proj_matrix(filename, camera_id, debug=False): Compute the projection matrix from LiDAR to Image project_lidar2img(img, pc, p_matrix, debug=False): Project the LiDAR PointCloud to Image generate_colorpc(img, pc, pcimg, debug=False): Generate the PointCloud with color save_pcd(filename, pc_color): Save the PointCloud with color in the term of .pcd """ import cv2 import numpy as np from pyntcloud import PyntCloud import os import fnmatch from tqdm import tqdm from pprint import pprint #**********************************************************# # Basic Function # #**********************************************************# def show_img(name, img): """ Show the image Parameters: name: name of window img: image """ cv2.namedWindow(name, 0) cv2.imshow(name, img) cv2.waitKey(50) def find_files(directory, pattern): """ Method to find target files in one directory, including subdirectory :param directory: path :param pattern: filter pattern :return: target file path list """ file_list = [] for root, _, files in os.walk(directory): for basename in files: if fnmatch.fnmatch(basename, pattern): filename = os.path.join(root, basename) file_list.append(filename) return file_list #**********************************************************# # Load Function # #**********************************************************# def load_calib_cam2cam(filename, debug=False): """ Only load R_rect & P_rect for neeed Parameters: filename of the calib file Return: R_rect: a list of r_rect(shape:3*3) P_rect: a list of p_rect(shape:3*4) """ with open(filename) as f_calib: lines = f_calib.readlines() R_rect = [] P_rect = [] for line in lines: title = line.strip().split(' ')[0] if title[:-4] == "R_rect": r_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) r_r = np.reshape(r_r, (3,3)) R_rect.append(r_r) elif title[:-4] == "P_rect": p_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) p_r = np.reshape(p_r, (3,4)) P_rect.append(p_r) if debug: print ("R_rect:") pprint (R_rect) print () print ("P_rect:") pprint (P_rect) return R_rect, P_rect def load_calib_lidar2cam(filename, debug=False): """ Load calib Parameters: filename of the calib file Return: tr: shape(4*4) [ r t 0 0 0 1] """ with open(filename) as f_calib: lines = f_calib.readlines() for line in lines: title = line.strip().split(' ')[0] if title[:-1] == "R": r = np.array(line.strip().split(' ')[1:], dtype=np.float32) r = np.reshape(r, (3,3)) if title[:-1] == "T": t = np.array(line.strip().split(' ')[1:], dtype=np.float32) t = np.reshape(t, (3,1)) tr = np.hstack([r,t]) tr = np.vstack([tr,np.array([0,0,0,1])]) if debug: print () print ("Tr:") print (tr) return tr def load_calib(filename, debug=False): """ Load the calib parameters which has R_rect & P_rect & Tr in the same file Parameters: filename: the filename of the calib file Return: R_rect, P_rect, Tr """ with 
open(filename) as f_calib: lines = f_calib.readlines() P_rect = [] for line in lines: title = line.strip().split(' ')[0] if len(title): if title[0] == "R": R_rect = np.array(line.strip().split(' ')[1:], dtype=np.float32) R_rect = np.reshape(R_rect, (3,3)) elif title[0] == "P": p_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) p_r = np.reshape(p_r, (3,4)) P_rect.append(p_r) elif title[:-1] == "Tr_velo_to_cam": Tr = np.array(line.strip().split(' ')[1:], dtype=np.float32) Tr = np.reshape(Tr, (3,4)) Tr = np.vstack([Tr,np.array([0,0,0,1])]) return R_rect, P_rect, Tr def load_img(filename, debug=False): """ Load the image Parameter: filename: the filename of the image Return: img: image """ img = cv2.imread(filename) if debug: show_img("Image", img) return img def load_lidar(filename, debug=False): """ Load the PointCloud Parameter: filename: the filename of the PointCloud Return: points: PointCloud associated with the image """ # N*4 -> N*3 points = np.fromfile(filename, dtype=np.float32) points = np.reshape(points, (-1,4)) points = points[:, :3] points.tofile("./temp_pc.bin") # Remove all points behind image plane (approximation) cloud = PyntCloud.from_file("./temp_pc.bin") cloud.points = cloud.points[cloud.points["x"]>=0] points = np.array(cloud.points) if debug: print (points.shape) return points #**********************************************************# # Process Function # #**********************************************************# def cal_proj_matrix_raw(filename_c2c, filename_l2c, camera_id, debug=False): """ Compute the projection matrix from LiDAR to Img Parameters: filename_c2c: filename of the calib file for cam2cam filename_l2c: filename of the calib file for lidar2cam camera_id: the NO. of camera Return: P_lidar2img: the projection matrix from LiDAR to Img """ # Load Calib Parameters R_rect, P_rect = load_calib_cam2cam(filename_c2c, debug) tr = load_calib_lidar2cam(filename_l2c, debug) # Calculation R_cam2rect = np.hstack([R_rect[0], np.array([[0],[0],[0]])]) R_cam2rect = np.vstack([R_cam2rect, np.array([0,0,0,1])]) P_lidar2img = np.matmul(P_rect[camera_id], R_cam2rect) P_lidar2img = np.matmul(P_lidar2img, tr) if debug: print () print ("P_lidar2img:") print (P_lidar2img) return P_lidar2img def cal_proj_matrix(filename, camera_id, debug=False): """ Compute the projection matrix from LiDAR to Img Parameters: filename: filename of the calib file camera_id: the NO. 
of camera Return: P_lidar2img: the projection matrix from LiDAR to Img """ # Load Calib Parameters R_rect, P_rect, tr = load_calib(filename, debug) # Calculation R_cam2rect = np.hstack([R_rect, np.array([[0],[0],[0]])]) R_cam2rect = np.vstack([R_cam2rect, np.array([0,0,0,1])]) P_lidar2img = np.matmul(P_rect[camera_id], R_cam2rect) P_lidar2img = np.matmul(P_lidar2img, tr) if debug: print () print ("P_lidar2img:") print (P_lidar2img) return P_lidar2img def project_lidar2img(img, pc, p_matrix, debug=False): """ Project the LiDAR PointCloud to Image Parameters: img: Image pc: PointCloud p_matrix: projection matrix """ # Dimension of data & projection matrix dim_norm = p_matrix.shape[0] dim_proj = p_matrix.shape[1] # Do transformation in homogenuous coordinates pc_temp = pc.copy() if pc_temp.shape[1]<dim_proj: pc_temp = np.hstack([pc_temp, np.ones((pc_temp.shape[0],1))]) points = np.matmul(p_matrix, pc_temp.T) points = points.T temp = np.reshape(points[:,dim_norm-1], (-1,1)) points = points[:,:dim_norm]/(np.matmul(temp, np.ones([1,dim_norm]))) # Plot if debug: img_copy = img.copy() depth_max = np.max(pc[:,0]) for idx,i in enumerate(points): color = int((pc[idx,0]/depth_max)*255) cv2.rectangle(img_copy, (int(i[0]-1),int(i[1]-1)), (int(i[0]+1),int(i[1]+1)), (0, 0, color), -1) show_img("Test", img_copy) return points def generate_colorpc(img, pc, pcimg, debug=False): """ Generate the PointCloud with color Parameters: img: image pc: PointCloud pcimg: PointCloud project to image Return: pc_color: PointCloud with color e.g. X Y Z R G B """ x = np.reshape(pcimg[:,0], (-1,1)) y = np.reshape(pcimg[:,1], (-1,1)) xy = np.hstack([x,y]) pc_color = [] for idx, i in enumerate(xy): if (i[0]>1 and i[0]<img.shape[1]) and (i[1]>1 and i[1]<img.shape[0]): bgr = img[int(i[1]), int(i[0])] p_color = [pc[idx][0], pc[idx][1], pc[idx][2], bgr[2], bgr[1], bgr[0]] pc_color.append(p_color) pc_color = np.array(pc_color) return pc_color def save_pcd(filename, pc_color): """ Save the PointCloud with color in the term of .pcd Parameter: filename: filename of the pcd file pc_color: PointCloud with color """ f = open(filename, "w") f.write("# .PCD v0.7 - Point Cloud Data file format\n") f.write("VERSION 0.7\n") f.write("FIELDS x y z rgb\n") f.write("SIZE 4 4 4 4\n") f.write("TYPE F F F U\n") f.write("COUNT 1 1 1 1\n") f.write("WIDTH {}\n".format(pc_color.shape[0])) f.write("WIDTH {}\n".format(pc_color.shape[0])) f.write("HEIGHT 1\n") f.write("POINTS {}\n".format(pc_color.shape[0])) f.write("DATA ascii\n") for i in pc_color: rgb = (int(i[3])<<16) | (int(i[4])<<8) | (int(i[5]) | 1<<24) f.write("{:.6f} {:.6f} {:.6f} {}\n".format(i[0],i[1],i[2],rgb)) # f.write("{:.6f} {:.6f} {:.6f} {}\n".format(i[0],i[1],i[2],i[3],i[4],i[5])) f.close() if __name__ == '__main__': # Option calib_cam2cam = "./calib/calib_cam_to_cam.txt" calib_lidar2camera = "./calib/calib_velo_to_cam.txt" camera_id = 1 filepath_img = "./img/000003.png" # filepath_img = "./new.png" filepath_lidar = "./lidar/000003.bin" filename_save = "./test.pcd" debug = True # Process p_matrix = cal_proj_matrix_raw(calib_cam2cam, calib_lidar2camera, camera_id, debug) img = load_img(filepath_img, debug) img = img[0:150,0:500] pc = load_lidar(filepath_lidar, debug) pcimg = project_lidar2img(img, pc, p_matrix, debug) pc_color = generate_colorpc(img, pc, pcimg) save_pcd(filename_save, pc_color) if debug: key = cv2.waitKey(0) & 0xFF cv2.destroyAllWindows()
nilq/baby-python
python
from dotenv.main import find_dotenv import tweepy import time import random from dotenv import load_dotenv import os import requests load_dotenv(find_dotenv()) API_KEY = os.getenv('API_KEY') API_SECRET_KEY = os.getenv('API_SECRET_KEY') ACCESS_TOKEN = os.getenv('ACCESS_TOKEN') ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET') auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY) auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) api = tweepy.API(auth, wait_on_rate_limit=True) try: api.verify_credentials() print("Authentication successful!\n") except: print("Unable to authenticate...") for i in range(0, 1000): try: response = requests.get( "https://api.spaceflightnewsapi.net/v3/articles") res = response.json() rand_no = random.randint(0, 9) tweet = res[rand_no]["summary"]+" "+res[rand_no]["url"] if(len(tweet) > 280): tweet = res[rand_no]["title"]+". "+res[rand_no]["url"] print("\nSummary longer than 280 so tweeted title") api.update_status(tweet) print(tweet+" Tweeted\n") i = i+1 time.sleep(86400) except tweepy.TweepyException as e: print(e) except StopIteration: break
nilq/baby-python
python
# Copyright 2017 Hosang Yoon # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Program for training Use as (for example): DEV="device=cuda0" # single GPU DEV="contexts=dev0->cuda0;dev1->cuda1" # multi GPU (currently incomplete) FLAGS="floatX=float32,"$DEV",gpuarray.preallocate=1,base_compiledir=theano" THEANO_FLAGS=$FLAGS python -u train.py --data_dir=$DATA_DIR \ --save_to=$WORKSPACE_DIR/workspace_$NAME \ [--load_from=$WORKSPACE_DIR/workspace_$LOADNAME] [--seed=some_number] \ | tee -a $WORKSPACE_DIR/$NAME".log" - Device "cuda$" means $-th GPU - Flag contexts can map any number of GPUs to be used for data parallelism (this feature is incomplete until Theano completes implementation of support for this flag) - Flag gpuarray.preallocate reserves given ratio of GPU mem (reduce if needed) - Flag base_compiledir directs intermediate files to pwd/theano to avoid lock conflicts between multiple training instances (by default ~/.theano) - $NAME == $LOADNAME is permitted """ from __future__ import absolute_import, division, print_function from six import iterkeys, itervalues, iteritems from collections import OrderedDict import argparse from net import Net from data import build_id_idx, DataIter import time import numpy as np import theano as th from subprocess import call import sys def main(): options = OrderedDict() options['input_dim'] = 44 options['target_dim'] = 1 options['unit_type'] = 'lstm' # fc/lstm/gru options['lstm_peephole'] = True options['loss_type'] = 'l2' # l2/l1/huber # options['huber_delta'] = 0.33 # depends on target's scale options['net_width'] = 512 options['net_depth'] = 12 options['batch_size'] = 128 options['window_size'] = 128 options['step_size'] = 64 options['init_scale'] = 0.02 options['init_use_ortho'] = False options['weight_norm'] = False options['layer_norm'] = False options['residual_gate'] = True options['learn_init_states'] = True options['learn_id_embedding'] = False # options['id_embedding_dim'] = 16 options['learn_clock_params'] = False # options['clock_t_exp_lo'] = 1. # for learn_clock_params # options['clock_t_exp_hi'] = 6. # for learn_clock_params # options['clock_r_on'] = 0.2 # for learn_clock_params # options['clock_leak_rate'] = 0.001 # for learn_clock_params # options['grad_norm_clip'] = 2. 
# comment out to turn off options['update_type'] = 'nesterov' # sgd/momentum/nesterov options['update_mu'] = 0.9 # for momentum/nesterov options['force_type'] = 'adadelta' # vanilla/adadelta/rmsprop/adam options['force_ms_decay'] = 0.99 # for adadelta/rmsprop # options['force_adam_b1'] = 0.9 # options['force_adam_b2'] = 0.999 options['frames_per_epoch'] = 8 * 1024 * 1024 options['lr_init_val'] = 1e-5 options['lr_lower_bound'] = 1e-7 options['lr_decay_rate'] = 0.5 options['max_retry'] = 10 options['unroll_scan'] = False # faster training/slower compile if options['unroll_scan']: sys.setrecursionlimit(32 * options['window_size']) # 32 is empirical """ Parse arguments, list files, and THEANO_FLAG settings """ parser = argparse.ArgumentParser() parser.add_argument('--data_dir' , type = str, required = True) parser.add_argument('--save_to' , type = str, required = True) parser.add_argument('--load_from', type = str) parser.add_argument('--seed' , type = int) args = parser.parse_args() assert 0 == call(str('mkdir -p ' + args.save_to).split()) # store mean/whitening matrices from Reshaper (remove if inapplicable) assert 0 == call(str('cp ' + args.data_dir + '/mean.matrix ' + args.save_to).split()) assert 0 == call(str('cp ' + args.data_dir + '/whitening.matrix ' + args.save_to).split()) # store ID count, internal ID order, and number of sequences id_idx = build_id_idx(args.data_dir + '/train.list') options['id_count'] = len(id_idx) with open(args.save_to + '/ids.order', 'w') as f: f.write(';'.join(iterkeys(id_idx))) # code_0;...;code_N-1 def n_seqs(list_file): with open(list_file) as f: return sum(1 for line in f) n_seqs_train = n_seqs(args.data_dir + '/train.list') n_seqs_dev = n_seqs(args.data_dir + '/dev.list') # list of context_name's (THEANO_FLAGS=contexts=... for multi GPU mode) c_names = [m.split('->')[0] for m in th.config.contexts.split(';')] \ if th.config.contexts != "" else None # for replicating previous experiments seed = np.random.randint(np.iinfo(np.int32).max) \ if args.seed is None else args.seed np.random.seed(seed) """ Print summary for logging """ def print_hline(): print(''.join('-' for _ in range(79))) lapse_from = lambda start: ('(' + ('%.1f' % (time.time() - start)).rjust(7) + ' sec)') print_hline() # ----------------------------------------------------------- print('Data location : ' + args.data_dir) if args.load_from is not None: print('Re-train from : ' + args.load_from) print('Save model to : ' + args.save_to) print_hline() # ----------------------------------------------------------- print('Options') maxlen = max(len(k) for k in options.keys()) for k, v in iteritems(options): print(' ' + k.ljust(maxlen) + ' : ' + str(v)) print_hline() # ----------------------------------------------------------- print('Stats') print(' np.random.seed : ' + str(seed).rjust(10)) print(' # of train seqs : ' + str(n_seqs_train).rjust(10)) print(' # of dev seqs : ' + str(n_seqs_dev ).rjust(10)) print(' # of unique IDs : ' + str(options['id_count']).rjust(10)) print(' # of weights : ', end = '') net = Net(options, args.save_to, args.load_from, c_names) # takes few secs print(str(net.n_weights()).rjust(10)) """ Compile th.function's (time consuming) and prepare for training """ print_hline() # ----------------------------------------------------------- print('Compiling fwd/bwd propagators... 
', end = '') # takes minutes ~ start = time.time() # hours (unroll_scan) f_fwd_bwd_propagate = net.compile_f_fwd_bwd_propagate() f_fwd_propagate = net.compile_f_fwd_propagate() print(lapse_from(start)) print('Compiling updater/initializer... ', end = '') start = time.time() f_update_v_params = net.compile_f_update_v_params() f_initialize_optimizer = net.compile_f_initialize_optimizer() print(lapse_from(start)) # NOTE: window_size must be the same as that given to Net train_data = DataIter(list_file = args.data_dir + '/train.list', window_size = options['window_size'], step_size = options['step_size'], batch_size = options['batch_size'], input_dim = options['input_dim'], target_dim = options['target_dim'], id_idx = id_idx) dev_data = DataIter(list_file = args.data_dir + '/dev.list', window_size = options['window_size'], step_size = options['step_size'], batch_size = options['batch_size'], input_dim = options['input_dim'], target_dim = options['target_dim'], id_idx = id_idx) chunk_size = options['step_size'] * options['batch_size'] trained_frames_per_epoch = \ (options['frames_per_epoch'] // chunk_size) * chunk_size def run_epoch(data_iter, lr_cur): """ lr_cur sets the running mode float training None inference """ is_training = lr_cur is not None if is_training: # apply BPTT(window_size; step_size) step_size = options['step_size'] else: # set next_prev_idx = window_size - 1 for efficiency step_size = options['window_size'] frames_per_step = step_size * options['batch_size'] data_iter.discard_unfinished() data_iter.set_step_size(step_size) loss_sum = 0. frames_seen = 0 for input_tbi, target_tbi, time_tb, id_idx_tb in data_iter: if is_training: loss = f_fwd_bwd_propagate(input_tbi, target_tbi, time_tb, id_idx_tb, step_size) else: loss = f_fwd_propagate(input_tbi, target_tbi, time_tb, id_idx_tb, step_size) loss_sum += np.asscalar(loss[0]) frames_seen += frames_per_step if is_training: f_update_v_params(lr_cur) if frames_seen >= trained_frames_per_epoch: break return np.float32(loss_sum / frames_seen) """ Scheduled learning rate annealing with patience Adapted from https://github.com/KyuyeonHwang/Fractal """ # Names for saving/loading name_pivot = '0' name_prev = '1' name_best = None # auto trained_frames = 0 trained_frames_at_pivot = 0 trained_frames_at_best = 0 discarded_frames = 0 loss_pivot = 0. loss_prev = 0. loss_best = 0. cur_retry = 0 lr = options['lr_init_val'] f_initialize_optimizer() net.save_to_workspace(name_prev) net.save_to_workspace(name_best) while True: print_hline() # ------------------------------------------------------- print('Training... ', end = '') start = time.time() loss_train = run_epoch(train_data, lr) print(lapse_from(start)) trained_frames += trained_frames_per_epoch print('Evaluating... 
', end = '') start = time.time() loss_cur = run_epoch(dev_data, None) print(lapse_from(start)) print('Total trained frames : ' + str(trained_frames ).rjust(12)) print('Total discarded frames : ' + str(discarded_frames).rjust(12)) print('Train loss : %.6f' % loss_train) print('Eval loss : %.6f' % loss_cur, end = '') if np.isnan(loss_cur): loss_cur = np.float32('inf') if loss_cur < loss_best or trained_frames == trained_frames_per_epoch: print(' (best)', end = '') trained_frames_at_best = trained_frames loss_best = loss_cur net.save_to_workspace(name_best) print('') if loss_cur > loss_prev and trained_frames > trained_frames_per_epoch: print_hline() # --------------------------------------------------- cur_retry += 1 if cur_retry > options['max_retry']: cur_retry = 0 lr *= options['lr_decay_rate'] if lr < options['lr_lower_bound']: break # cur <- pivot & prev <- cur discard = trained_frames - trained_frames_at_pivot discarded_frames += discard trained_frames = trained_frames_at_pivot net.load_from_workspace(name_pivot) f_initialize_optimizer() loss_prev = loss_pivot net.save_to_workspace(name_prev) print('Discard recently trained ' + str(discard) + ' frames') print('New learning rate : ' + str(lr)) else: print('Retry count : ' + str(cur_retry) + ' / ' + str(options['max_retry'])) else: cur_retry = 0 # pivot <- prev & prev <- cur trained_frames_at_pivot = trained_frames - trained_frames_per_epoch loss_pivot, loss_prev = loss_prev, loss_cur name_pivot, name_prev = name_prev, name_pivot net.save_to_workspace(name_prev) discarded_frames += trained_frames - trained_frames_at_best trained_frames = trained_frames_at_best net.load_from_workspace(name_best) net.remove_from_workspace(name_pivot) net.remove_from_workspace(name_prev) print('') print('Best network') print('Total trained frames : ' + str(trained_frames ).rjust(12)) print('Total discarded frames : ' + str(discarded_frames).rjust(12)) print('[Train set] Loss : %.6f' % run_epoch(train_data, None)) print('[ Dev set ] Loss : %.6f' % run_epoch(dev_data , None)) print('') if __name__ == '__main__': main()
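# ---------------------------------------------------------------------------
# Illustration (not part of train.py): the loop above implements learning-rate
# annealing with patience -- after `max_retry` consecutive epochs in which the
# dev loss fails to improve, training rolls back and the learning rate is
# multiplied by `lr_decay_rate` until it falls below `lr_lower_bound`. The
# sketch below distils just that schedule into a standalone function; the
# `evaluate` callback and the default numbers are placeholders, and the
# rollback of network weights/frames done by the real loop is omitted here.

def anneal_with_patience(evaluate, lr_init=1e-5, lr_lower_bound=1e-7,
                         lr_decay_rate=0.5, max_retry=10, max_epochs=1000):
    """Call evaluate(lr) once per epoch; decay lr after repeated regressions."""
    lr, retry = lr_init, 0
    loss_prev = loss_best = float('inf')
    for _ in range(max_epochs):
        loss_cur = evaluate(lr)          # train one epoch, return dev loss
        loss_best = min(loss_best, loss_cur)
        if loss_cur > loss_prev:         # dev loss regressed this epoch
            retry += 1
            if retry > max_retry:        # patience exhausted: anneal
                retry = 0
                lr *= lr_decay_rate
                if lr < lr_lower_bound:  # schedule finished
                    break
        else:
            retry = 0
        loss_prev = loss_cur
    return loss_best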
from .mlp_score_head import MLPScoreHead __all__ = [ 'MLPScoreHead' ]
import pytest from game import seed_grid, parse_args, print_grid, get_neighbours, live_or_die def test_parser(): with pytest.raises(BaseException): parse_args(["-x", "-y", "-c"]) args = parse_args(["-x 10", "-y 20", "-c (1,1),(2,2),(5,4)"]) assert args.x == 10 assert args.y == 20 assert args.cells == [[(1, 1), (2, 2), (5, 4)]] def test_seed(): grid = seed_grid(10, 20, [(0, 0), (9, 19)]) assert len(grid) == 10 assert len(grid[0]) == 20 assert grid[0][0] == "L" assert grid[9][19] == "L" assert grid[1][19] == " " def test_print(capsys): grid = seed_grid(4, 4, [(1, 2)]) print_grid(grid) captured = capsys.readouterr() assert captured.out == "| | | | |\n| | | | |\n| |L| | |\n| | | | |\n" def test_neighbours(): grid = seed_grid(4, 4, [(0, 0), (2, 2)]) live = get_neighbours(grid, 1, 1) assert live == 2 grid = seed_grid(4, 4, []) live = get_neighbours(grid, 3, 2) assert live == 0 grid = seed_grid(4, 4, [(0, 0), (0, 1)]) live = get_neighbours(grid, 0, 2) assert live == 1 grid = seed_grid(4, 4, [(3, 1)]) live = get_neighbours(grid, 2, 0) assert live == 1 grid = seed_grid(4, 4, [(3, 0)]) live = get_neighbours(grid, 2, 3) assert live == 1 def test_live_or_die(): assert live_or_die("L", 1) == " " assert live_or_die("L", 4) == " " assert live_or_die("L", 3) == "L" assert live_or_die(" ", 3) == "L" assert live_or_die(" ", 2) == " "
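# ---------------------------------------------------------------------------
# The assertions above pin down the behaviour expected from game.py (not shown
# in this file): a grid indexed as grid[x][y] with "L" for live cells, a
# toroidal (wrap-around) neighbour count, and the standard Conway rules.
# The sketch below is a hypothetical minimal implementation consistent with
# these tests (parse_args is omitted); the real game.py may differ.

def seed_grid(x, y, cells):
    """Build an x-by-y grid of " " cells, with "L" at each seeded position."""
    grid = [[" " for _ in range(y)] for _ in range(x)]
    for cx, cy in cells:
        grid[cx][cy] = "L"
    return grid


def print_grid(grid):
    """Print the grid row by row; within a row, cells are separated by '|'."""
    width, height = len(grid), len(grid[0])
    for j in range(height):
        print("|" + "|".join(grid[i][j] for i in range(width)) + "|")


def get_neighbours(grid, x, y):
    """Count live neighbours of (x, y), wrapping around the grid edges."""
    width, height = len(grid), len(grid[0])
    live = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if (dx, dy) == (0, 0):
                continue
            if grid[(x + dx) % width][(y + dy) % height] == "L":
                live += 1
    return live


def live_or_die(cell, live_neighbours):
    """Conway's rules: live cells survive with 2-3 neighbours, dead cells spawn with 3."""
    if cell == "L":
        return "L" if live_neighbours in (2, 3) else " "
    return "L" if live_neighbours == 3 else " "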
# -*- coding: utf-8 -*- import scrapy import pandas as pd class FirstSpider(scrapy.Spider): name = 'first' def start_requests(self): urls = ['https://www.worldometers.info/coronavirus/#countries'] for url in urls: yield scrapy.Request(url=url, callback=self.parse) def parse(self, response): table = pd.read_html(response.text) print(table)
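# ---------------------------------------------------------------------------
# pandas.read_html() returns a *list* of DataFrames, one per <table> element
# on the page; the spider above only prints that list. The helper below is a
# hypothetical sketch (not part of the original spider) of how the parsed
# tables could be turned into plain row dictionaries that parse() could yield
# as Scrapy items, without assuming any particular column names.

def tables_to_items(html_text):
    """Convert every HTML table in a response body into {column: value} rows."""
    items = []
    for frame in pd.read_html(html_text):
        # to_dict(orient="records") yields one dict per table row.
        items.extend(frame.to_dict(orient="records"))
    return items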
from future import standard_library standard_library.install_aliases() import datetime import json import os import re import time from collections import namedtuple, defaultdict from urllib.parse import urlparse, urljoin from io import BytesIO import flask import sqlalchemy.sql from flask import abort from flask import current_app from flask import flash from flask import g from flask import make_response from flask import render_template from flask import request, url_for from flask import send_file from flask_wtf import Form from sqlalchemy.orm import joinedload from sqlalchemy.orm.exc import NoResultFound from typing import Optional from wtforms import SelectField, StringField, SubmitField from wtforms.validators import DataRequired, Length import lnt.server.db.rules_manager import lnt.server.db.search import lnt.server.reporting.analysis import lnt.server.reporting.dailyreport import lnt.server.reporting.latestrunsreport import lnt.server.reporting.runs import lnt.server.reporting.summaryreport import lnt.server.ui.util import lnt.util import lnt.util.ImportData import lnt.util.stats from lnt.external.stats import stats as ext_stats from lnt.server.db import testsuitedb from lnt.server.reporting.analysis import ComparisonResult, calc_geomean from lnt.server.ui import util from lnt.server.ui.decorators import frontend, db_route, v4_route from lnt.server.ui.globals import db_url_for, v4_url_for, v4_redirect from lnt.server.ui.util import FLASH_DANGER, FLASH_SUCCESS, FLASH_INFO from lnt.server.ui.util import PrecomputedCR from lnt.server.ui.util import baseline_key, convert_revision from lnt.server.ui.util import mean from lnt.testing import PASS from lnt.util import logger from lnt.util import multidict from lnt.util import stats # http://flask.pocoo.org/snippets/62/ def is_safe_url(target): ref_url = urlparse(request.host_url) test_url = urlparse(urljoin(request.host_url, target)) return test_url.scheme in ('http', 'https') and \ ref_url.netloc == test_url.netloc def get_redirect_target(): for target in request.values.get('next'), request.referrer: if not target: continue if is_safe_url(target): return target ### # Root-Only Routes @frontend.route('/favicon.ico') def favicon_ico(): return v4_redirect(url_for('.static', filename='favicon.ico')) @frontend.route('/select_db') def select_db(): path = request.args.get('path') db = request.args.get('db') if path is None: abort(400, "'path' argument is missing") if db not in current_app.old_config.databases: abort(404, "'db' argument is missing or invalid") # Rewrite the path. 
new_path = "/db_%s" % db if not path.startswith("/db_"): new_path += path else: if '/' in path[1:]: new_path += "/" + path.split("/", 2)[2] return v4_redirect(request.script_root + new_path) ##### # Per-Database Routes @db_route('/') def index(): return render_template("index.html") ### # Database Actions def _do_submit(): assert request.method == 'POST' input_file = request.files.get('file') input_data = request.form.get('input_data') if 'select_machine' not in request.form and \ 'update_machine' in request.form: # Compatibility with old clients update_machine = int(request.form.get('update_machine', 0)) != 0 select_machine = 'update' if update_machine else 'match' else: select_machine = request.form.get('select_machine', 'match') merge_run = request.form.get('merge', None) ignore_regressions = request.form.get('ignore_regressions', False) \ or getattr(current_app.old_config, 'ignore_regressions', False) if input_file and not input_file.content_length: input_file = None if not input_file and not input_data: return render_template( "submit_run.html", error="must provide input file or data") if input_file and input_data: return render_template( "submit_run.html", error="cannot provide input file *and* data") if input_file: data_value = input_file.read() else: data_value = input_data # The following accomodates old submitters. Note that we explicitely # removed the tag field from the new submission format, this is only here # for old submission jobs. The better way of doing it is mentioning the # correct test-suite in the URL. So when submitting to suite YYYY use # db_XXX/v4/YYYY/submitRun instead of db_XXXX/submitRun! if g.testsuite_name is None: try: data = json.loads(data_value) Run = data.get('Run') if Run is not None: Info = Run.get('Info') if Info is not None: g.testsuite_name = Info.get('tag') except Exception: pass if g.testsuite_name is None: g.testsuite_name = 'nts' # Get a DB connection. session = request.session db = request.get_db() result = lnt.util.ImportData.import_from_string( current_app.old_config, g.db_name, db, session, g.testsuite_name, data_value, select_machine=select_machine, merge_run=merge_run, ignore_regressions=ignore_regressions) # It is nice to have a full URL to the run, so fixup the request URL # here were we know more about the flask instance. if result.get('result_url'): result['result_url'] = request.url_root + result['result_url'] response = flask.jsonify(**result) error = result['error'] if error is not None: response.status_code = 400 logger.warning("%s: Submission rejected: %s" % (request.url, error)) return response def ts_data(ts): """Data about the current testsuite used by layout.html which should be present in most templates.""" baseline_id = flask.session.get(baseline_key(ts.name)) baselines = request.session.query(ts.Baseline).all() return { 'baseline_id': baseline_id, 'baselines': baselines, 'ts': ts } @db_route('/submitRun', methods=('GET', 'POST')) def submit_run(): """Compatibility url that hardcodes testsuite to 'nts'""" if request.method == 'GET': g.testsuite_name = 'nts' return v4_redirect(v4_url_for('.v4_submitRun')) # This route doesn't know the testsuite to use. We have some defaults/ # autodetection for old submissions, but really you should use the full # db_XXX/v4/YYYY/submitRun URL when using non-nts suites. 
g.testsuite_name = None return _do_submit() @v4_route('/submitRun', methods=('GET', 'POST')) def v4_submitRun(): if request.method == 'GET': ts = request.get_testsuite() return render_template("submit_run.html", **ts_data(ts)) return _do_submit() ### # V4 Schema Viewer @v4_route("/") def v4_overview(): ts = request.get_testsuite() return render_template("v4_overview.html", testsuite_name=g.testsuite_name, **ts_data(ts)) @v4_route("/recent_activity") def v4_recent_activity(): session = request.session ts = request.get_testsuite() # Get the most recent runs in this tag, we just arbitrarily limit to # looking at the last 100 submission. recent_runs = session.query(ts.Run) \ .options(joinedload(ts.Run.order)) \ .options(joinedload(ts.Run.machine)) \ .order_by(ts.Run.start_time.desc()).limit(100) recent_runs = recent_runs.all() # Compute the active machine list. active_machines = dict((run.machine.name, run) for run in recent_runs[::-1]) # Compute the active submission list. # # FIXME: Remove hard coded field use here. N = 30 active_submissions = [(r, r.order.llvm_project_revision) for r in recent_runs[:N]] return render_template("v4_recent_activity.html", testsuite_name=g.testsuite_name, active_machines=active_machines, active_submissions=active_submissions, **ts_data(ts)) @v4_route("/machine/") def v4_machines(): # Compute the list of associated runs, grouped by order. # Gather all the runs on this machine. session = request.session ts = request.get_testsuite() machines = session.query(ts.Machine).order_by(ts.Machine.name) return render_template("all_machines.html", machines=machines, **ts_data(ts)) @v4_route("/machine/<int:machine_id>/latest") def v4_machine_latest(machine_id): """Return the most recent run on this machine.""" session = request.session ts = request.get_testsuite() run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_id) \ .order_by(ts.Run.start_time.desc()) \ .first() return v4_redirect(v4_url_for('.v4_run', id=run.id, **request.args)) @v4_route("/machine/<int:machine_id>/compare") def v4_machine_compare(machine_id): """Return the most recent run on this machine.""" session = request.session ts = request.get_testsuite() machine_compare_to_id = int(request.args['compare_to_id']) machine_1_run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_id) \ .order_by(ts.Run.start_time.desc()) \ .first() machine_2_run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_compare_to_id) \ .order_by(ts.Run.start_time.desc()) \ .first() return v4_redirect(v4_url_for('.v4_run', id=machine_1_run.id, compare_to=machine_2_run.id)) @v4_route("/machine/<int:id>") def v4_machine(id): # Compute the list of associated runs, grouped by order. # Gather all the runs on this machine. 
session = request.session ts = request.get_testsuite() associated_runs = multidict.multidict( (run_order, r) for r, run_order in (session.query(ts.Run, ts.Order) .join(ts.Order) .filter(ts.Run.machine_id == id) .order_by(ts.Run.start_time.desc()))) associated_runs = sorted(associated_runs.items()) try: machine = session.query(ts.Machine).filter(ts.Machine.id == id).one() except NoResultFound: abort(404, "Invalid machine id {}".format(id)) if request.args.get('json'): json_obj = dict() json_obj['name'] = machine.name json_obj['id'] = machine.id json_obj['runs'] = [] for order in associated_runs: rev = order[0].llvm_project_revision for run in order[1]: json_obj['runs'].append((run.id, rev, run.start_time.isoformat(), run.end_time.isoformat())) return flask.jsonify(**json_obj) machines = session.query(ts.Machine).order_by(ts.Machine.name).all() relatives = [m for m in machines if m.name == machine.name] return render_template("v4_machine.html", testsuite_name=g.testsuite_name, id=id, associated_runs=associated_runs, machine=machine, machines=machines, relatives=relatives, **ts_data(ts)) class V4RequestInfo(object): def __init__(self, run_id): session = request.session self.db = request.get_db() self.session = session self.ts = ts = request.get_testsuite() self.run = run = session.query(ts.Run).filter_by(id=run_id).first() if run is None: abort(404, "Invalid run id {}".format(run_id)) # Get the aggregation function to use. aggregation_fn_name = request.args.get('aggregation_fn') self.aggregation_fn = {'min': lnt.util.stats.safe_min, 'median': lnt.util.stats.median}.get( aggregation_fn_name, lnt.util.stats.safe_min) # Get the MW confidence level. try: confidence_lv = float(request.args.get('MW_confidence_lv')) except (TypeError, ValueError): confidence_lv = .05 self.confidence_lv = confidence_lv # Find the neighboring runs, by order. prev_runs = list(ts.get_previous_runs_on_machine(session, run, N=3)) next_runs = list(ts.get_next_runs_on_machine(session, run, N=3)) self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs # Select the comparison run as either the previous run, or a user # specified comparison run. compare_to_str = request.args.get('compare_to') if compare_to_str: compare_to_id = int(compare_to_str) compare_to = session.query(ts.Run) \ .filter_by(id=compare_to_id) \ .first() if compare_to is None: flash("Comparison Run is invalid: " + compare_to_str, FLASH_DANGER) else: self.comparison_neighboring_runs = ( list(ts.get_next_runs_on_machine(session, compare_to, N=3))[::-1] + [compare_to] + list(ts.get_previous_runs_on_machine(session, compare_to, N=3))) else: if prev_runs: compare_to = prev_runs[0] else: compare_to = None self.comparison_neighboring_runs = self.neighboring_runs try: self.num_comparison_runs = int( request.args.get('num_comparison_runs')) except Exception: self.num_comparison_runs = 0 # Find the baseline run, if requested. baseline_str = request.args.get('baseline') if baseline_str: baseline_id = int(baseline_str) baseline = session.query(ts.Run).filter_by(id=baseline_id).first() if baseline is None: flash("Could not find baseline " + baseline_str, FLASH_DANGER) else: baseline = None # We're going to render this on a real webpage with CSS support, so # override the default styles and provide bootstrap class names for # the tables. 
styles = { 'body': '', 'td': '', 'h1': 'font-size: 14pt', 'table': 'width: initial; font-size: 9pt;', 'th': 'text-align: center;' } classes = { 'table': 'table table-striped table-condensed table-hover' } self.data = lnt.server.reporting.runs.generate_run_data( session, self.run, baseurl=db_url_for('.index', _external=False), result=None, compare_to=compare_to, baseline=baseline, num_comparison_runs=self.num_comparison_runs, aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv, styles=styles, classes=classes) self.sri = self.data['sri'] note = self.data['visible_note'] if note: flash(note, FLASH_INFO) self.data.update(ts_data(ts)) @v4_route("/<int:id>/report") def v4_report(id): info = V4RequestInfo(id) return render_template('reporting/run_report.html', **info.data) @v4_route("/<int:id>/text_report") def v4_text_report(id): info = V4RequestInfo(id) text_report = render_template('reporting/run_report.txt', **info.data) response = make_response(text_report) response.mimetype = "text/plain" return response # Compatilibity route for old run pages. @db_route("/simple/<tag>/<int:id>/") def simple_run(tag, id): # Get the expected test suite. db = request.get_db() session = request.session ts = db.testsuite[tag] # Look for a matched run. matched_run = session.query(ts.Run).\ filter(ts.Run.simple_run_id == id).\ first() # If we found one, redirect to it's report. if matched_run is not None: return v4_redirect(db_url_for(".v4_run", testsuite_name=tag, id=matched_run.id)) # Otherwise, report an error. return render_template("error.html", message="""\ Unable to find a run for this ID. Please use the native v4 URL interface (instead of the /simple/... URL schema).""") @v4_route("/<int:id>") def v4_run(id): info = V4RequestInfo(id) session = info.session ts = info.ts run = info.run # Parse the view options. options = {} options['show_delta'] = bool(request.args.get('show_delta')) options['show_previous'] = bool(request.args.get('show_previous')) options['show_stddev'] = bool(request.args.get('show_stddev')) options['show_mad'] = bool(request.args.get('show_mad')) options['show_all'] = bool(request.args.get('show_all')) options['show_all_samples'] = bool(request.args.get('show_all_samples')) options['show_sample_counts'] = \ bool(request.args.get('show_sample_counts')) options['show_graphs'] = bool(request.args.get('show_graphs')) options['show_data_table'] = bool(request.args.get('show_data_table')) options['show_small_diff'] = bool(request.args.get('show_small_diff')) options['hide_report_by_default'] = bool( request.args.get('hide_report_by_default')) options['num_comparison_runs'] = info.num_comparison_runs options['test_filter'] = test_filter_str = request.args.get( 'test_filter', '') options['MW_confidence_lv'] = info.confidence_lv if test_filter_str: test_filter_re = re.compile(test_filter_str) else: test_filter_re = None options['test_min_value_filter'] = test_min_value_filter_str = \ request.args.get('test_min_value_filter', '') if test_min_value_filter_str != '': test_min_value_filter = float(test_min_value_filter_str) else: test_min_value_filter = 0.0 options['aggregation_fn'] = request.args.get('aggregation_fn', 'min') # Get the test names. test_info = session.query(ts.Test.name, ts.Test.id).\ order_by(ts.Test.name).all() # Filter the list of tests by name, if requested. 
if test_filter_re: test_info = [test for test in test_info if test_filter_re.search(test[0])] if request.args.get('json'): json_obj = dict() sri = lnt.server.reporting.analysis.RunInfo(session, ts, [id]) reported_tests = session.query(ts.Test.name, ts.Test.id).\ filter(ts.Run.id == id).\ filter(ts.Test.id.in_(sri.test_ids)).all() order = run.order.as_ordered_string() for test_name, test_id in reported_tests: test = dict(test_name=test_name, test_id=test_id, order=order, machine=run.machine.name) for sample_field in ts.sample_fields: res = sri.get_run_comparison_result( run, None, test_id, sample_field, ts.Sample.get_hash_of_binary_field()) test[sample_field.name] = res.current json_obj[test_name] = test return flask.jsonify(**json_obj) urls = { 'search': v4_url_for('.v4_search') } data = info.data data.update({ 'analysis': lnt.server.reporting.analysis, 'metric_fields': list(ts.Sample.get_metric_fields()), 'options': options, 'request_info': info, 'test_info': test_info, 'test_min_value_filter': test_min_value_filter, 'urls': urls, }) return render_template("v4_run.html", **data) class PromoteOrderToBaseline(Form): name = StringField('Name', validators=[DataRequired(), Length(max=32)]) description = StringField('Description', validators=[Length(max=256)]) promote = SubmitField('Promote') update = SubmitField('Update') demote = SubmitField('Demote') @v4_route("/order/<int:id>", methods=['GET', 'POST']) def v4_order(id): """Order page details order information, as well as runs that are in this order as well setting this run as a baseline.""" session = request.session ts = request.get_testsuite() form = PromoteOrderToBaseline() if form.validate_on_submit(): try: baseline = session.query(ts.Baseline) \ .filter(ts.Baseline.order_id == id) \ .one() except NoResultFound: baseline = ts.Baseline() if form.demote.data: session.delete(baseline) session.commit() flash("Baseline demoted.", FLASH_SUCCESS) else: baseline.name = form.name.data baseline.comment = form.description.data baseline.order_id = id session.add(baseline) session.commit() flash("Baseline {} updated.".format(baseline.name), FLASH_SUCCESS) return v4_redirect(v4_url_for(".v4_order", id=id)) try: baseline = session.query(ts.Baseline) \ .filter(ts.Baseline.order_id == id) \ .one() form.name.data = baseline.name form.description.data = baseline.comment except NoResultFound: pass # Get the order. 
order = session.query(ts.Order).filter(ts.Order.id == id).first() if order is None: abort(404, "Invalid order id {}".format(id)) previous_order = None if order.previous_order_id: previous_order = session.query(ts.Order) \ .filter(ts.Order.id == order.previous_order_id).one() next_order = None if order.next_order_id: next_order = session.query(ts.Order) \ .filter(ts.Order.id == order.next_order_id).one() runs = session.query(ts.Run) \ .filter(ts.Run.order_id == id) \ .options(joinedload(ts.Run.machine)) \ .all() num_runs = len(runs) return render_template("v4_order.html", order=order, form=form, previous_order=previous_order, next_order=next_order, runs=runs, num_runs=num_runs, **ts_data(ts)) @v4_route("/set_baseline/<int:id>") def v4_set_baseline(id): """Update the baseline stored in the user's session.""" session = request.session ts = request.get_testsuite() base = session.query(ts.Baseline).get(id) if not base: return abort(404, "Invalid baseline id {}".format(id)) flash("Baseline set to " + base.name, FLASH_SUCCESS) flask.session[baseline_key(ts.name)] = id return v4_redirect(get_redirect_target()) @v4_route("/all_orders") def v4_all_orders(): # Get the testsuite. session = request.session ts = request.get_testsuite() # Get the orders and sort them totally. orders = sorted(session.query(ts.Order).all()) return render_template("v4_all_orders.html", orders=orders, **ts_data(ts)) @v4_route("/<int:id>/graph") def v4_run_graph(id): # This is an old style endpoint that treated graphs as associated with # runs. Redirect to the new endpoint. session = request.session ts = request.get_testsuite() run = session.query(ts.Run).filter_by(id=id).first() if run is None: abort(404, "Invalid run id {}".format(id)) # Convert the old style test parameters encoding. args = {'highlight_run': id} plot_number = 0 for name, value in request.args.items(): # If this isn't a test specification, just forward it. if not name.startswith('test.'): args[name] = value continue # Otherwise, rewrite from the old style of:: # # test.<test id>=<sample field index> # # into the new style of:: # # plot.<number>=<machine id>.<test id>.<sample field index> test_id = name.split('.', 1)[1] args['plot.%d' % (plot_number,)] = '%d.%s.%s' % ( run.machine.id, test_id, value) plot_number += 1 return v4_redirect(v4_url_for(".v4_graph", **args)) BaselineLegendItem = namedtuple('BaselineLegendItem', 'name id') LegendItem = namedtuple('LegendItem', 'machine test_name field_name color url') @v4_route("/graph_for_sample/<int:sample_id>/<string:field_name>") def v4_graph_for_sample(sample_id, field_name): """Redirect to a graph of the data that a sample and field came from. When you have a sample from an API call, this can get you into the LNT graph page, for that sample. Extra args are passed through, to allow the caller to customize the graph page displayed, with for example run highlighting. :param sample_id: the sample ID from the database, obtained from the API. :param field_name: the name of the field. :return: a redirect to the graph page for that sample and field. """ session = request.session ts = request.get_testsuite() target_sample = session.query(ts.Sample).get(sample_id) if not target_sample: abort(404, "Could not find sample id {}".format(sample_id)) # Get the field index we are interested in. 
field_index = None for idx, f in enumerate(ts.sample_fields): if f.name == field_name: field_index = idx break if field_index is None: abort(400, "Could not find field {}".format(field_name)) kwargs = {'plot.0': '{machine_id}.{test_id}.{field_index}'.format( machine_id=target_sample.run.machine.id, test_id=target_sample.test_id, field_index=field_index)} # Pass request args through, so you can add graph options. kwargs.update(request.args) graph_url = v4_url_for('.v4_graph', **kwargs) return v4_redirect(graph_url) class PlotParameter(object): def __init__(self, machine, test, field, field_index): self.machine = machine self.test = test self.field = field self.field_index = field_index self.samples = None def __repr__(self): return "{}:{}({} samples)" \ .format(self.machine.name, self.test.name, len(self.samples) if self.samples else "No") def assert_field_idx_valid(field_idx, count): if not (0 <= field_idx < count): return abort(404, "Invalid field index {}. Total sample_fileds for " "the current suite is {}.".format(field_idx, count)) def load_plot_parameter(machine_id, test_id, field_index, session, ts): try: machine_id = int(machine_id) test_id = int(test_id) field_index = int(field_index) except ValueError: return abort(400, "Invalid plot arguments.") try: machine = session.query(ts.Machine) \ .filter(ts.Machine.id == machine_id) \ .one() except NoResultFound: return abort(404, "Invalid machine id {}".format(machine_id)) try: test = session.query(ts.Test).filter(ts.Test.id == test_id).one() except NoResultFound: return abort(404, "Invalid test id {}".format(test_id)) assert_field_idx_valid(field_index, len(ts.sample_fields)) try: field = ts.sample_fields[field_index] except NoResultFound: return abort(404, "Invalid field_index {}".format(field_index)) return PlotParameter(machine, test, field, field_index) def parse_plot_parameters(args): """ Returns a list of tuples of integers (machine_id, test_id, field_index). :param args: The request parameters dictionary. """ plot_parameters = [] for name, value in args.items(): # Plots are passed as:: # # plot.<unused>=<machine id>.<test id>.<field index> if not name.startswith('plot.'): continue # Ignore the extra part of the key, it is unused. try: machine_id, test_id, field_index = map(int, value.split('.')) except ValueError: return abort(400, "Parameter {} was malformed. {} must be int.int.int" .format(name, value)) plot_parameters.append((machine_id, test_id, field_index)) return plot_parameters def parse_and_load_plot_parameters(args, session, ts): """ Parses plot parameters and loads the corresponding entities from the database. Returns a list of PlotParameter instances sorted by machine name, test name and then field. :param args: The request parameters dictionary. :param session: The database session. :param ts: The test suite. """ plot_parameters = [load_plot_parameter(machine_id, test_id, field_index, session, ts) for (machine_id, test_id, field_index) in parse_plot_parameters(args)] # Order the plots by machine name, test name and then field. 
plot_parameters.sort(key=lambda plot_parameter: (plot_parameter.machine.name, plot_parameter.test.name, plot_parameter.field.name, plot_parameter.field_index)) return plot_parameters def parse_mean_parameter(args, session, ts): # Mean to graph is passed as: # # mean=<machine id>.<field index> value = args.get('mean') if not value: return None try: machine_id, field_index = map(int, value.split('.')) except ValueError: return abort(400, "Invalid format of 'mean={}', expected mean=<machine id>.<field index>".format(value)) try: machine = session.query(ts.Machine) \ .filter(ts.Machine.id == machine_id) \ .one() except NoResultFound: return abort(404, "Invalid machine id {}".format(machine_id)) assert_field_idx_valid(field_index, len(ts.sample_fields)) field = ts.sample_fields[field_index] return machine, field def load_graph_data(plot_parameter, show_failures, limit, xaxis_date, revision_cache=None): """ Load all the field values for this test on the same machine. :param plot_parameter: Stores machine, test and field to load. :param show_failures: Filter only passed values if False. :param limit: Limit points if specified. :param xaxis_date: X axis is Date, otherwise Order. """ session = request.session ts = request.get_testsuite() # Load all the field values for this test on the same machine. # # FIXME: Don't join to Order here, aggregate this across all the tests # we want to load. Actually, we should just make this a single query. values = session.query(plot_parameter.field.column, ts.Order, ts.Run.start_time, ts.Run.id) \ .join(ts.Run).join(ts.Order) \ .filter(ts.Run.machine_id == plot_parameter.machine.id) \ .filter(ts.Sample.test == plot_parameter.test) \ .filter(plot_parameter.field.column.isnot(None)) # Unless all samples requested, filter out failing tests. if not show_failures: if plot_parameter.field.status_field: values = values.filter((plot_parameter.field.status_field.column == PASS) | (plot_parameter.field.status_field.column.is_(None))) if limit: values = values.limit(limit) if xaxis_date: # Aggregate by date. data = list(multidict.multidict( (date, (val, order, date, run_id)) for val, order, date, run_id in values).items()) # Sort data points according to date. data.sort(key=lambda sample: sample[0]) else: # Aggregate by order (revision). data = list(multidict.multidict( (order.llvm_project_revision, (val, order, date, run_id)) for val, order, date, run_id in values).items()) # Sort data points according to order (revision). data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache)) return data def load_geomean_data(field, machine, limit, xaxis_date, revision_cache=None): """ Load geomean for specified field on the same machine. :param field: Field. :param machine: Machine. :param limit: Limit points if specified. :param xaxis_date: X axis is Date, otherwise Order. """ session = request.session ts = request.get_testsuite() values = session.query(sqlalchemy.sql.func.min(field.column), ts.Order, sqlalchemy.sql.func.min(ts.Run.start_time)) \ .join(ts.Run).join(ts.Order).join(ts.Test) \ .filter(ts.Run.machine_id == machine.id) \ .filter(field.column.isnot(None)) \ .group_by(ts.Order.llvm_project_revision, ts.Test) if limit: values = values.limit(limit) data = multidict.multidict( ((order, date), val) for val, order, date in values).items() # Calculate geomean of each revision. if xaxis_date: data = [(date, [(calc_geomean(vals), order, date)]) for ((order, date), vals) in data] # Sort data points according to date. 
data.sort(key=lambda sample: sample[0]) else: data = [(order.llvm_project_revision, [(calc_geomean(vals), order, date)]) for ((order, date), vals) in data] # Sort data points according to order (revision). data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache)) return data @v4_route("/graph") def v4_graph(): session = request.session ts = request.get_testsuite() switch_min_mean_local = False if 'switch_min_mean_session' not in flask.session: flask.session['switch_min_mean_session'] = False # Parse the view options. options = {'min_mean_checkbox': 'min()'} if 'submit' in request.args: # user pressed a button if 'switch_min_mean' in request.args: # user checked mean() checkbox flask.session['switch_min_mean_session'] = \ options['switch_min_mean'] = \ bool(request.args.get('switch_min_mean')) switch_min_mean_local = flask.session['switch_min_mean_session'] else: # mean() check box is not checked flask.session['switch_min_mean_session'] = \ options['switch_min_mean'] = \ bool(request.args.get('switch_min_mean')) switch_min_mean_local = flask.session['switch_min_mean_session'] else: # new page was loaded by clicking link, not submit button options['switch_min_mean'] = switch_min_mean_local = \ flask.session['switch_min_mean_session'] options['hide_lineplot'] = bool(request.args.get('hide_lineplot')) show_lineplot = not options['hide_lineplot'] options['show_mad'] = show_mad = bool(request.args.get('show_mad')) options['show_stddev'] = show_stddev = \ bool(request.args.get('show_stddev')) options['hide_all_points'] = hide_all_points = bool( request.args.get('hide_all_points')) options['xaxis_date'] = xaxis_date = bool( request.args.get('xaxis_date')) options['limit'] = limit = int( request.args.get('limit', 0)) options['show_cumulative_minimum'] = show_cumulative_minimum = bool( request.args.get('show_cumulative_minimum')) options['show_linear_regression'] = show_linear_regression = bool( request.args.get('show_linear_regression')) options['show_failures'] = show_failures = bool( request.args.get('show_failures')) options['normalize_by_median'] = normalize_by_median = bool( request.args.get('normalize_by_median')) options['show_moving_average'] = moving_average = bool( request.args.get('show_moving_average')) options['show_moving_median'] = moving_median = bool( request.args.get('show_moving_median')) options['moving_window_size'] = moving_window_size = int( request.args.get('moving_window_size', 10)) options['hide_highlight'] = bool( request.args.get('hide_highlight')) options['logarithmic_scale'] = bool( request.args.get('logarithmic_scale')) show_highlight = not options['hide_highlight'] # Load the graph parameters. plot_parameters = parse_and_load_plot_parameters(request.args, session, ts) # Extract requested mean trend. mean_parameter = parse_mean_parameter(request.args, session, ts) # Sanity check the arguments. if not plot_parameters and not mean_parameter: return render_template("error.html", message="Nothing to graph.") # Extract requested baselines, and their titles. 
baseline_parameters = [] for name, value in request.args.items(): # Baselines to graph are passed as: # # baseline.title=<run id> if not name.startswith('baseline.'): continue baseline_title = name[len('baseline.'):] run_id_str = value try: run_id = int(run_id_str) except Exception: return abort(400, "Invalid baseline run id {}".format(run_id_str)) try: run = session.query(ts.Run) \ .options(joinedload(ts.Run.machine)) \ .filter(ts.Run.id == run_id) \ .one() except Exception: err_msg = ("The run {} was not found in the database." .format(run_id)) return render_template("error.html", message=err_msg) baseline_parameters.append((run, baseline_title)) # Create region of interest for run data region if we are performing a # comparison. revision_range = None highlight_run_id = request.args.get('highlight_run') if show_highlight and highlight_run_id and highlight_run_id.isdigit(): highlight_run = session.query(ts.Run).filter_by( id=int(highlight_run_id)).first() if highlight_run is None: abort(404, "Invalid highlight_run id {}".format(highlight_run_id)) # Find the neighboring runs, by order. prev_runs = list(ts.get_previous_runs_on_machine(session, highlight_run, N=1)) if prev_runs: start_rev = prev_runs[0].order.llvm_project_revision end_rev = highlight_run.order.llvm_project_revision revision_range = { "start": start_rev, "end": end_rev, } # Build the graph data. legend = [] graph_plots = [] graph_datum = [] baseline_plots = [] revision_cache = {} num_plots = len(plot_parameters) metrics = list(set(req.field.name for req in plot_parameters)) for i, req in enumerate(plot_parameters): # Determine the base plot color. col = list(util.makeDarkColor(float(i) / num_plots)) url = "/".join([str(req.machine.id), str(req.test.id), str(req.field_index)]) legend.append(LegendItem(req.machine, req.test.name, req.field.name, tuple(col), url)) # Load all the field values for this test on the same machine. data = load_graph_data(req, show_failures, limit, xaxis_date, revision_cache) graph_datum.append((req.test.name, data, col, req.field, url, req.machine)) # Get baselines for this line num_baselines = len(baseline_parameters) for baseline_id, (baseline, baseline_title) in \ enumerate(baseline_parameters): q_baseline = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Machine.name) \ .join(ts.Run).join(ts.Order).join(ts.Machine) \ .filter(ts.Run.id == baseline.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) # In the event of many samples, use the mean of the samples as the # baseline. samples = [] for sample in q_baseline: samples.append(sample[0]) # Skip this baseline if there is no data. if not samples: continue mean = sum(samples)/len(samples) # Darken the baseline color distinguish from non-baselines. # Make a color closer to the sample than its neighbour. color_offset = float(baseline_id) / num_baselines / 2 my_color = (i + color_offset) / num_plots dark_col = list(util.makeDarkerColor(my_color)) str_dark_col = util.toColorString(dark_col) baseline_plots.append({ "color": str_dark_col, "lineWidth": 2, "yaxis": {"from": mean, "to": mean}, # "name": q_baseline[0].llvm_project_revision, "name": "Baseline %s: %s (%s)" % (baseline_title, req.test.name, req.field.name), }) baseline_name = ("Baseline {} on {}" .format(baseline_title, q_baseline[0].name)) legend.append(LegendItem(BaselineLegendItem( baseline_name, baseline.id), req.test.name, req.field.name, dark_col, None)) # Draw mean trend if requested. 
if mean_parameter: machine, field = mean_parameter test_name = 'Geometric Mean' if field.name not in metrics: metrics.append(field.name) col = (0, 0, 0) legend.append(LegendItem(machine, test_name, field.name, col, None)) data = load_geomean_data(field, machine, limit, xaxis_date, revision_cache) graph_datum.append((test_name, data, col, field, None, machine)) def trace_name(name, test_name, field_name): return "%s: %s (%s)" % (name, test_name, field_name) for test_name, data, col, field, url, machine in graph_datum: # Generate trace metadata. trace_meta = {} trace_meta["machine"] = machine.name trace_meta["machineID"] = machine.id if len(graph_datum) > 1: # If there are more than one plot in the graph, also label the # test name. trace_meta["test_name"] = test_name trace_meta["metric"] = field.name # Compute the graph points. pts_x = [] pts_y = [] meta = [] errorbar = {"x": [], "y": [], "error_y": {"type": "data", "visible": True, "array": []}} cumulative_minimum = {"x": [], "y": []} moving_median_data = {"x": [], "y": []} moving_average_data = {"x": [], "y": []} multisample_points_data = {"x": [], "y": [], "meta": []} if normalize_by_median: normalize_by = 1.0/stats.median([min([d[0] for d in values]) for _, values in data]) else: normalize_by = 1.0 min_val = None # Note data is sorted in load_graph_data(). for point_label, datapoints in data: # Get the samples. values = [data_array[0] for data_array in datapoints] orders = [data_array[1] for data_array in datapoints] # And the date on which they were taken. dates = [data_array[2] for data_array in datapoints] # Run ID where this point was collected. run_ids = [data_array[3] for data_array in datapoints if len(data_array) == 4] values = [v * normalize_by for v in values] is_multisample = (len(values) > 1) aggregation_fn = min if switch_min_mean_local: aggregation_fn = lnt.util.stats.agg_mean if field.bigger_is_better: aggregation_fn = max agg_value, agg_index = \ aggregation_fn((value, index) for (index, value) in enumerate(values)) pts_y.append(agg_value) # Plotly does not sort X axis in case of type: 'category'. # point_label is a string (order revision) if xaxis_date = False pts_x.append(point_label) # Generate point metadata. point_metadata = {"order": orders[agg_index].as_ordered_string(), "orderID": orders[agg_index].id, "date": str(dates[agg_index])} if run_ids: point_metadata["runID"] = str(run_ids[agg_index]) meta.append(point_metadata) # Add the multisample points, if requested. if not hide_all_points and (is_multisample or bool(request.args.get('csv')) or bool(request.args.get('download_csv'))): for i, v in enumerate(values): multisample_metadata = {"order": orders[i].as_ordered_string(), "orderID": orders[i].id, "date": str(dates[i])} if run_ids: multisample_metadata["runID"] = str(run_ids[i]) multisample_points_data["x"].append(point_label) multisample_points_data["y"].append(v) multisample_points_data["meta"].append(multisample_metadata) # Add the standard deviation error bar, if requested. if show_stddev: mean = stats.mean(values) sigma = stats.standard_deviation(values) errorbar["x"].append(point_label) errorbar["y"].append(mean) errorbar["error_y"]["array"].append(sigma) # Add the MAD error bar, if requested. 
if show_mad: med = stats.median(values) mad = stats.median_absolute_deviation(values, med) errorbar["x"].append(point_label) errorbar["y"].append(med) errorbar["error_y"]["array"].append(mad) if show_cumulative_minimum: min_val = agg_value if min_val is None else min(min_val, agg_value) cumulative_minimum["x"].append(point_label) cumulative_minimum["y"].append(min_val) # Compute the moving average and or moving median of our data if # requested. if moving_average or moving_median: def compute_moving_average(x, window, average_list, _): average_list["x"].append(x) average_list["y"].append(lnt.util.stats.mean(window)) def compute_moving_median(x, window, _, median_list): median_list["x"].append(x) median_list["y"].append(lnt.util.stats.median(window)) def compute_moving_average_and_median(x, window, average_list, median_list): average_list["x"].append(x) average_list["y"].append(lnt.util.stats.mean(window)) median_list["x"].append(x) median_list["y"].append(lnt.util.stats.median(window)) if moving_average and moving_median: fun = compute_moving_average_and_median elif moving_average: fun = compute_moving_average else: fun = compute_moving_median len_pts = len(pts_x) for i in range(len_pts): start_index = max(0, i - moving_window_size) end_index = min(len_pts, i + moving_window_size) window_pts = pts_y[start_index:end_index] fun(pts_x[i], window_pts, moving_average_data, moving_median_data) yaxis_index = metrics.index(field.name) yaxis = "y" if yaxis_index == 0 else "y%d" % (yaxis_index + 1) # Add the minimum line plot, if requested. if show_lineplot: plot = { "name": trace_name("Line", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "type": "scatter", "mode": "lines+markers", "line": {"color": util.toColorString(col)}, "x": pts_x, "y": pts_y, "meta": meta } plot.update(trace_meta) if url: plot["url"] = url graph_plots.append(plot) # Add regression line, if requested. if show_linear_regression and len(pts_x) >= 2: unique_x = list(set(pts_x)) if xaxis_date: unique_x.sort() else: unique_x.sort(key=lambda sample: convert_revision(sample, cache=revision_cache)) num_unique_x = len(unique_x) if num_unique_x >= 2: dict_x = {} x_min = pts_x[0] x_max = pts_x[-1] # We compute the regression line in terms of a normalized X scale. if xaxis_date: x_range = float((x_max - x_min).total_seconds()) for x_key in unique_x: dict_x[x_key] = (x_key - x_min).total_seconds() / x_range else: for i, x_key in enumerate(unique_x): dict_x[x_key] = i/(num_unique_x - 1) norm_x = [dict_x[xi] for xi in pts_x] try: info = ext_stats.linregress(norm_x, pts_y) except ZeroDivisionError: info = None except ValueError: info = None if info is not None: slope, intercept, _, _, _ = info reglin_col = [c * 0.8 for c in col] if xaxis_date: reglin_y = [(xi - x_min).total_seconds() / x_range * slope + intercept for xi in unique_x] else: reglin_y = [i/(num_unique_x - 1) * slope + intercept for i in range(num_unique_x)] plot = { "name": trace_name("Linear Regression", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(reglin_col), "width": 2}, # "shadowSize": 4, "x": unique_x, "y": reglin_y } plot.update(trace_meta) graph_plots.insert(0, plot) # Add the points plot, if used. 
if multisample_points_data["x"]: pts_col = (0, 0, 0) multisample_points_data.update({ "name": trace_name("Points", test_name, field.name), "legendgroup": test_name, "showlegend": False, "yaxis": yaxis, # "hoverinfo": "skip", "type": "scatter", "mode": "markers", "marker": {"color": util.toColorString(pts_col), "size": 5} }) multisample_points_data.update(trace_meta) if url: multisample_points_data["url"] = url graph_plots.append(multisample_points_data) # Add the error bar plot, if used. if errorbar["x"]: bar_col = [c * 0.4 for c in col] errorbar.update({ "name": trace_name("Error bars", test_name, field.name), "showlegend": False, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "markers", "marker": {"color": util.toColorString(bar_col)} }) errorbar.update(trace_meta) graph_plots.append(errorbar) # Add the moving average plot, if used. if moving_average_data["x"]: avg_col = [c * 0.7 for c in col] moving_average_data.update({ "name": trace_name("Moving average", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(avg_col)} }) moving_average_data.update(trace_meta) graph_plots.append(moving_average_data) # Add the moving median plot, if used. if moving_median_data["x"]: med_col = [c * 0.6 for c in col] moving_median_data.update({ "name": trace_name("Moving median: ", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(med_col)} }) moving_median_data.update(trace_meta) graph_plots.append(moving_median_data) if cumulative_minimum["x"]: min_col = [c * 0.5 for c in col] cumulative_minimum.update({ "name": trace_name("Cumulative Minimum", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(min_col)} }) cumulative_minimum.update(trace_meta) graph_plots.append(cumulative_minimum) if bool(request.args.get("json")) or bool(request.args.get("download_json")): json_obj = dict() json_obj['data'] = graph_plots # Flatten ORM machine objects to their string names. simple_type_legend = [] for li in legend: # Flatten name, make color a dict. new_entry = { 'name': li.machine.name, 'test': li.test_name, 'unit': li.field_name, 'color': util.toColorString(li.color), 'url': li.url, } simple_type_legend.append(new_entry) json_obj['legend'] = simple_type_legend json_obj['revision_range'] = revision_range json_obj['current_options'] = options json_obj['test_suite_name'] = ts.name json_obj['baselines'] = baseline_plots flask_json = flask.jsonify(**json_obj) if bool(request.args.get('json')): return flask_json else: json_file = BytesIO() lines = flask_json.get_data() json_file.write(lines) json_file.seek(0) return send_file(json_file, mimetype='text/json', attachment_filename='Graph.json', as_attachment=True) return render_template("v4_graph.html", options=options, graph_plots=graph_plots, metrics=metrics, legend=legend, **ts_data(ts)) @v4_route("/global_status") def v4_global_status(): session = request.session ts = request.get_testsuite() metric_fields = sorted(list(ts.Sample.get_metric_fields()), key=lambda f: f.name) fields = dict((f.name, f) for f in metric_fields) # Get the latest run. latest = session.query(ts.Run.start_time).\ order_by(ts.Run.start_time.desc()).first() # If we found an entry, use that. 
if latest is not None: latest_date, = latest else: # Otherwise, just use today. latest_date = datetime.date.today() # Create a datetime for the day before the most recent run. yesterday = latest_date - datetime.timedelta(days=1) # Get arguments. revision = request.args.get('revision', str(ts.Machine.DEFAULT_BASELINE_REVISION)) field = fields.get(request.args.get('field', None), metric_fields[0]) # Get the list of all runs we might be interested in. recent_runs = session.query(ts.Run) \ .filter(ts.Run.start_time > yesterday) \ .all() # Aggregate the runs by machine. recent_runs_by_machine = multidict.multidict() for run in recent_runs: recent_runs_by_machine[run.machine] = run # Get a sorted list of recent machines. recent_machines = sorted(recent_runs_by_machine.keys(), key=lambda m: m.name) # We use periods in our machine names. css does not like this # since it uses periods to demark classes. Thus we convert periods # in the names of our machines to dashes for use in css. It is # also convenient for our computations in the jinja page to have # access to def get_machine_keys(m): m.css_name = m.name.replace('.', '-') return m recent_machines = list(map(get_machine_keys, recent_machines)) # For each machine, build a table of the machine, the baseline run, and the # most recent run. We also computed a list of all the runs we are reporting # over. machine_run_info = [] reported_run_ids = [] for machine in recent_machines: runs = recent_runs_by_machine[machine] # Get the baseline run for this machine. baseline = machine.get_closest_previously_reported_run( session, ts.Order(llvm_project_revision=revision)) # Choose the "best" run to report on. We want the most recent one with # the most recent order. run = max(runs, key=lambda r: (r.order, r.start_time)) if baseline: machine_run_info.append((baseline, run)) reported_run_ids.append(baseline.id) reported_run_ids.append(run.id) if not machine_run_info: abort(404, "No closest runs for revision '{}'".format(revision)) # Get the set all tests reported in the recent runs. reported_tests = session.query(ts.Test.id, ts.Test.name).filter( sqlalchemy.sql.exists('*', sqlalchemy.sql.and_( ts.Sample.run_id.in_(reported_run_ids), ts.Sample.test_id == ts.Test.id))).all() # Load all of the runs we are interested in. runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, reported_run_ids) # Build the test matrix. This is a two dimensional table index by # (machine-index, test-index), where each entry is the percent change. test_table = [] for i, (test_id, test_name) in enumerate(reported_tests): # Create the row, starting with the test name and worst entry. row = [(test_id, test_name), None] # Compute comparison results for each machine. row.extend((runinfo.get_run_comparison_result( run, baseline, test_id, field, ts.Sample.get_hash_of_binary_field()), run.id) for baseline, run in machine_run_info) # Compute the worst cell value. if len(row) > 2: row[1] = max(cr.pct_delta for cr, _ in row[2:]) test_table.append(row) # Order the table by worst regression. test_table.sort(key=lambda row: row[1], reverse=True) return render_template("v4_global_status.html", tests=test_table, machines=recent_machines, fields=metric_fields, selected_field=field, selected_revision=revision, **ts_data(ts)) @v4_route("/daily_report") def v4_daily_report_overview(): # Redirect to the report for the most recent submitted run's date. session = request.session ts = request.get_testsuite() # Get the latest run. 
latest = session.query(ts.Run).\ order_by(ts.Run.start_time.desc()).limit(1).first() # If we found a run, use it's start time. if latest: date = latest.start_time else: # Otherwise, just use today. date = datetime.date.today() extra_args = request.args.copy() extra_args.pop("year", None) extra_args.pop("month", None) extra_args.pop("day", None) return v4_redirect(v4_url_for(".v4_daily_report", year=date.year, month=date.month, day=date.day, **extra_args)) @v4_route("/daily_report/<int:year>/<int:month>/<int:day>") def v4_daily_report(year, month, day): num_days_str = request.args.get('num_days') if num_days_str is not None: num_days = int(num_days_str) else: num_days = 3 day_start_str = request.args.get('day_start') if day_start_str is not None: day_start = int(day_start_str) else: day_start = 16 filter_machine_regex = request.args.get('filter-machine-regex') ts = request.get_testsuite() # Create the report object. report = lnt.server.reporting.dailyreport.DailyReport( ts, year, month, day, num_days, day_start, filter_machine_regex=filter_machine_regex) # Build the report. try: report.build(request.session) except ValueError: return abort(400) return render_template("v4_daily_report.html", report=report, analysis=lnt.server.reporting.analysis, **ts_data(ts)) ### # Cross Test-Suite V4 Views def get_summary_config_path(): return os.path.join(current_app.old_config.tempDir, 'summary_report_config.json') @db_route("/summary_report/edit", methods=('GET', 'POST')) def v4_summary_report_ui(): # If this is a POST request, update the saved config. session = request.session if request.method == 'POST': # Parse the config data. config_data = request.form.get('config') config = flask.json.loads(config_data) # Write the updated config. with open(get_summary_config_path(), 'w') as f: flask.json.dump(config, f, indent=2) # Redirect to the summary report. return v4_redirect(db_url_for(".v4_summary_report")) config_path = get_summary_config_path() if os.path.exists(config_path): with open(config_path) as f: config = flask.json.load(f) else: config = { "machine_names": [], "orders": [], "machine_patterns": [], } # Get the list of available test suites. testsuites = request.get_db().testsuite.values() # Gather the list of all run orders and all machines. def to_key(name): first = name.split('.', 1)[0] if first.isdigit(): return (int(first), name) return (first, name) all_machines = set() all_orders = set() for ts in testsuites: for name, in session.query(ts.Machine.name): all_machines.add(name) for name, in session.query(ts.Order.llvm_project_revision): all_orders.add(name) all_machines = sorted(all_machines) all_orders = sorted(all_orders, key=to_key) return render_template("v4_summary_report_ui.html", config=config, all_machines=all_machines, all_orders=all_orders, **ts_data(ts)) @v4_route("/latest_runs_report") def v4_latest_runs_report(): ts = request.get_testsuite() num_runs_str = request.args.get('num_runs') if num_runs_str is not None: num_runs = int(num_runs_str) else: num_runs = 10 report = lnt.server.reporting.latestrunsreport.LatestRunsReport(ts, num_runs) report.build(request.session) return render_template("v4_latest_runs_report.html", report=report, analysis=lnt.server.reporting.analysis, **ts_data(ts)) @db_route("/summary_report") def v4_summary_report(): session = request.session # Load the summary report configuration. 
config_path = get_summary_config_path() if not os.path.exists(config_path): return render_template("error.html", message="""\ You must define a summary report configuration first.""") with open(config_path) as f: config = flask.json.load(f) # Create the report object. report = lnt.server.reporting.summaryreport.SummaryReport( request.get_db(), config['orders'], config['machine_names'], config['machine_patterns']) # Build the report. report.build(session) if bool(request.args.get('json')): json_obj = dict() json_obj['ticks'] = report.report_orders data = [] for e in report.normalized_data_table.items(): header, samples = e raw_samples = samples.getvalue() data.append([header, raw_samples]) json_obj['data'] = data return flask.jsonify(**json_obj) return render_template("v4_summary_report.html", report=report) @frontend.route('/rules') def rules(): discovered_rules = lnt.server.db.rules_manager.DESCRIPTIONS return render_template("rules.html", rules=discovered_rules) @frontend.route('/log') def log(): with open(current_app.config['log_file_name'], 'r') as f: log_lines = f.readlines() r'2017-07-21 15:02:15,143 ERROR:' return render_template("log.html", log_lines=log_lines) @frontend.route('/debug') def debug(): assert not current_app.debug @frontend.route('/__health') def health(): """Our instance health. If queue is too long or we use too much mem, return 500. Monitor might reboot us for this.""" is_bad_state = False msg = "Ok" import resource stats = resource.getrusage(resource.RUSAGE_SELF) mem = stats.ru_maxrss if mem > 1024**3: is_bad_state = True msg = "Over memory " + str(mem) + ">" + str(1024**3) if is_bad_state: return msg, 500 return msg, 200 @v4_route("/search") def v4_search(): session = request.session ts = request.get_testsuite() query = request.args.get('q') l_arg = request.args.get('l', 8) default_machine = request.args.get('m', None) assert query results = lnt.server.db.search.search(session, ts, query, num_results=l_arg, default_machine=default_machine) return json.dumps( [('%s #%s' % (r.machine.name, r.order.llvm_project_revision), r.id) for r in results]) # How much data to render in the Matrix view. MATRIX_LIMITS = [ ('12', 'Small'), ('50', 'Medium'), ('250', 'Large'), ('-1', 'All'), ] class MatrixOptions(Form): limit = SelectField('Size', choices=MATRIX_LIMITS) def baseline(): # type: () -> Optional[testsuitedb.TestSuiteDB.Baseline] """Get the baseline object from the user's current session baseline value or None if one is not defined. """ session = request.session ts = request.get_testsuite() base_id = flask.session.get(baseline_key(ts.name)) if not base_id: return None try: base = session.query(ts.Baseline).get(base_id) except NoResultFound: return None return base @v4_route("/matrix", methods=['GET', 'POST']) def v4_matrix(): """A table view for Run sample data, because *some* people really like to be able to see results textually. request.args.limit limits the number of samples. for each dataset to add, there will be a "plot.n=.m.b.f" where m is machine ID, b is benchmark ID and f os field kind offset. "n" is used to unique the paramters, and is ignored. """ session = request.session ts = request.get_testsuite() # Load the matrix request parameters. 
form = MatrixOptions(request.form) if request.method == 'POST': post_limit = form.limit.data else: post_limit = MATRIX_LIMITS[0][0] plot_parameters = parse_and_load_plot_parameters(request.args, session, ts) if not plot_parameters: abort(404, "Request requires some plot arguments.") # Feature: if all of the results are from the same machine, hide the name # to make the headers more compact. dedup = True for r in plot_parameters: if r.machine.id != plot_parameters[0].machine.id: dedup = False if dedup: machine_name_common = plot_parameters[0].machine.name machine_id_common = plot_parameters[0].machine.id else: machine_name_common = machine_id_common = None # It is nice for the columns to be sorted by name. plot_parameters.sort(key=lambda x: x.test.name), # Now lets get the data. all_orders = set() order_to_id = {} for req in plot_parameters: q = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \ .join(ts.Run) \ .join(ts.Order) \ .filter(ts.Run.machine_id == req.machine.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) \ .order_by(ts.Order.llvm_project_revision.desc()) limit = request.args.get('limit', post_limit) if limit or post_limit: limit = int(limit) if limit != -1: q = q.limit(limit) req.samples = defaultdict(list) for s in q.all(): req.samples[s[1]].append(s[0]) all_orders.add(s[1]) order_to_id[s[1]] = s[2] if not all_orders: abort(404, "No orders found.") # Now grab the baseline data. user_baseline = baseline() backup_baseline = next(iter(all_orders)) if user_baseline: all_orders.add(user_baseline.order.llvm_project_revision) baseline_rev = user_baseline.order.llvm_project_revision baseline_name = user_baseline.name else: baseline_rev = backup_baseline baseline_name = backup_baseline for req in plot_parameters: q_baseline = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \ .join(ts.Run) \ .join(ts.Order) \ .filter(ts.Run.machine_id == req.machine.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) \ .filter(ts.Order.llvm_project_revision == baseline_rev) baseline_data = q_baseline.all() if baseline_data: for s in baseline_data: req.samples[s[1]].append(s[0]) all_orders.add(s[1]) order_to_id[s[1]] = s[2] else: # Well, there is a baseline, but we did not find data for it... # So lets revert back to the first run. msg = "Did not find data for {}. Showing {}." flash(msg.format(user_baseline, backup_baseline), FLASH_DANGER) all_orders.remove(baseline_rev) baseline_rev = backup_baseline baseline_name = backup_baseline all_orders = list(all_orders) all_orders.sort(reverse=True) all_orders.insert(0, baseline_rev) # Now calculate Changes between each run. for req in plot_parameters: req.change = {} for order in all_orders: cur_samples = req.samples[order] prev_samples = req.samples.get(baseline_rev, None) cr = ComparisonResult(mean, False, False, cur_samples, prev_samples, None, None, confidence_lv=0.05, bigger_is_better=False) req.change[order] = cr # Calculate Geomean for each order. 
order_to_geomean = {} curr_geomean = None for order in all_orders: curr_samples = [] prev_samples = [] for req in plot_parameters: curr_samples.extend(req.samples[order]) prev_samples.extend(req.samples[baseline_rev]) prev_geomean = calc_geomean(prev_samples) curr_geomean = calc_geomean(curr_samples) if prev_geomean: cr = ComparisonResult(mean, False, False, [curr_geomean], [prev_geomean], None, None, confidence_lv=0.05, bigger_is_better=False) order_to_geomean[order] = cr else: # There will be no change here, but display current val. if curr_geomean: order_to_geomean[order] = PrecomputedCR(curr_geomean, curr_geomean, False) # Calculate the date of each order. runs = session.query(ts.Run.start_time, ts.Order.llvm_project_revision) \ .join(ts.Order) \ .filter(ts.Order.llvm_project_revision.in_(all_orders)) \ .all() order_to_date = dict([(x[1], x[0]) for x in runs]) class FakeOptions(object): show_small_diff = False show_previous = False show_all = True show_delta = False show_stddev = False show_mad = False show_all_samples = False show_sample_counts = False return render_template("v4_matrix.html", testsuite_name=g.testsuite_name, associated_runs=plot_parameters, orders=all_orders, options=FakeOptions(), analysis=lnt.server.reporting.analysis, geomeans=order_to_geomean, order_to_id=order_to_id, form=form, baseline_rev=baseline_rev, baseline_name=baseline_name, machine_name_common=machine_name_common, machine_id_common=machine_id_common, order_to_date=order_to_date, **ts_data(ts)) @frontend.route("/explode") def explode(): """This route is going to exception. Used for testing 500 page.""" return 1/0 @frontend.route("/gone") def gone(): """This route returns 404. Used for testing 404 page.""" abort(404, "test") @frontend.route("/ping") def ping(): """Simple route to see if server is alive. Used by tests to poll on server creation.""" return "pong", 200 @frontend.route("/sleep") def sleep(): """Simple route to simulate long running page loads. Used by to diagnose proxy issues etc.""" sleep_time = 1 if request.args.get('timeout'): sleep_time = int(request.args.get('timeout')) time.sleep(sleep_time) return "Done", 200
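# --- Usage sketch (not part of the module above) ---------------------------------
# A minimal illustration of how a client could request the /matrix view described in
# v4_matrix. The host, URL prefix, and the numeric machine/test IDs below are made-up
# placeholders; only the "plot.<n>=<machine id>.<test id>.<field index>" and "limit"
# query parameters are taken from the docstring and code above.
def _example_matrix_request():
    import requests

    params = {
        "plot.0": "5.121.0",   # machine 5, test 121, field index 0 (hypothetical IDs)
        "plot.1": "5.134.0",
        "limit": "50",         # picked up via request.args.get('limit', post_limit)
    }
    return requests.get("http://localhost:8000/db_default/v4/nts/matrix", params=params)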
nilq/baby-python
python
import os
from datetime import datetime

import numpy
import xarray as xr

from esdl.cate.cube_gen import CateCubeSourceProvider


class OzoneTemisProvider(CateCubeSourceProvider):
    def __init__(self, cube_config, name='ozone_temis', dir=None, resampling_order=None):
        super().__init__(cube_config, name, dir, resampling_order)
        self.old_indices = None

    @property
    def variable_descriptors(self):
        shared_meta_info = {
            'data_type': numpy.float32,
            'fill_value': numpy.nan,
            'references': 'Jacob C. A. van Peet, Ronald J. van der A, Hennie M. Kelder, and Pieternel F. Levelt (2018),'
                          'Simultaneous assimilation of ozone profiles from multiple UV-VIS satellite instruments, Atmospheric Chemistry and Physics, '
                          'doi:10.5194/acp-18-1685-2018',
            'comment': 'The global tropospheric ozone column from 0 to 6 km is presented here. The column is derived by simultaneously assimilating ozone profiles of GOME-2 and OMI.',
            'url': 'http://www.temis.nl/protocols/tropo.html',
            'project_name': 'Tropospheric ozone column',
        }
        ds = xr.open_dataset(self.dir_path + '/tropcol-20111202-v0002.nc')
        meta = dict()
        meta.update(shared_meta_info)
        meta.update(ds.attrs)
        coords = ('lon', 'lat', 'time')
        res = dict()
        for vs in ds.variables:
            if vs not in coords:
                meta_var = {
                    'source_name': vs,
                    'units': ds[vs].units,
                    'long_name': ds[vs].long_name,
                    'standard_name': ds[vs].standard_name,
                }
                meta_var.update(meta)
                res[vs] = meta_var
        ds.close()
        return res

    def compute_source_time_ranges(self):
        source_time_ranges = list()
        for root, sub_dirs, files in os.walk(self.dir_path):
            for file_name in files:
                if '.nc' in file_name:
                    f = os.path.join(root, file_name)
                    buff = file_name.split('-')
                    dtt = datetime.strptime(buff[1], '%Y%m%d')
                    source_time_ranges.append((dtt, dtt, f, 0))
        return sorted(source_time_ranges, key=lambda item: item[0])

    def transform_source_image(self, source_image):
        """
        Transforms the source image by replacing the -9.9 fill value with NaN and flipping it vertically.

        :param source_image: 2D image
        :return: source_image
        """
        # TODO (hans-permana, 20161219): the following line is a workaround to an issue where the nan values are
        # always read as -9.9. Find out why these values are automatically converted and create a better fix.
        source_image[source_image == -9.9] = numpy.nan
        return numpy.flipud(source_image)
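# --- Worked example (illustration only) -------------------------------------------
# compute_source_time_ranges() above derives each (start, end) pair from the middle
# token of file names such as 'tropcol-20111202-v0002.nc'; this shows that parsing
# step on its own, using the same sample file name referenced in variable_descriptors.
def _example_parse_timestamp():
    file_name = 'tropcol-20111202-v0002.nc'
    buff = file_name.split('-')
    return datetime.strptime(buff[1], '%Y%m%d')  # -> datetime(2011, 12, 2, 0, 0)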
nilq/baby-python
python
# # This example demonstrates using Lark with a custom lexer. # # You can use a custom lexer to tokenize text when the lexers offered by Lark # are too slow, or not flexible enough. # # You can also use it (as shown in this example) to tokenize streams of objects. # from lark import Lark, Transformer, v_args from lark.lexer import Lexer, Token class TypeLexer(Lexer): def __init__(self, lexer_conf): pass def lex(self, data): for obj in data: if isinstance(obj, int): yield Token('INT', obj) elif isinstance(obj, (type(''), type(u''))): yield Token('STR', obj) else: raise TypeError(obj) parser = Lark(""" start: data_item+ data_item: STR INT* %declare STR INT """, parser='lalr', lexer=TypeLexer) class ParseToDict(Transformer): @v_args(inline=True) def data_item(self, name, *numbers): return name.value, [n.value for n in numbers] start = dict def test(): data = ['alice', 1, 27, 3, 'bob', 4, 'carrie', 'dan', 8, 6] print(data) tree = parser.parse(data) res = ParseToDict().transform(tree) print('-->') print(res) # prints {'alice': [1, 27, 3], 'bob': [4], 'carrie': [], 'dan': [8, 6]} if __name__ == '__main__': test()
nilq/baby-python
python
import json from .errors import JrsNodeNotFound from .refs_resolver import RefsResolver class Context(object): def __init__(self): self.schemas = {} self.nodes = {} self.refsResolver = RefsResolver(self) def addSchema(self, schema): self.schemas[schema.id] = schema def addNode(self, schemaId, path, node): self.nodes["{}#{}".format(schemaId, path)] = node def getNode(self, schemaId, path): fullPath = "{}#{}".format(schemaId, path.replace("/", ".")) if fullPath not in self.nodes: raise JrsNodeNotFound("Not found node with schemaId: {}, path: {}".format(schemaId, path)) return self.nodes[fullPath] def initNodes(self): for schema in self.schemas.values(): schema.root.initNodes() def resolveRefs(self): self.refsResolver.resolveRefs() def toJson(self, prettyPrint): schemas = {} for item in self.schemas.values(): schemas[item.id] = item.root.value if prettyPrint: return json.dumps(schemas, separators=(",", ": "), indent=4) + "\n" else: return json.dumps(schemas, separators=(",", ":"))
nilq/baby-python
python
# Given a list of dominoes, dominoes[i] = [a, b] is equivalent to dominoes[j] = [c, d] if and only if either (a==c and b==d), or (a==d and b==c) - that is, one domino can be rotated to be equal to another domino.
# Return the number of pairs (i, j) for which 0 <= i < j < dominoes.length, and dominoes[i] is equivalent to dominoes[j].


class Solution(object):
    def numEquivalentDominoes(self, dominoes):
        count = 0
        seen = {}

        for domino in dominoes:
            # Normalise so that [a, b] and its rotation [b, a] share one key.
            key = tuple(sorted(domino))
            # Every previously seen equivalent domino forms one new pair with this one.
            count += seen.get(key, 0)
            seen[key] = seen.get(key, 0) + 1

        return count


print(Solution().numEquivalentDominoes([[1, 2], [2, 1], [3, 4], [5, 6]]))  # 1
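# --- Alternative sketch ------------------------------------------------------------
# The same count can be computed by normalising each domino to a sorted tuple,
# tallying the groups, and summing n * (n - 1) // 2 pairs per group.
from collections import Counter


def count_equivalent_pairs(dominoes):
    groups = Counter(tuple(sorted(domino)) for domino in dominoes)
    return sum(n * (n - 1) // 2 for n in groups.values())


print(count_equivalent_pairs([[1, 2], [2, 1], [3, 4], [5, 6]]))  # 1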
nilq/baby-python
python
from ralph.accounts.api import RalphUserSimpleSerializer from ralph.api import RalphAPIViewSet, router from ralph.assets.api.serializers import RalphAPISerializer from ralph.sim_cards.models import CellularCarrier, SIMCard, SIMCardFeatures class CellularCarrierSerializer(RalphAPISerializer): class Meta: model = CellularCarrier fields = ['name'] class SIMCardFeaturesSerializer(RalphAPISerializer): class Meta: model = SIMCardFeatures fields = ['name'] class SIMCardSerializer(RalphAPISerializer): carrier = CellularCarrierSerializer() features = SIMCardFeaturesSerializer(many=True) user = RalphUserSimpleSerializer() owner = RalphUserSimpleSerializer() class Meta: model = SIMCard fields = ['status', 'card_number', 'phone_number', 'pin1', 'puk1', 'user', 'owner', 'warehouse', 'carrier', 'features', 'quarantine_until', 'modified'] class CellularCarrierViewSet(RalphAPIViewSet): queryset = CellularCarrier.objects.all() serializer_class = CellularCarrierSerializer class SIMCardFeatureViewSet(RalphAPIViewSet): queryset = SIMCardFeatures.objects.all() serializer_class = SIMCardFeaturesSerializer class SIMCardViewSet(RalphAPIViewSet): queryset = SIMCard.objects.all() serializer_class = SIMCardSerializer select_related = ['carrier', 'user', 'owner'] prefetch_related = ['features'] filter_fields = ['user__username', 'features__name', 'owner__username', 'carrier__name'] router.register(r'sim-card-feature', SIMCardFeatureViewSet) router.register(r'sim-card-cellular-carrier', CellularCarrierViewSet) router.register(r'sim-card', SIMCardViewSet) urlpatterns = []
nilq/baby-python
python
"""Test runway.config.components.runway._test_def.""" # pylint: disable=no-self-use,protected-access # pyright: basic import pytest from pydantic import ValidationError from runway.config.components.runway import ( CfnLintRunwayTestDefinition, RunwayTestDefinition, ScriptRunwayTestDefinition, YamlLintRunwayTestDefinition, ) from runway.config.models.runway import ( CfnLintRunwayTestDefinitionModel, ScriptRunwayTestDefinitionModel, YamlLintRunwayTestDefinitionModel, ) class TestCfnLintRunwayTestDefinition: """Test runway.config.components.runway._test_def.CfnLintRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( CfnLintRunwayTestDefinition.parse_obj({}), CfnLintRunwayTestDefinition ) class TestRunwayTestDefinition: """Test runway.config.components.runway._test_def.RunwayTestDefinition.""" def test_new_cfn_lint(self) -> None: """Test creation CfnLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(CfnLintRunwayTestDefinitionModel()), CfnLintRunwayTestDefinition, ) def test_new_invalid(self) -> None: """Test new invalid type.""" with pytest.raises(TypeError) as excinfo: RunwayTestDefinition({}) # type: ignore assert str(excinfo.value).startswith("expected data of type") def test_new_script(self) -> None: """Test creation ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(ScriptRunwayTestDefinitionModel()), ScriptRunwayTestDefinition, ) def test_new_yamllint(self) -> None: """Test creation ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(YamlLintRunwayTestDefinitionModel()), YamlLintRunwayTestDefinition, ) def test_parse_obj_cfn_lint(self) -> None: """Test parse_obj CfnLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "cfn-lint"}), CfnLintRunwayTestDefinition, ) def test_parse_obj_invalid(self) -> None: """Test parse_obj invalid object.""" with pytest.raises(ValidationError): RunwayTestDefinition.parse_obj({"type": "invalid"}) def test_parse_obj_script(self) -> None: """Test parse_obj ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "script"}), ScriptRunwayTestDefinition, ) def test_parse_obj_yamllint(self) -> None: """Test parse_obj YamlLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "yamllint"}), YamlLintRunwayTestDefinition, ) def test_register_variable(self) -> None: """Test _register_variable.""" obj = RunwayTestDefinition.parse_obj( {"type": "script", "name": "test_register_variable", "required": True} ) assert obj._vars["required"].name == "test_register_variable.required" class TestScriptRunwayTestDefinition: """Test runway.config.components.runway._test_def.ScriptRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( ScriptRunwayTestDefinition.parse_obj({}), ScriptRunwayTestDefinition ) class TestYamlLintRunwayTestDefinition: """Test runway.config.components.runway._test_def.YamlLintRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( YamlLintRunwayTestDefinition.parse_obj({}), YamlLintRunwayTestDefinition )
nilq/baby-python
python
""""@package This package enables the research group usage for the database. """ from src.models.employee import EmployeeDataAccess class ResearchGroup: """ This class defines a research group """ def __init__(self, name, abbreviation, logo_location, description_id, address, telephone_number, is_active): """ ResearchGroup initializer. :param name: Research group name. :param abbreviation: Research group abbreviation. :param logo_location: Location of group logo. :param description_id: ID of the group description. :param address: Research group address. :param telephone_number: Research group telephone number. :param study_field: Research group study field. :param is_active: Status of research group. """ self.name = name self.abbreviation = abbreviation self.logo_location = logo_location self.address = address self.telephone_number = telephone_number self.is_active = is_active self.description_id = description_id self.description_eng = None self.description_nl = None self.contact_person = None def to_dict(self): """ Converts object to a dictionary. :return: Dictionary of the object data. """ return vars(self) class ResearchGroupDataAccess: """ This class interacts with the ResearchGroup component of the database. """ def __init__(self, dbconnect): """ Initiates the ResearchGroupDataAccess object. :param dbconnect: The database connection. """ self.dbconnect = dbconnect def get_group_names(self, active_only): # TODO #2 error for empty fetch """ Fetches all research group names. :param active_only: Only return active research groups. :return: A list with all the active and/or non-active research group names. """ cursor = self.dbconnect.get_cursor() if active_only: cursor.execute('SELECT name FROM research_group WHERE is_active = TRUE') else: cursor.execute('SELECT name FROM research_group') return [row[0] for row in cursor] def get_research_groups(self, active_only): # TODO #2 catching empty? """ Fetches all research groups from the database. :param active_only: Only return active research groups. :return: A list with all the active and/or non-active research groups. """ return [self.get_research_group(name) for name in self.get_group_names(active_only)] def get_research_group(self, group_name): # TODO #2 """ Retrieves all the data of a given research group. :param group_name: The name of the research group to fetch. :return: Research group object. """ cursor = self.dbconnect.get_cursor() """General info""" cursor.execute( 'SELECT name, abbreviation, logo_location, description_id, address, telephone_number' ', is_active FROM research_group WHERE name=%s', (group_name,)) row = cursor.fetchone() group = ResearchGroup(row[0], row[1], row[2], row[3], row[4], row[5], row[6]) """Descriptions""" cursor.execute('SELECT html_content_nl, html_content_eng FROM document WHERE document_id=%s', (group.description_id,)) row = cursor.fetchone() if row is not None: group.description_nl = row[0] group.description_eng = row[1] """Contact person""" cursor.execute('SELECT contact_person FROM contact_person WHERE research_group=%s', (group_name,)) row = cursor.fetchone() if row is not None: employee = EmployeeDataAccess(self.dbconnect).get_employee(row[0]) group.contact_person = employee.name return group def add_research_group(self, obj): """ Adds a research group to the database. :param obj: The new research group. :raise: Exception if the database has to roll back. 
""" cursor = self.dbconnect.get_cursor() try: cursor.execute('INSERT INTO research_group(name, abbreviation, logo_location, description_id, address, ' 'telephone_number, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s)', (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address, obj.telephone_number, obj.is_active)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def update_research_group(self, group_name, obj): """ Updates a research group in the database. :param group_name: The original name of the group. :param obj: New research group. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('UPDATE research_group ' 'SET name = %s, abbreviation = %s, logo_location = %s, description_id = %s, ' 'address = %s, telephone_number = %s, is_active = %s ' 'WHERE name=%s', (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address, obj.telephone_number, obj.is_active, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def set_active(self, group_name, active): """ Changes the status of the group. :param group_name: The group to change. :param active: The new active status. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('UPDATE research_group ' 'SET is_active = %s ' 'WHERE name=%s', (active, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def set_contact_person(self, group_name, contact_person_id): """ Sets the contact person of a group. :param group_name: The research group name. :param contact_person_id: The ID of contact person of the group. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('DELETE from contact_person ' 'WHERE research_group = %s', (group_name, )) self.dbconnect.commit() employee = EmployeeDataAccess(self.dbconnect).get_employee_by_name(contact_person_id) cursor.execute('INSERT INTO contact_person VALUES (%s, %s)', (employee.e_id, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise
nilq/baby-python
python
# MIT License # # Copyright (C) IBM Corporation 2018 # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ This module implements the abstract base class for all poison filtering defences. """ from __future__ import absolute_import, division, print_function, unicode_literals import abc import sys # Ensure compatibility with Python 2 and 3 when using ABCMeta if sys.version_info >= (3, 4): ABC = abc.ABC else: ABC = abc.ABCMeta(str('ABC'), (), {}) class PoisonFilteringDefence(ABC): """ Base class for all poison filtering defences. """ defence_params = ['classifier'] def __init__(self, classifier, x_train, y_train): """ Create an :class:`.ActivationDefence` object with the provided classifier. :param classifier: model evaluated for poison :type classifier: :class:`art.classifiers.classifier.Classifier` :param x_train: dataset used to train the classifier. :type x_train: :class:`numpy.ndarray` :param y_train: labels used to train the classifier. :type y_train: :class:`numpy.ndarray` """ self.classifier = classifier self.x_train = x_train self.y_train = y_train @abc.abstractmethod def detect_poison(self, **kwargs): """ Detect poison. :param kwargs: Defence-specific parameters used by child classes. :type kwargs: `dict` :return: `(dict, list)` dictionary with report and list with items identified as poison """ raise NotImplementedError @abc.abstractmethod def evaluate_defence(self, is_clean, **kwargs): """ Evaluate the defence given the labels specifying if the data is poisoned or not. :param is_clean: 1-D array where is_clean[i]=1 means x_train[i] is clean and is_clean[i]=0 that it's poison. :param kwargs: Defence-specific parameters used by child classes. :type kwargs: `dict` :return: JSON object with confusion matrix """ raise NotImplementedError def set_params(self, **kwargs): """ Take in a dictionary of parameters and apply attack-specific checks before saving them as attributes. :param kwargs: a dictionary of defence-specific parameters :type kwargs: `dict` :return: `True` when parsing was successful """ for key, value in kwargs.items(): if key in self.defence_params: setattr(self, key, value) return True def get_params(self): """ Returns dictionary of parameters used to run defence. :return: `dict` """ dictionary = {} for param in self.defence_params: dictionary.update({param: getattr(self, param)}) return dictionary
nilq/baby-python
python
import jimi, requests def reloadModule(module): # Apply system updates clusterMembers = jimi.cluster.getAll() for clusterMember in clusterMembers: headers = { "x-api-token" : jimi.auth.generateSystemSession() } requests.get("{0}{1}system/update/{2}/".format(clusterMember,jimi.api.base,jimi.cluster.getMasterId()),headers=headers, timeout=60) requests.get("{0}{1}system/reload/module/{2}/".format(clusterMember,jimi.api.base,module),headers=headers, timeout=60)
nilq/baby-python
python