text
string
size
int64
token_count
int64
#!/usr/bin/env python # coding: utf-8 # Copyright © 2015, 2016, 2017, 2018 Wieland Hoffmann # License: MIT, see LICENSE for details from ..notifier import INotifier from ..version import version from functools import wraps from twisted.internet import protocol, reactor from twisted.plugin import IPlugin from twisted.words.protocols.irc import IRCClient from zope.interface.declarations import implementer def passthrough_to_client(func): """ :param func: """ @wraps(func) def wrapper(self, object_path): client = self.prot event_name = func.__name__ method = getattr(client, event_name) method(object_path) return wrapper class IRCNotifierBot(IRCClient): versionName = "sagbescheid" versionNum = version lineRate = 1 @property def nickname(self): return self.factory.nick def signedOn(self): self.factory.resetDelay() self.join(self.factory.channel) def _msg_channel(self, msg): """Send ``msg`` to the configured channel. :type msg: str """ self.msg(self.factory.channel, msg) def normal_start(self, object_path): """ :param self: :param object_path: """ self._msg_channel("%s started normally." % object_path) def normal_stop(self, object_path): """ :param self: :param object_path: """ self._msg_channel("%s stopped normally." % object_path) def failure(self, object_path): """ :param self: :param object_path: """ self._msg_channel("%s failed." % object_path) def ongoing_failure(self, object_path): """ :param self: :param object_path: """ self._msg_channel("%s is still failing." % object_path) def recovery(self, object_path): """ :param self: :param object_path: """ self._msg_channel("%s recovered." 
% object_path) def change_from_unknown(self, object_path): """ :param self: :param object_path: """ pass @implementer(IPlugin, INotifier) class IRCNotifierFactory(protocol.ReconnectingClientFactory): name = "irc" description = "Log events to an IRC channel" protocol = IRCNotifierBot def add_arguments(self, group): group.add_argument("--irc-nick", action="store", help="Nick for the bot") group.add_argument("--irc-channel", action="store", help="Channel for the bot to join") group.add_argument("--irc-server", action="store", help="IRC server address") group.add_argument("--irc-port", action="store", type=int, default=6667, help="IRC server port") def handle_arguments(self, args): self.channel = args.irc_channel self.nick = args.irc_nick self.port = args.irc_port self.server = args.irc_server reactor.connectTCP(self.server, self.port, self) def buildProtocol(self, addr): self.prot = protocol.ReconnectingClientFactory.buildProtocol(self, addr) return self.prot @passthrough_to_client def normal_start(self, object_path): """ :param self: :param object_path: """ @passthrough_to_client def normal_stop(self, object_path): """ :param self: :param object_path: """ @passthrough_to_client def failure(self, object_path): """ :param self: :param object_path: """ @passthrough_to_client def ongoing_failure(self, object_path): """ :param self: :param object_path: """ @passthrough_to_client def recovery(self, object_path): """ :param self: :param object_path: """ @passthrough_to_client def change_from_unknown(self, object_path): """ :param self: :param object_path: """ obj = IRCNotifierFactory()
4,107
1,250
import urllib from pyrogram import Client, filters from pyrogram.types import (InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultArticle, InputTextMessageContent) from config import Config bot = Client( 'shareurl-generator', bot_token = Config.BOT_TOKEN, api_id = Config.API_ID, api_hash = Config.API_HASH ) @bot.on_message(filters.command(['start'])) def start(client, message): rep = f"**Hi {message.from_user.username}**\n\n**Am a bot to convert __text into Shareable telegram link__.**\nWorks on both **in pm and in Inline😊**\n\nClick __/help__ if needed.." message.reply_text( text=rep, quote=False, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')],[InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")], [InlineKeyboardButton('Share Me', url='https://t.me/share/url?url=%2A%2AHello%20Plox%20%F0%9F%91%8B%2A%2A%0A%0A__I%20just%20found%20a%20Bot%20to%20convert__%20%2A%2AText%20as%20a%20Shareable%20Text%20Link%2A%2A%20__format%20%F0%9F%A4%A9.%20Hope%20it%20would%20be%20very%20helpful%20for%20u%20too...%F0%9F%A4%97%F0%9F%A4%97__%0A%0A%2A%2ABot%20Link%3A%20%40ShareUrlBot%20%F0%9F%A5%B0%2A%2A')]])) @bot.on_message(filters.command(['help'])) def help(client, message): message.reply_text("**Nothing Complicated..🤓**\n\n**For PM:**\n__Send your desired text to this bot to get your link.__\n\n**For Inline Method:**\n__Type__ `@ShareUrlBot your text`\n__in any chats keyboard and hit the inline result.__", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')]])) @bot.on_message(filters.command(['about'])) def about(client, message): message.reply_text(f"""**• Bot Info •** **My Name** :- `Share Url Generator` **Creator** :- @B_woy **Language** :- `Python3` **Library** :- `Pyrogram 1.2.8` **Server** :- `Heroku.com` **Build Status** :- `V 0.2` **• User Info •** 
**Name** :- `{message.from_user.first_name} {message.from_user.last_name}` **ID** :- `{message.from_user.id}` **Username** :- @{message.from_user.username} **DC ID** :- `{message.from_user.dc_id}`""", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url = 'https://github.com/ashkar2001/shareurlbotv1')]])) @bot.on_message(filters.text) def shareurl(client, message): query = message.text url = urllib.parse.quote(query) rpl = f"https://t.me/share/url?url={url}" rslt = f"""**Click to CopY ⬇️⬇️** \n\n```{rpl}```""" message.reply_text(text=rslt, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This Link ⬆️⬆️', url=f'{rpl}')]])) @bot.on_inline_query() def inline(client, message): query = message.query.lower() if query == "": result= [InlineQueryResultArticle(title = "Help !!", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")]]), description ="How t0 usE meH !!", thumb_url="https://telegra.ph/file/99d8f16a777c2ee2781c1.jpg", input_message_content = InputTextMessageContent(message_text ="**Nothing Complicated..**🤓\n\nType `@ShareUrlBot your text` \nin any chats keyboard and hit the inline result.\n\nNote: __U can also use Me in PM!__")) ] message.answer(result) return else: url = urllib.parse.quote(query) rpl = f"https://t.me/share/url?url={url}" rslt = f"""**Click to CopY⬇️⬇️** \n\n```{rpl}```""" result = [InlineQueryResultArticle(title = f'{query}', description =f'{rpl}', reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This linK ⬆️⬆️', url=f'{rpl}')], [InlineKeyboardButton("Search Again", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")]]), input_message_content = InputTextMessageContent(message_text = rslt)) ] message.answer(result) bot.run()
4,221
1,617
import collections import copy import http.client import json import re import sys import urllib.parse import bs4 import requests from geomet import wkt """ Based on conversation and code from https://github.com/openaddresses/machine/issues/580 """ BEACON_HEADERS = { "Content-Type": "application/json", "User-Agent": "OA", } BODY_TEMPLATE = { "layerId": None, "useSelection": False, "ext": {"minx": 0, "miny": 0, "maxx": 40000000, "maxy": 40000000}, "wkt": None, "spatialRelation": 1, "featureLimit": 1, } name_value_pattern = re.compile(r"^(\w+) = (.*)$", re.M) coordinate_pattern = re.compile(r"(?P<x>-?\d+(\.\d+)?)\s+(?P<y>-?\d+(\.\d+)?)") def get_query_url(start_url): """Create a query URL including a dynamically assigned QPS value""" res = requests.get(start_url) soup = bs4.BeautifulSoup(res.text, "html.parser") config_script = soup.find_all("script", attrs={"type": "text/javascript"})[-1] script_content = config_script.contents[0] script_data_str = re.search(r"(?<=\= )\{.*\}(?=;)", script_content).group() script_data = json.loads(script_data_str) return ( "https://beacon.schneidercorp.com/api/beaconCore/GetVectorLayer?QPS=" + script_data["QPS"] ) def get_connection(raw_url): """ Return an HTTPConnection and URL path for a starting Beacon URL. Expects a raw URL similar to: https://beacon.schneidercorp.com/api/beaconCore/GetVectorLayer?QPS=xxxx """ # Safari developer tools sneaks in some zero-width spaces: # http://www.fileformat.info/info/unicode/char/200B/index.htm url = raw_url.replace("\u200b", "") scheme, host, path, _, query, _ = urllib.parse.urlparse(url) layer_path = urllib.parse.urlunparse(("", "", path, None, query, None)) if scheme == "https": return http.client.HTTPSConnection(host), layer_path elif scheme == "http": return http.client.HTTPConnection(host), layer_path def get_starting_bbox(conn, layer_path, layer_id, radius_km=200): """ Retrieves a bounding box tuple for a Beacon layer and radius in km. 
This is meant to be an overly-large, generous bbox that should encompass any reasonable county or city data source. """ body = copy.deepcopy(BODY_TEMPLATE) body["layerId"] = int(layer_id) conn.request("POST", url=layer_path, body=json.dumps(body), headers=BEACON_HEADERS) resp = conn.getresponse() if resp.status not in range(200, 299): raise RuntimeError("Bad status in get_starting_bbox") results = json.load(resp) wkt = results.get("d", [{}])[0].get("WktGeometry", None) if not wkt: raise RuntimeError("Missing WktGeometry in get_starting_bbox") match = coordinate_pattern.search(wkt) if not match: raise RuntimeError("Unparseable WktGeometry in get_started_bbox") x, y = float(match.group("x")), float(match.group("y")) xmin, ymin = x - radius_km * 1000, y - radius_km * 1000 xmax, ymax = x + radius_km * 1000, y + radius_km * 1000 return xmin, ymin, xmax, ymax def partition_bbox(xmin, ymin, xmax, ymax): """ Cut a bounding box into four smaller bounding boxes. """ xmid, ymid = xmin / 2 + xmax / 2, ymin / 2 + ymax / 2 return [ (xmin, ymin, xmid, ymid), (xmin, ymid, xmid, ymax), (xmid, ymin, xmax, ymid), (xmid, ymid, xmax, ymax), ] def get_features(conn, layer_path, layer_id, bbox, limit=0, depth=0): """ Return a list of features after geographically searching a layer. """ body = copy.deepcopy(BODY_TEMPLATE) body["layerId"], body["featureLimit"] = int(layer_id), limit body["ext"] = dict(minx=bbox[0], miny=bbox[1], maxx=bbox[2], maxy=bbox[3]) conn.request("POST", url=layer_path, body=json.dumps(body), headers=BEACON_HEADERS) resp = conn.getresponse() if resp.status not in range(200, 299): raise RuntimeError("Bad status in get_features") records = json.load(resp).get("d", []) if limit == 0: # This is our first time through and we don't actually know how many # things there are. Assume that the current count is the limit. limit = len(records) if len(records) >= limit: # There are too many records, recurse! # This also happens the first time through before we know anything. 
bbox1, bbox2, bbox3, bbox4 = partition_bbox(*bbox) return ( get_features(conn, layer_path, layer_id, bbox1, limit, depth + 1) + get_features(conn, layer_path, layer_id, bbox2, limit, depth + 1) + get_features(conn, layer_path, layer_id, bbox3, limit, depth + 1) + get_features(conn, layer_path, layer_id, bbox4, limit, depth + 1) ) # We are good, make some GeoJSON. print(" " * depth, "found", len(records), "in", bbox, file=sys.stderr) return [make_feature(record) for record in records] def extract_properties(record): """ Get a dictionary of GeoJSON feature properties for a record. """ properties = collections.OrderedDict() html1 = record.get("TipHtml", "").replace("\r\n", "\n") html2 = record.get("ResultHtml", "").replace("\r\n", "\n") soup1 = bs4.BeautifulSoup(html1, "html.parser") soup2 = bs4.BeautifulSoup(html2, "html.parser") for text in soup1.find_all(text=name_value_pattern): properties.update({k: v for (k, v) in name_value_pattern.findall(text)}) for text in soup2.find_all(text=name_value_pattern): properties.update({k: v for (k, v) in name_value_pattern.findall(text)}) return properties def extract_geometry(record): """ Get a GeoJSON geometry object for a record. """ prop = extract_properties(record) try: geom = dict(type="Point", coordinates=[float(prop["Long"]), float(prop["Lat"])]) except ValueError: geom = None return geom def make_feature(record): """ Get a complete GeoJSON feature object for a record. 
""" return dict( type="Feature", id=record.get("Key"), geometry=wkt.loads(record.get("WktGeometry")), properties=extract_properties(record), ) if __name__ == "__main__": _, start_url, layer_id, filename = sys.argv query_url = get_query_url(start_url) conn, layer_path = get_connection(query_url) bbox = get_starting_bbox(conn, layer_path, layer_id) print(bbox, file=sys.stderr) features = get_features(conn, layer_path, layer_id, bbox) geojson = dict(type="FeatureCollection", features=list(features)) if filename == "-": json.dump(geojson, sys.stdout) else: with open(filename, "w") as f: json.dump(geojson, f)
6,745
2,369
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE # Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt """Basic Error checker from the basic checker.""" from __future__ import annotations import itertools from collections.abc import Iterator from typing import Any import astroid from astroid import nodes from pylint.checkers import utils from pylint.checkers.base.basic_checker import _BasicChecker from pylint.checkers.utils import infer_all from pylint.interfaces import HIGH ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+, # List of methods which can be redefined REDEFINABLE_METHODS = frozenset(("__module__",)) TYPING_FORWARD_REF_QNAME = "typing.ForwardRef" def _get_break_loop_node(break_node: nodes.Break) -> nodes.For | nodes.While | None: """Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node. """ loop_nodes = (nodes.For, nodes.While) parent = break_node.parent while not isinstance(parent, loop_nodes) or break_node in getattr( parent, "orelse", [] ): break_node = parent parent = parent.parent if parent is None: break return parent def _loop_exits_early(loop: nodes.For | nodes.While) -> bool: """Returns true if a loop may end with a break statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may end with a break statement, False otherwise. 
""" loop_nodes = (nodes.For, nodes.While) definition_nodes = (nodes.FunctionDef, nodes.ClassDef) inner_loop_nodes: list[nodes.For | nodes.While] = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes ) def _has_abstract_methods(node): """Determine if the given `node` has abstract methods. The methods should be made abstract by decorating them with `abc` decorators. """ return len(utils.unimplemented_abstract_methods(node)) > 0 def redefined_by_decorator(node: nodes.FunctionDef) -> bool: """Return True if the object is a method redefined via decorator. For example: @property def x(self): return self._x @x.setter def x(self, value): self._x = value """ if node.decorators: for decorator in node.decorators.nodes: if ( isinstance(decorator, nodes.Attribute) and getattr(decorator.expr, "name", None) == node.name ): return True return False class BasicErrorChecker(_BasicChecker): msgs = { "E0100": ( "__init__ method is a generator", "init-is-generator", "Used when the special class method __init__ is turned into a " "generator by a yield in its body.", ), "E0101": ( "Explicit return in __init__", "return-in-init", "Used when the special class method __init__ has an explicit " "return value.", ), "E0102": ( "%s already defined line %s", "function-redefined", "Used when a function / class / method is redefined.", ), "E0103": ( "%r not properly in loop", "not-in-loop", "Used when break or continue keywords are used outside a loop.", ), "E0104": ( "Return outside function", "return-outside-function", 'Used when a "return" statement is found outside a function or method.', ), "E0105": ( "Yield outside function", "yield-outside-function", 'Used when a "yield" statement is found outside a function or method.', ), "E0106": ( "Return with argument inside generator", 
"return-arg-in-generator", 'Used when a "return" statement with an argument is found ' "outside in a generator function or method (e.g. with some " '"yield" statements).', {"maxversion": (3, 3)}, ), "E0107": ( "Use of the non-existent %s operator", "nonexistent-operator", "Used when you attempt to use the C-style pre-increment or " "pre-decrement operator -- and ++, which doesn't exist in Python.", ), "E0108": ( "Duplicate argument name %s in function definition", "duplicate-argument-name", "Duplicate argument names in function definitions are syntax errors.", ), "E0110": ( "Abstract class %r with abstract methods instantiated", "abstract-class-instantiated", "Used when an abstract class with `abc.ABCMeta` as metaclass " "has abstract methods and is instantiated.", ), "W0120": ( "Else clause on loop without a break statement, remove the else and" " de-indent all the code inside it", "useless-else-on-loop", "Loops should only have an else clause if they can exit early " "with a break statement, otherwise the statements under else " "should be on the same scope as the loop itself.", ), "E0112": ( "More than one starred expression in assignment", "too-many-star-expressions", "Emitted when there are more than one starred " "expressions (`*x`) in an assignment. 
This is a SyntaxError.", ), "E0113": ( "Starred assignment target must be in a list or tuple", "invalid-star-assignment-target", "Emitted when a star expression is used as a starred assignment target.", ), "E0114": ( "Can use starred expression only in assignment target", "star-needs-assignment-target", "Emitted when a star expression is not used in an assignment target.", ), "E0115": ( "Name %r is nonlocal and global", "nonlocal-and-global", "Emitted when a name is both nonlocal and global.", ), "E0116": ( "'continue' not supported inside 'finally' clause", "continue-in-finally", "Emitted when the `continue` keyword is found " "inside a finally clause, which is a SyntaxError.", {"maxversion": (3, 8)}, ), "E0117": ( "nonlocal name %s found without binding", "nonlocal-without-binding", "Emitted when a nonlocal variable does not have an attached " "name somewhere in the parent scopes", ), "E0118": ( "Name %r is used prior to global declaration", "used-prior-global-declaration", "Emitted when a name is used prior a global declaration, " "which results in an error since Python 3.6.", {"minversion": (3, 6)}, ), } @utils.only_required_for_messages("function-redefined") def visit_classdef(self, node: nodes.ClassDef) -> None: self._check_redefinition("class", node) def _too_many_starred_for_tuple(self, assign_tuple: nodes.Tuple) -> bool: starred_count = 0 for elem in assign_tuple.itered(): if isinstance(elem, nodes.Tuple): return self._too_many_starred_for_tuple(elem) if isinstance(elem, nodes.Starred): starred_count += 1 return starred_count > 1 @utils.only_required_for_messages( "too-many-star-expressions", "invalid-star-assignment-target" ) def visit_assign(self, node: nodes.Assign) -> None: # Check *a, *b = ... 
assign_target = node.targets[0] # Check *a = b if isinstance(node.targets[0], nodes.Starred): self.add_message("invalid-star-assignment-target", node=node) if not isinstance(assign_target, nodes.Tuple): return if self._too_many_starred_for_tuple(assign_target): self.add_message("too-many-star-expressions", node=node) @utils.only_required_for_messages("star-needs-assignment-target") def visit_starred(self, node: nodes.Starred) -> None: """Check that a Starred expression is used in an assignment target.""" if isinstance(node.parent, nodes.Call): # f(*args) is converted to Call(args=[Starred]), so ignore # them for this check. return if isinstance(node.parent, (nodes.List, nodes.Tuple, nodes.Set, nodes.Dict)): # PEP 448 unpacking. return stmt = node.statement(future=True) if not isinstance(stmt, nodes.Assign): return if stmt.value is node or stmt.value.parent_of(node): self.add_message("star-needs-assignment-target", node=node) @utils.only_required_for_messages( "init-is-generator", "return-in-init", "function-redefined", "return-arg-in-generator", "duplicate-argument-name", "nonlocal-and-global", "used-prior-global-declaration", ) def visit_functiondef(self, node: nodes.FunctionDef) -> None: self._check_nonlocal_and_global(node) self._check_name_used_prior_global(node) if not redefined_by_decorator( node ) and not utils.is_registered_in_singledispatch_function(node): self._check_redefinition(node.is_method() and "method" or "function", node) # checks for max returns, branch, return in __init__ returns = node.nodes_of_class( nodes.Return, skip_klass=(nodes.FunctionDef, nodes.ClassDef) ) if node.is_method() and node.name == "__init__": if node.is_generator(): self.add_message("init-is-generator", node=node) else: values = [r.value for r in returns] # Are we returning anything but None from constructors if any(v for v in values if not utils.is_none(v)): self.add_message("return-in-init", node=node) # Check for duplicate names by clustering args with same name for 
detailed report arg_clusters = {} arguments: Iterator[Any] = filter(None, [node.args.args, node.args.kwonlyargs]) for arg in itertools.chain.from_iterable(arguments): if arg.name in arg_clusters: self.add_message( "duplicate-argument-name", node=arg, args=(arg.name,), confidence=HIGH, ) else: arg_clusters[arg.name] = arg visit_asyncfunctiondef = visit_functiondef def _check_name_used_prior_global(self, node: nodes.FunctionDef) -> None: scope_globals = { name: child for child in node.nodes_of_class(nodes.Global) for name in child.names if child.scope() is node } if not scope_globals: return for node_name in node.nodes_of_class(nodes.Name): if node_name.scope() is not node: continue name = node_name.name corresponding_global = scope_globals.get(name) if not corresponding_global: continue global_lineno = corresponding_global.fromlineno if global_lineno and global_lineno > node_name.fromlineno: self.add_message( "used-prior-global-declaration", node=node_name, args=(name,) ) def _check_nonlocal_and_global(self, node: nodes.FunctionDef) -> None: """Check that a name is both nonlocal and global.""" def same_scope(current: nodes.Global | nodes.Nonlocal) -> bool: return current.scope() is node from_iter = itertools.chain.from_iterable nonlocals = set( from_iter( child.names for child in node.nodes_of_class(nodes.Nonlocal) if same_scope(child) ) ) if not nonlocals: return global_vars = set( from_iter( child.names for child in node.nodes_of_class(nodes.Global) if same_scope(child) ) ) for name in nonlocals.intersection(global_vars): self.add_message("nonlocal-and-global", args=(name,), node=node) @utils.only_required_for_messages("return-outside-function") def visit_return(self, node: nodes.Return) -> None: if not isinstance(node.frame(future=True), nodes.FunctionDef): self.add_message("return-outside-function", node=node) @utils.only_required_for_messages("yield-outside-function") def visit_yield(self, node: nodes.Yield) -> None: self._check_yield_outside_func(node) 
@utils.only_required_for_messages("yield-outside-function") def visit_yieldfrom(self, node: nodes.YieldFrom) -> None: self._check_yield_outside_func(node) @utils.only_required_for_messages("not-in-loop", "continue-in-finally") def visit_continue(self, node: nodes.Continue) -> None: self._check_in_loop(node, "continue") @utils.only_required_for_messages("not-in-loop") def visit_break(self, node: nodes.Break) -> None: self._check_in_loop(node, "break") @utils.only_required_for_messages("useless-else-on-loop") def visit_for(self, node: nodes.For) -> None: self._check_else_on_loop(node) @utils.only_required_for_messages("useless-else-on-loop") def visit_while(self, node: nodes.While) -> None: self._check_else_on_loop(node) @utils.only_required_for_messages("nonexistent-operator") def visit_unaryop(self, node: nodes.UnaryOp) -> None: """Check use of the non-existent ++ and -- operators.""" if ( (node.op in "+-") and isinstance(node.operand, nodes.UnaryOp) and (node.operand.op == node.op) and (node.col_offset + 1 == node.operand.col_offset) ): self.add_message("nonexistent-operator", node=node, args=node.op * 2) def _check_nonlocal_without_binding(self, node: nodes.Nonlocal, name: str) -> None: current_scope = node.scope() while True: if current_scope.parent is None: break if not isinstance(current_scope, (nodes.ClassDef, nodes.FunctionDef)): self.add_message("nonlocal-without-binding", args=(name,), node=node) return if name not in current_scope.locals: current_scope = current_scope.parent.scope() continue # Okay, found it. 
return if not isinstance(current_scope, nodes.FunctionDef): self.add_message("nonlocal-without-binding", args=(name,), node=node) @utils.only_required_for_messages("nonlocal-without-binding") def visit_nonlocal(self, node: nodes.Nonlocal) -> None: for name in node.names: self._check_nonlocal_without_binding(node, name) @utils.only_required_for_messages("abstract-class-instantiated") def visit_call(self, node: nodes.Call) -> None: """Check instantiating abstract class with abc.ABCMeta as metaclass. """ for inferred in infer_all(node.func): self._check_inferred_class_is_abstract(inferred, node) def _check_inferred_class_is_abstract(self, inferred, node: nodes.Call): if not isinstance(inferred, nodes.ClassDef): return klass = utils.node_frame_class(node) if klass is inferred: # Don't emit the warning if the class is instantiated # in its own body or if the call is not an instance # creation. If the class is instantiated into its own # body, we're expecting that it knows what it is doing. return # __init__ was called abstract_methods = _has_abstract_methods(inferred) if not abstract_methods: return metaclass = inferred.metaclass() if metaclass is None: # Python 3.4 has `abc.ABC`, which won't be detected # by ClassNode.metaclass() for ancestor in inferred.ancestors(): if ancestor.qname() == "abc.ABC": self.add_message( "abstract-class-instantiated", args=(inferred.name,), node=node ) break return if metaclass.qname() in ABC_METACLASSES: self.add_message( "abstract-class-instantiated", args=(inferred.name,), node=node ) def _check_yield_outside_func(self, node: nodes.Yield) -> None: if not isinstance(node.frame(future=True), (nodes.FunctionDef, nodes.Lambda)): self.add_message("yield-outside-function", node=node) def _check_else_on_loop(self, node: nodes.For | nodes.While) -> None: """Check that any loop with an else clause has a break statement.""" if node.orelse and not _loop_exits_early(node): self.add_message( "useless-else-on-loop", node=node, # This is not optimal, 
but the line previous # to the first statement in the else clause # will usually be the one that contains the else:. line=node.orelse[0].lineno - 1, ) def _check_in_loop( self, node: nodes.Continue | nodes.Break, node_name: str ) -> None: """Check that a node is inside a for or while loop.""" for parent in node.node_ancestors(): if isinstance(parent, (nodes.For, nodes.While)): if node not in parent.orelse: return if isinstance(parent, (nodes.ClassDef, nodes.FunctionDef)): break if ( isinstance(parent, nodes.TryFinally) and node in parent.finalbody and isinstance(node, nodes.Continue) ): self.add_message("continue-in-finally", node=node) self.add_message("not-in-loop", node=node, args=node_name) def _check_redefinition( self, redeftype: str, node: nodes.Call | nodes.FunctionDef ) -> None: """Check for redefinition of a function / method / class name.""" parent_frame = node.parent.frame(future=True) # Ignore function stubs created for type information redefinitions = [ i for i in parent_frame.locals[node.name] if not (isinstance(i.parent, nodes.AnnAssign) and i.parent.simple) ] defined_self = next( (local for local in redefinitions if not utils.is_overload_stub(local)), node, ) if defined_self is not node and not astroid.are_exclusive(node, defined_self): # Additional checks for methods which are not considered # redefined, since they are already part of the base API. if ( isinstance(parent_frame, nodes.ClassDef) and node.name in REDEFINABLE_METHODS ): return # Skip typing.overload() functions. if utils.is_overload_stub(node): return # Exempt functions redefined on a condition. 
if isinstance(node.parent, nodes.If): # Exempt "if not <func>" cases if ( isinstance(node.parent.test, nodes.UnaryOp) and node.parent.test.op == "not" and isinstance(node.parent.test.operand, nodes.Name) and node.parent.test.operand.name == node.name ): return # Exempt "if <func> is not None" cases # pylint: disable=too-many-boolean-expressions if ( isinstance(node.parent.test, nodes.Compare) and isinstance(node.parent.test.left, nodes.Name) and node.parent.test.left.name == node.name and node.parent.test.ops[0][0] == "is" and isinstance(node.parent.test.ops[0][1], nodes.Const) and node.parent.test.ops[0][1].value is None ): return # Check if we have forward references for this node. try: redefinition_index = redefinitions.index(node) except ValueError: pass else: for redefinition in redefinitions[:redefinition_index]: inferred = utils.safe_infer(redefinition) if ( inferred and isinstance(inferred, astroid.Instance) and inferred.qname() == TYPING_FORWARD_REF_QNAME ): return dummy_variables_rgx = self.linter.config.dummy_variables_rgx if dummy_variables_rgx and dummy_variables_rgx.match(node.name): return self.add_message( "function-redefined", node=node, args=(redeftype, defined_self.fromlineno), )
22,033
6,273
class Solution: result = [] def backtrack(self, current, nums): if current == len(nums): self.result.append(nums[:]) return for i in range(current, len(nums)): nums[i], nums[current] = nums[current], nums[i] self.backtrack(current + 1, nums) nums[i], nums[current] = nums[current], nums[i] def permute(self, nums: List[int]) -> List[List[int]]: self.result = [] self.backtrack(0, nums) return self.result
526
176
class card: """the QR card object. """ qr_path = '' public_id = '' def __init__(self, qr_path, public_id): self.qr_path = qr_path self.public_id = public_id
194
74
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: dan@reciprocitylabs.com # Maintained By: dan@reciprocitylabs.com """Person.email must be unique Revision ID: 2bf7c04016c9 Revises: d3af6d071ef Create Date: 2013-07-24 02:16:06.282258 """ # revision identifiers, used by Alembic. revision = '2bf7c04016c9' down_revision = 'd3af6d071ef' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_unique_constraint('uq_people_email', 'people', ['email']) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint('uq_people_email', 'people', type_='unique') ### end Alembic commands ###
915
353
import os import sys import string import yaml from argparse import ArgumentParser import time # default setting traces = {} realblobfiles = {} limitamount = 1500 # 24 32 48 64 warmupthreads = 1 # number of total clients hotratio = 0.25 #nondedupreplicas = 2 replicalevel = 1 wait = True dir = '/home/nannan/docker-performance/' layerfiledir = '/home/nannan/dockerimages/layers/hulk1' #realblobfiledir = '' hulk1layerfiledir = '/home/nannan/dockerimages/layers' def createclientinfo(trace, realload): load = [] if realload == True: load = [os.path.join(layerfiledir, trace+'_layers.lst')] else: load = [os.path.join(layerfiledir, 'pri_'+trace+'.lst')] #load = [os.path.join(layerfiledir, realblobfiles[trace])] client_info = { "threads": 1, "realblobs": load,# } return client_info # client_info = { # "threads": 1, # "realblobs": [os.path.join(layerfiledir, trace+'_layers.lst')], # # } # return client_info def createtrace(traces, limit): trace = { "location": "/home/nannan/dockerimages/docker-traces/data_centers", "traces": traces, "limit": { "amount": limit, }, "output": "results.json", } return trace def createwarmup(threads): warmup = { "output": "warmup_output.json", "threads": threads, } return warmup def createtestmode(testmode): nodedup = False sift = False restore = False primary = False if testmode == "nodedup": #nodedup = True primary = True if testmode == "sift": sift = True if testmode == "restore": restore = True if testmode == "primary": #nodedup = True primary = True testmode = { "nodedup": nodedup, "sift": sift, "restore": restore, "primary": primary, } return testmode def createsiftparams(mode, hotratio, nondedupreplicas): siftparams = { "mode": mode, "selective":{ "hotratio": hotratio, }, "standard":{ "nondedupreplicas": nondedupreplicas, }, } return siftparams def createsimulate(wait, accelerater, replicalevel): simulate = { "wait": wait, "accelerater": accelerater, "replicalevel": replicalevel, } return simulate """ dal_layers.lst fra_layers.lst 
prestage_layers.lst syd_layers.lst dev_layers.lst lon_layers.lst stage_layers.lst testing_layers.lst """ def main(): #first add traces realblobfiles['1mb'] = 'hulk_layers_approx_1MB.lst' realblobfiles['5mb'] = 'hulk_layers_approx_5MB.lst' realblobfiles['10mb'] = 'hulk_layers_approx_10MB.lst' realblobfiles['15mb'] = 'hulk_layers_approx_15MB.lst' realblobfiles['20mb'] = 'hulk_layers_approx_20MB.lst' realblobfiles['25mb'] = 'hulk_layers_approx_25MB.lst' realblobfiles['30mb'] = 'hulk_layers_approx_30MB.lst' realblobfiles['35mb'] = 'hulk_layers_approx_35MB.lst' realblobfiles['40mb'] = 'hulk_layers_approx_40MB.lst' realblobfiles['45mb'] = 'hulk_layers_approx_45MB.lst' realblobfiles['50mb'] = 'hulk_layers_approx_50MB.lst' traces["dal"] = ["dal09/prod-dal09-logstash-2017.06.20-0.json"] traces["dev"] = ["dev-mon01/dev-mon01-logstash-2017.07.13-0.json", "dev-mon01/dev-mon01-logstash-2017.07.13-1.json", "dev-mon01/dev-mon01-logstash-2017.07.13-2.json", "dev-mon01/dev-mon01-logstash-2017.07.13-3.json", "dev-mon01/dev-mon01-logstash-2017.07.14-0.json", "dev-mon01/dev-mon01-logstash-2017.07.14-1.json", "dev-mon01/dev-mon01-logstash-2017.07.14-2.json", "dev-mon01/dev-mon01-logstash-2017.07.14-3.json", "dev-mon01/dev-mon01-logstash-2017.07.15-0.json", "dev-mon01/dev-mon01-logstash-2017.07.15-1.json"] traces["fra"] = ["fra02/prod-fra02-logstash-2017.06.20-0.json", "fra02/prod-fra02-logstash-2017.06.20-1.json", "fra02/prod-fra02-logstash-2017.06.20-3.json", "fra02/prod-fra02-logstash-2017.06.21-0.json", "fra02/prod-fra02-logstash-2017.06.21-1.json", "fra02/prod-fra02-logstash-2017.06.21-2.json", "fra02/prod-fra02-logstash-2017.06.21-3.json", "fra02/prod-fra02-logstash-2017.06.22-0.json", "fra02/prod-fra02-logstash-2017.06.22-1.json", "fra02/prod-fra02-logstash-2017.06.22-2.json", "fra02/prod-fra02-logstash-2017.06.22-3.json"] traces["lon"] = ["lon02/prod-lon02-logstash-2017.06.20-0.json"] traces["prestage"] = ["prestage-mon01/prestage-mon01-logstash-2017.07.03-0.json", 
"prestage-mon01/prestage-mon01-logstash-2017.07.03-1.json", "prestage-mon01/prestage-mon01-logstash-2017.07.03-2.json", "prestage-mon01/prestage-mon01-logstash-2017.07.03-3.json", "prestage-mon01/prestage-mon01-logstash-2017.07.04-0.json", "prestage-mon01/prestage-mon01-logstash-2017.07.04-1.json", "prestage-mon01/prestage-mon01-logstash-2017.07.04-2.json", "prestage-mon01/prestage-mon01-logstash-2017.07.04-3.json"] traces["stage"] = ["stage-dal09/stage-dal09-logstash-2017.06.27-0.json"] traces["syd"] = ["syd01/prod-syd01-logstash-2017.07.01-0.json", "syd01/prod-syd01-logstash-2017.07.01-1.json", "syd01/prod-syd01-logstash-2017.07.01-2.json", "syd01/prod-syd01-logstash-2017.07.01-3.json", "syd01/prod-syd01-logstash-2017.07.02-0.json", "syd01/prod-syd01-logstash-2017.07.02-1.json", "syd01/prod-syd01-logstash-2017.07.02-2.json", "syd01/prod-syd01-logstash-2017.07.02-3.json"] registries=["192.168.0.200:5000", #"192.168.0.201:5000", #"192.168.0.202:5000", #"192.168.0.203:5000", #"192.168.0.204:5000", #"192.168.0.205:5000", #"192.168.0.208:5000", #"192.168.0.209:5000", #"192.168.0.210:5000", #"192.168.0.211:5000", #"192.168.0.212:5000", #"192.168.0.213:5000", #"192.168.0.214:5000", #"192.168.0.215:5000", #"192.168.0.216:5000", #"192.168.0.217:5000", #"192.168.0.218:5000", #"192.168.0.219:5000", #"192.168.0.221:5000", #"192.168.0.222:5000", #"192.168.0.223:5000" ] clients_amaranths = ["192.168.0.151", "192.168.0.153", "192.168.0.154", "192.168.0.156"] clients = ["192.168.0.220"] clients_hulks = ["192.168.0.170", "192.168.0.171", "192.168.0.172", "192.168.0.174", "192.168.0.176", "192.168.0.177", "192.168.0.179", "192.168.0.180"] parser = ArgumentParser(description='Trace Player, allows for anonymized traces to be replayed to a registry, or for caching and prefecting simulations.') parser.add_argument('-r', '--realblobfiles', dest='realblobfiles', type=str, required=True, help = 'input realblob files: 50m or 1gb') parser.add_argument('-t', '--tracefiles', 
dest='tracefiles', type=str, required=True, help = 'input trace file: dal, dev, fra, prestage, or syd, lon') parser.add_argument('-m', '--testmode', dest='testmode', type=str, required=True, help = 'input test mode: nodedup, sift, restore') parser.add_argument('-s', '--siftmode', dest='siftmode', type=str, required=True, help = 'input sift mode: standard, selective') parser.add_argument('-a', '--accelerater', dest='accelerater', type=int, required=True, help = 'input accelerater: int') parser.add_argument('-n', '--numofdedupregistries', dest='numofdedupregistries', type=int, required=True, help = 'input numofdedupregistries: int') parser.add_argument('-c', '--numofclients', dest='numofclients', type=int, required=True, help = 'input numofclients: int') parser.add_argument('-p', '--nondedupreplicas', dest='nondedupreplicas', type=int, required=True, help = 'input nondedupreplicas: int') args = parser.parse_args() print args # client_info = createclientinfo(args.tracefiles) if args.realblobfiles == 0: client_info = createclientinfo(args.tracefiles, True) else: client_info = createclientinfo(args.realblobfiles, False) testingtrace = createtrace(traces[args.tracefiles], limitamount) primaryregistry=[] dedupregistry=[] testingclients = clients[:args.numofclients] warmup = createwarmup(warmupthreads) """ nodedup: original registry; primary: b-mode 3 restore: b-mode 0 sift: standard: b-mode 2 or b-mode 1 selective: """ if args.testmode == "nodedup" or args.testmode == "primary": primaryregistry = registries #registries(:,len(registries)-args.numofdedupregistries) elif args.testmode == "sift": dedupregistry = registries[:args.numofdedupregistries] primaryregistry = registries[-(len(registries)-args.numofdedupregistries):] elif args.testmode == "restore": dedupregistry = registries[:args.numofdedupregistries] # elif args.testmode == "primary": # dedupregistry = registries[:args.numofdedupregistries] # primaryregistry = 
registries[-(len(registries)-args.numofdedupregistries):] testingmode = createtestmode(args.testmode) testingsiftmode = createsiftparams(args.siftmode, hotratio, args.nondedupreplicas) simulate = createsimulate(wait, args.accelerater, replicalevel) config = { "client_info": client_info, "trace": testingtrace, "primaryregistry": primaryregistry, "dedupregistry": dedupregistry, "clients": testingclients, "warmup": warmup, "testmode": testingmode, "siftparams": testingsiftmode, "simulate": simulate, } print config with open(os.path.join(dir, "config.yaml"), 'w') as fp: yaml.dump(config, fp, default_flow_style=False) with open(os.path.join(dir, "run/clients.txt"), 'w') as fp: for i in testingclients: fp.write(i+':22\n') with open(os.path.join(dir, "run/dedupregistries.txt"), 'w') as fp: for i in dedupregistry: tmp = i.split(':')[0] fp.write(tmp+':22\n') with open(os.path.join(dir, "run/primaryregistries.txt"), 'w') as fp: for i in primaryregistry: tmp = i.split(':')[0] fp.write(tmp+':22\n') if __name__ == "__main__": main()
11,484
4,662
""" Train and eval functions used in main.py """ import os import torch from torch.utils.data import DataLoader, DistributedSampler import math import sys import time import datetime from typing import Iterable from pathlib import Path import json import random import numpy as np import torch import wandb from dataset.evaluator import SmoothedValue, MetricLogger from model.detr import build_model from dataset.construction_dataset import build_dataset from dataset.evaluator import collate_fn, evaluate, save_on_master seed = 42 torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.deterministic = True torch.use_deterministic_algorithms(False) # missing some deterministic impl device = torch.device("cuda:0") class Args: pass args = Args() # Postitional encoding args.position_embedding = "sine" # CNN Backbone args.backbone = "resnet50" args.dilation = None # Hungarian matcher args.set_cost_class = 1 args.set_cost_bbox = 5 args.set_cost_giou = 2 # Transformer args.hidden_dim = 256 args.dropout = 0.1 args.nheads = 8 args.dim_feedforward = 2048 args.enc_layers = 6 args.dec_layers = 6 args.pre_norm = None # DETR args.num_queries = 100 args.aux_loss = True # calculate loss at eache decoder layer args.masks = True args.frozen_weights = None args.bbox_loss_coef = 5 args.mask_loss_coef = 1 args.dice_loss_coef = 1 args.giou_loss_coef = 2 args.eos_coef = 0.1 # Dataset args.dataset_file = "coco_panoptic" # construction args.coco_path = "./data" args.coco_panoptic_path = "./data" # Training args.lr = 1e-4 args.weight_decay = 1e-4 args.lr_backbone = 0 # 0 means frozen backbone args.batch_size = 3 args.epochs = 2 args.lr_drop = 200 args.clip_max_norm = 0.1 args.output_dir = "out_dir" args.eval = False # !mkdir out_dir/panoptic_eval -p try: os.mkdir("out_dir/panoptic_eval") except Exception as e: pass # set if you plan to log on wandb ENABLE_WANDB = True # if set not train from scratch (detre pretrained on COCO) used_artifact = None # 
"2_2_attentionfreeze_aux:latest" # set if starting a new run wandb_experiment_name = "2_2_1_transf_unfreeze_aux" # set to None if starting a new run run_id = None if ENABLE_WANDB: import wandb if run_id is not None: wandb.init(project="detr", id=run_id, resume="allow") else: wandb.init(project="detr", name=wandb_experiment_name) wandb.config.position_embedding = args.position_embedding wandb.config.backbone = args.backbone wandb.config.dilation = args.dilation wandb.config.set_cost_class = args.set_cost_class wandb.config.set_cost_bbox = args.set_cost_bbox wandb.config.set_cost_giou = args.set_cost_giou wandb.config.hidden_dim = args.hidden_dim wandb.config.dropout = args.dropout wandb.config.nheads = args.nheads wandb.config.dim_feedforward = args.dim_feedforward wandb.config.enc_layers = args.enc_layers wandb.config.dec_layers = args.dec_layers wandb.config.pre_norm = args.pre_norm wandb.config.num_queries = args.num_queries wandb.config.aux_loss = args.aux_loss wandb.config.masks = args.masks wandb.config.frozen_weights = args.frozen_weights wandb.config.bbox_loss_coef = args.bbox_loss_coef wandb.config.mask_loss_coef = args.mask_loss_coef wandb.config.dice_loss_coef = args.dice_loss_coef wandb.config.giou_loss_coef = args.giou_loss_coef wandb.config.eos_coef = args.eos_coef wandb.config.lr = args.lr wandb.config.weight_decay = args.weight_decay wandb.config.lr_backbone = args.lr_backbone wandb.config.batch_size = args.batch_size wandb.config.epochs = args.epochs wandb.config.lr_drop = args.lr_drop wandb.config.clip_max_norm = args.clip_max_norm def freeze_attn(model, args): for i in range(args.dec_layers): for param in model.detr.transformer.decoder.layers[i].self_attn.parameters(): param.requires_grad = False for param in model.detr.transformer.decoder.layers[ i ].multihead_attn.parameters(): param.requires_grad = False for i in range(args.enc_layers): for param in model.detr.transformer.encoder.layers[i].self_attn.parameters(): param.requires_grad = False def 
freeze_decoder(model, args): for param in model.detr.transformer.decoder.parameters(): param.requires_grad = False def freeze_first_layers(model, args): for i in range(args.enc_layers // 2): for param in model.detr.transformer.encoder.layers[i].parameters(): param.requires_grad = False for i in range(args.dec_layers // 2): for param in model.detr.transformer.decoder.layers[i].parameters(): param.requires_grad = False def build_pretrained_model(args): pre_trained = torch.hub.load( "facebookresearch/detr", "detr_resnet50_panoptic", pretrained=True, return_postprocessor=False, num_classes=250, ) model, criterion, postprocessors = build_model(args) model.detr.backbone.load_state_dict(pre_trained.detr.backbone.state_dict()) model.detr.bbox_embed.load_state_dict(pre_trained.detr.bbox_embed.state_dict()) model.detr.query_embed.load_state_dict(pre_trained.detr.query_embed.state_dict()) model.detr.input_proj.load_state_dict(pre_trained.detr.input_proj.state_dict()) model.detr.transformer.load_state_dict(pre_trained.detr.transformer.state_dict()) model.bbox_attention.load_state_dict(pre_trained.bbox_attention.state_dict()) model.mask_head.load_state_dict(pre_trained.mask_head.state_dict()) freeze_attn(model, args) return model, criterion, postprocessors def train_one_epoch( model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float = 0, ): model.train() criterion.train() metric_logger = MetricLogger(delimiter=" ") metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) metric_logger.add_meter( "class_error", SmoothedValue(window_size=1, fmt="{value:.2f}") ) header = "Epoch: [{}]".format(epoch) print_freq = 10 for samples, targets in metric_logger.log_every(data_loader, print_freq, header): samples = samples.to(device) targets = [{k: v.to(device) for k, v in t.items()} for t in targets] outputs = model(samples) loss_dict = criterion(outputs, targets) 
weight_dict = criterion.weight_dict losses = sum( loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict ) # reduce losses over all GPUs for logging purposes loss_dict_reduced = loss_dict loss_dict_reduced_unscaled = { f"{k}_unscaled": v for k, v in loss_dict_reduced.items() } loss_dict_reduced_scaled = { k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict } losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) loss_value = losses_reduced_scaled.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) print(loss_dict_reduced) sys.exit(1) optimizer.zero_grad() losses.backward() if max_norm > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) optimizer.step() metric_logger.update( loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled ) metric_logger.update(class_error=loss_dict_reduced["class_error"]) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) if ENABLE_WANDB: wandb.log(loss_dict_reduced) wandb.log({"loss": loss_value}) metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} def train(): if args.frozen_weights is not None: assert args.masks, "Frozen training is meant for segmentation only" model, criterion, postprocessors = build_pretrained_model(args) model.to(device) if ENABLE_WANDB: wandb.watch(model) model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("number of params:", n_parameters) param_dicts = [ { "params": [ p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad ] }, { "params": [ p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad ], "lr": args.lr_backbone, }, ] optimizer = torch.optim.AdamW( param_dicts, lr=args.lr, weight_decay=args.weight_decay ) lr_scheduler = 
torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) if ENABLE_WANDB and used_artifact is not None: artifact = wandb.use_artifact(used_artifact) artifact_dir = artifact.download() checkpoint = torch.load(artifact_dir + "/checkpoint.pth") model.load_state_dict(checkpoint["model"]) if run_id is not None: optimizer.load_state_dict(checkpoint["optimizer"]) # lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) start_epoch = checkpoint["epoch"] else: start_epoch = 0 dataset_train = build_dataset(image_set="train", args=args) dataset_val = build_dataset(image_set="val", args=args) sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) batch_sampler_train = torch.utils.data.BatchSampler( sampler_train, args.batch_size, drop_last=True ) data_loader_train = DataLoader( dataset_train, batch_sampler=batch_sampler_train, collate_fn=collate_fn, num_workers=4, ) data_loader_val = DataLoader( dataset_val, args.batch_size, sampler=sampler_val, drop_last=False, collate_fn=collate_fn, num_workers=4, ) if args.frozen_weights is not None: checkpoint = torch.load(args.frozen_weights, map_location="cpu") model_without_ddp.detr.load_state_dict(checkpoint["model"]) output_dir = Path(args.output_dir) if args.eval: test_stats = evaluate( model, criterion, postprocessors, data_loader_val, device, args.output_dir ) print(test_stats) return print("Start training") start_time = time.time() for epoch in range(start_epoch + 1, args.epochs): train_stats = train_one_epoch( model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm, ) lr_scheduler.step() if args.output_dir: checkpoint_path = output_dir / "checkpoint.pth" save_on_master( { "model": model_without_ddp.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch, "args": args, }, checkpoint_path, ) if ENABLE_WANDB: artifact = wandb.Artifact(wandb_experiment_name, type="model") 
artifact.add_file(checkpoint_path) wandb.log_artifact(artifact) test_stats = evaluate( model, criterion, postprocessors, data_loader_val, device, args.output_dir ) log_stats = { **{f"train_{k}": v for k, v in train_stats.items()}, **{f"test_{k}": v for k, v in test_stats.items()}, "epoch": epoch, "n_parameters": n_parameters, } if ENABLE_WANDB: wandb.log(test_stats) if args.output_dir: with (output_dir / "log.txt").open("a") as f: f.write(json.dumps(log_stats) + "\n") total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print("Training time {}".format(total_time_str))
12,539
4,312
# Generated by Django 2.0 on 2017-12-19 19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='category', options={'verbose_name_plural': 'Categories'}, ), migrations.AlterField( model_name='post', name='date', field=models.DateTimeField(auto_now_add=True), ), ]
511
163
import pytest from awx.api.versioning import reverse from awx.main.models.mixins import WebhookTemplateMixin from awx.main.models.credential import Credential, CredentialType @pytest.mark.django_db @pytest.mark.parametrize( "user_role, expect", [ ('superuser', 200), ('org admin', 200), ('jt admin', 200), ('jt execute', 403), ('org member', 403), ] ) def test_get_webhook_key_jt(organization_factory, job_template_factory, get, user_role, expect): objs = organization_factory("org", superusers=['admin'], users=['user']) jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template if user_role == 'superuser': user = objs.superusers.admin else: user = objs.users.user grant_obj = objs.organization if user_role.startswith('org') else jt getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user) url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk}) response = get(url, user=user, expect=expect) if expect < 400: assert response.data == {'webhook_key': ''} @pytest.mark.django_db @pytest.mark.parametrize( "user_role, expect", [ ('superuser', 200), ('org admin', 200), ('jt admin', 200), ('jt execute', 403), ('org member', 403), ] ) def test_get_webhook_key_wfjt(organization_factory, workflow_job_template_factory, get, user_role, expect): objs = organization_factory("org", superusers=['admin'], users=['user']) wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template if user_role == 'superuser': user = objs.superusers.admin else: user = objs.users.user grant_obj = objs.organization if user_role.startswith('org') else wfjt getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user) url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk}) response = get(url, user=user, expect=expect) if expect < 400: assert response.data == {'webhook_key': ''} @pytest.mark.django_db 
@pytest.mark.parametrize( "user_role, expect", [ ('superuser', 201), ('org admin', 201), ('jt admin', 201), ('jt execute', 403), ('org member', 403), ] ) def test_post_webhook_key_jt(organization_factory, job_template_factory, post, user_role, expect): objs = organization_factory("org", superusers=['admin'], users=['user']) jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template if user_role == 'superuser': user = objs.superusers.admin else: user = objs.users.user grant_obj = objs.organization if user_role.startswith('org') else jt getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user) url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk}) response = post(url, {}, user=user, expect=expect) if expect < 400: assert bool(response.data.get('webhook_key')) @pytest.mark.django_db @pytest.mark.parametrize( "user_role, expect", [ ('superuser', 201), ('org admin', 201), ('jt admin', 201), ('jt execute', 403), ('org member', 403), ] ) def test_post_webhook_key_wfjt(organization_factory, workflow_job_template_factory, post, user_role, expect): objs = organization_factory("org", superusers=['admin'], users=['user']) wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template if user_role == 'superuser': user = objs.superusers.admin else: user = objs.users.user grant_obj = objs.organization if user_role.startswith('org') else wfjt getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user) url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk}) response = post(url, {}, user=user, expect=expect) if expect < 400: assert bool(response.data.get('webhook_key')) @pytest.mark.django_db @pytest.mark.parametrize( "service", [s for s, _ in WebhookTemplateMixin.SERVICES] ) def test_set_webhook_service(organization_factory, job_template_factory, patch, service): objs = 
organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert (jt.webhook_service, jt.webhook_key) == ('', '') url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) patch(url, {'webhook_service': service}, user=admin, expect=200) jt.refresh_from_db() assert jt.webhook_service == service assert jt.webhook_key != '' @pytest.mark.django_db @pytest.mark.parametrize( "service", [s for s, _ in WebhookTemplateMixin.SERVICES] ) def test_unset_webhook_service(organization_factory, job_template_factory, patch, service): objs = organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, webhook_service=service, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert jt.webhook_service == service assert jt.webhook_key != '' url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) patch(url, {'webhook_service': ''}, user=admin, expect=200) jt.refresh_from_db() assert (jt.webhook_service, jt.webhook_key) == ('', '') @pytest.mark.django_db @pytest.mark.parametrize( "service", [s for s, _ in WebhookTemplateMixin.SERVICES] ) def test_set_webhook_credential(organization_factory, job_template_factory, patch, service): objs = organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, webhook_service=service, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert jt.webhook_service == service assert jt.webhook_key != '' cred_type = CredentialType.defaults['{}_token'.format(service)]() cred_type.save() cred = Credential.objects.create(credential_type=cred_type, name='test-cred', inputs={'token': 'secret'}) url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) patch(url, {'webhook_credential': cred.pk}, user=admin, expect=200) 
jt.refresh_from_db() assert jt.webhook_service == service assert jt.webhook_key != '' assert jt.webhook_credential == cred @pytest.mark.django_db @pytest.mark.parametrize( "service,token", [ (s, WebhookTemplateMixin.SERVICES[i - 1][0]) for i, (s, _) in enumerate(WebhookTemplateMixin.SERVICES) ] ) def test_set_wrong_service_webhook_credential(organization_factory, job_template_factory, patch, service, token): objs = organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, webhook_service=service, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert jt.webhook_service == service assert jt.webhook_key != '' cred_type = CredentialType.defaults['{}_token'.format(token)]() cred_type.save() cred = Credential.objects.create(credential_type=cred_type, name='test-cred', inputs={'token': 'secret'}) url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) response = patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400) jt.refresh_from_db() assert jt.webhook_service == service assert jt.webhook_key != '' assert jt.webhook_credential is None assert response.data == {'webhook_credential': ["Must match the selected webhook service."]} @pytest.mark.django_db @pytest.mark.parametrize( "service", [s for s, _ in WebhookTemplateMixin.SERVICES] ) def test_set_webhook_credential_without_service(organization_factory, job_template_factory, patch, service): objs = organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert jt.webhook_service == '' assert jt.webhook_key == '' cred_type = CredentialType.defaults['{}_token'.format(service)]() cred_type.save() cred = Credential.objects.create(credential_type=cred_type, name='test-cred', inputs={'token': 'secret'}) url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) response 
= patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400) jt.refresh_from_db() assert jt.webhook_service == '' assert jt.webhook_key == '' assert jt.webhook_credential is None assert response.data == {'webhook_credential': ["Must match the selected webhook service."]} @pytest.mark.django_db @pytest.mark.parametrize( "service", [s for s, _ in WebhookTemplateMixin.SERVICES] ) def test_unset_webhook_service_with_credential(organization_factory, job_template_factory, patch, service): objs = organization_factory("org", superusers=['admin']) jt = job_template_factory("jt", organization=objs.organization, webhook_service=service, inventory='test_inv', project='test_proj').job_template admin = objs.superusers.admin assert jt.webhook_service == service assert jt.webhook_key != '' cred_type = CredentialType.defaults['{}_token'.format(service)]() cred_type.save() cred = Credential.objects.create(credential_type=cred_type, name='test-cred', inputs={'token': 'secret'}) jt.webhook_credential = cred jt.save() url = reverse('api:job_template_detail', kwargs={'pk': jt.pk}) response = patch(url, {'webhook_service': ''}, user=admin, expect=400) jt.refresh_from_db() assert jt.webhook_service == service assert jt.webhook_key != '' assert jt.webhook_credential == cred assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
10,581
3,532
"""Main Application and routing Logic for TESS Flask App"""
from decouple import config
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pickle
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import RobustScaler
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import pandas as pd
import numpy as np
from tensorflow.keras.models import load_model
from .models import DB, TOI_Table, TIC_Cat_Table
from .models import *
from .light_curve import *
from .Data_in import *

# Column names pulled off the ORM rows when building DataFrames.  Keeping
# them in one place replaces ~35 near-identical list-comprehension lines.
_TOI_COLUMNS = [
    'TIC_ID', 'TOI', 'Epoch', 'Period', 'Duration', 'Depth',
    'Planet_Radius', 'Planet_Insolation', 'Planet_Equil_Temp', 'Planet_SNR',
    'Stellar_Distance', 'Stellar_log_g', 'Stellar_Radius',
    'TFOPWG_Disposition',
]
_TIC_COLUMNS = [
    'TIC_ID', 'ra', 'dec', 'pmRA', 'pmDEC', 'plx', 'gallong', 'gallat',
    'eclong', 'eclat', 'Tmag', 'Teff', 'logg', 'MH', 'rad', 'mass', 'rho',
    'lum', 'd', 'ebv', 'numcont', 'contratio', 'priority',
]


def create_app():
    """Create and configure an instance of the Flask App.

    Returns:
        flask.Flask: the application with DB bound and all routes registered.
    """
    app = Flask(__name__)
    # Configure DB; SQLALCHEMY_DATABASE_URI will need updating when the
    # backing database changes.
    app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['ENV'] = config('ENV')
    DB.init_app(app)

    # Create home route
    @app.route('/')
    def root():
        """Home page listing every TESS Object of Interest row."""
        toi_table = TOI_Table.query.all()
        # FIX: title typo "Findin" -> "Finding" (matches the earlier
        # revision of this template call).
        return render_template('home.html',
                               title='Finding Planets:TESS',
                               toi_table=toi_table)

    @app.route('/total_reset')
    def total_reset():
        """Drop and rebuild every table, then re-ingest the source data."""
        DB.drop_all()
        DB.create_all()
        get_visual_data()
        get_toi_data()
        get_tic_catalog()
        return render_template('home.html', title='Reset Database!')

    @app.route('/image')
    def image():
        """Light-curve image gallery page."""
        return render_template('image.html', title='Light curve images')

    @app.route('/predict')
    def predict():
        """Run the stored classification pipeline over every TOI row."""

        def get_data():
            """Join the TOI and TIC-catalog tables into one DataFrame on TIC_ID."""
            toi_rows = TOI_Table.query.all()
            tic_catalog_rows = TIC_Cat_Table.query.all()
            toi = pd.DataFrame(
                {col: [getattr(row, col) for row in toi_rows]
                 for col in _TOI_COLUMNS})
            tic_catalog = pd.DataFrame(
                {col: [getattr(row, col) for row in tic_catalog_rows]
                 for col in _TIC_COLUMNS})
            return toi.merge(tic_catalog, on='TIC_ID')

        def shape_data(df):
            """Drop identifier/label columns not used as model features."""
            return df.drop(columns=['TIC_ID', 'TOI', 'TFOPWG_Disposition'])

        def create_model():
            """Build the neural-net architecture.

            NOTE(review): not called directly here — presumably referenced as
            the KerasClassifier build_fn when the pickled pipeline is loaded;
            confirm before removing.
            """
            model = Sequential()
            # Input layer: 33 features in (see shape_data's column drop).
            model.add(Dense(20, input_dim=33, activation='relu'))
            # Hidden layer:
            model.add(Dense(20, activation='relu'))
            # Output layer: single sigmoid unit for binary classification.
            model.add(Dense(1, activation='sigmoid'))
            model.compile(loss='binary_crossentropy', optimizer='adam',
                          metrics=['accuracy'])
            return model

        def load_pipeline():
            """Load the pickled sklearn pipeline and re-attach the Keras model."""
            tess_pipeline = pickle.load(open('tess_pipeline.pkl', 'rb'))
            tess_pipeline.named_steps[
                'kerasclassifier'].model = load_model(
                    'keras_classifier.h5')
            return tess_pipeline

        def get_all_predictions():
            """Score every observation; label each planet / false positive."""
            # FIX: query the database once instead of calling get_data() twice.
            df = get_data()
            y_pred_proba_full = load_pipeline().predict_proba(shape_data(df))
            output_df = pd.DataFrame(y_pred_proba_full, columns=[
                'actual_exoplanet_prob', 'false_positive_prob'])
            output_df = pd.DataFrame(df['TOI']).join(output_df)
            # Whichever probability is larger decides the label; compute the
            # comparison once and reuse it for both derived columns.
            planet_wins = (output_df['actual_exoplanet_prob']
                           >= output_df['false_positive_prob'])
            output_df['prediction'] = np.where(
                planet_wins, 'Actual Exoplanet', 'False Positive')
            output_df['prediction_prob'] = np.where(
                planet_wins,
                output_df['actual_exoplanet_prob'],
                output_df['false_positive_prob'])
            return output_df

        get_all_predictions()
        return render_template('predict.html',
                               title='prediction pipeline works!')

    # @app.route('/test')
    # def get_urls(tic_id):
    #     urls = Visual_Table.query.filter_by(TIC_ID=tic_id).all()
    #     urls = [url.dataURL for url in urls]
    #     return urls

    return app
14,667
4,558
import logging

import scipy.optimize


class MotionOptimizer(object):
    """Tune a motion's parameters to minimize an evaluator's cost.

    ``motion`` must expose ``params()`` and ``set_params(x)``;
    ``evaluator`` must expose ``cost()`` and is expected to observe the
    motion's current parameters.
    """

    def __init__(self, motion, evaluator):
        self.logger = logging.getLogger(__name__)
        self.motion = motion
        self.evaluator = evaluator
        # FIX: initialize here so obj() is safe to call even before solve()
        # has run (previously raised AttributeError).
        self.counter = 0

    def obj(self, x):
        """Objective for scipy: apply parameter vector ``x``, return the cost."""
        self.counter += 1
        self.motion.set_params(x)
        cost = self.evaluator.cost()
        # Log sparsely -- the optimizer evaluates this thousands of times.
        if self.counter % 100 == 1:
            self.logger.debug('%d: cost = %.10f', self.counter, cost)
        return cost

    def solve(self):
        """Run SLSQP starting from the motion's current parameters.

        Returns:
            scipy.optimize.OptimizeResult: the full optimizer result.
        """
        self.counter = 0
        logger = self.logger
        # FIX: was logging.info(...) on the root logger; use the instance
        # logger consistently with the rest of the class.
        logger.info('start to solve optimization')
        x0 = self.motion.params()
        logger.info('x0 = %s', x0)
        # FIX: dropped 'maxfev' and 'xtol' -- SLSQP does not accept them and
        # scipy warns about unknown solver options.  1e-9 == original 10e-10.
        options = {'maxiter': 100000, 'ftol': 1e-9}
        logger.info('options = %s', options)
        res = scipy.optimize.minimize(self.obj, x0, method='SLSQP',
                                      options=options)
        logger.info('result = %s', res)
        logger.info('finished to solve optimization')
        logger.info('OK')
        # FIX: return the result instead of discarding it (callers that
        # ignored the old None return are unaffected).
        return res
1,231
421
# type: ignore
# noqa: D100
# Minimal build shim: all packaging metadata lives in the project's
# declarative config (setup.cfg / pyproject.toml).
from setuptools import setup

setup()
66
28
# # Copyright The NOMAD Authors. # # This file is part of NOMAD. # See https://nomad-lab.eu for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re import os import textwrap import datetime import ase import numpy as np from nomad.units import ureg from nomad import atomutils from nomad.parsing.parser import FairdiParser from nomad.parsing.file_parser import TextParser, Quantity from nomad.datamodel.metainfo.public import ( section_run, section_method, section_system, section_XC_functionals, section_scf_iteration, section_single_configuration_calculation, section_sampling_method, section_frame_sequence, section_dos, section_k_band, section_k_band_segment, section_basis_set_atom_centered ) from crystalparser.metainfo.crystal import x_crystal_section_shell def capture(regex): return r'(' + regex + r')' flt = r'-?(?:\d+\.?\d*|\d*\.?\d+)(?:E[\+-]?\d+)?' # Floating point number flt_c = capture(flt) # Captures a floating point number flt_crystal_c = r'(-?\d+(?:.\d+)?\*\*-?.*\d+)' # Crystal specific floating point syntax ws = r'\s+' # Series of white-space characters integer = r'-?\d+' # Integer number integer_c = capture(integer) # Captures integer number word = r'[a-zA-Z]+' # A single alphanumeric word word_c = capture(word) # Captures a single alphanumeric word br = r'\r?\n' # Newline that works for both Windows and Unix. Crystal can be run on a Windows machine as well. class CrystalParser(FairdiParser): """NOMAD-lab parser for Crystal. 
""" def __init__(self): super().__init__( name='parsers/crystal', code_name='Crystal', code_homepage='https://www.crystal.unito.it/', mainfile_contents_re=( fr'({br} \*\s+CRYSTAL[\d]+\s+\*{br} \*\s*{word} : \d+[\.\d+]*)' ) ) def parse_output(self, filepath): """Reads the calculation output. """ outputparser = TextParser( filepath, quantities=[ # Header Quantity("datetime", fr'(?:Date\:|date)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("hostname", fr'(?:Running on\:|hostname)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("os", fr'(?:system)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("user", fr'user\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("input_path", fr'(?:Input data|input data in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("output_path", fr'(?:Output\:|output data in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("executable_path", fr'(?:Executable\:|crystal executable in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("tmpdir", fr'(?:Temporary directory\:|temporary directory)\s+(.*?){br}', str_operation=lambda x: x, repeats=False), Quantity("system_type", fr'(CRYSTAL|SLAB|POLYMER|HELIX|MOLECULE|EXTERNAL|DLVINPUT)', repeats=False), Quantity("calculation_type", fr'(OPTGEOM|FREQCALC|ANHARM)', repeats=False), # Input Quantity( "dftd3", fr'(DFTD3{br}[\s\S]*?END{br})', sub_parser=TextParser(quantities=[ Quantity( "version", r'(VERSION \d)', str_operation=lambda x: x, repeats=False, ), ]), repeats=False, ), Quantity( "grimme", fr'(GRIMME{br}[\s\S]*?END{br})', repeats=False, ), Quantity( "dft", fr'(DFT{br}[\w\s]*?END{br})', sub_parser=TextParser(quantities=[ Quantity( "exchange", fr'EXCHANGE{br}(LDA|VBH|BECKE|PBE|PBESOL|mPW91|PWGGA|SOGGA|WCGGA)', repeats=False, ), Quantity( "correlation", fr'CORRELAT{br}(PZ|VBH|VWN|LYP|P86|PBE|PBESOL|PWGGA|PWLSD|WL)', repeats=False, ), Quantity( "exchange_correlation", 
fr'(SVWN|BLYP|PBEXC|PBESOLXC|SOGGAXC|B3PW|B3LYP|PBE0|PBESOL0|B1WC|WCILYP|B97H|PBE0-13|HYBRID|NONLOCAL|HSE06|HSESOL|HISS|RSHXLDA|wB97|wB97X|LC-WPBE|LC-WPBESOL|LC-WBLYP|M05-2X|M05|M062X|M06HF|M06L|M06|B2PLYP|B2GPPLYP|mPW2PLYP|DHYBRID)', repeats=False, ), ]), repeats=False, ), Quantity("program_version", fr'{br} \*\s+CRYSTAL([\d]+)\s+\*', repeats=False, dtype=str), Quantity("distribution", fr'{br} \*\s*({word} : \d+[\.\d+]*)', str_operation=lambda x: x, repeats=False), Quantity("start_timestamp", fr' EEEEEEEEEE STARTING DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False), Quantity("title", fr' EEEEEEEEEE STARTING DATE.*?{br}\s*(.*?){br}{br}', str_operation=lambda x: x, repeats=False), Quantity("hamiltonian_type", fr' (KOHN-SHAM HAMILTONIAN|HARTREE-FOCK HAMILTONIAN)', str_operation=lambda x: x, repeats=False), Quantity("xc_out", fr' \(EXCHANGE\)\[CORRELATION\] FUNCTIONAL:(\([\s\S]+?\)\[[\s\S]+?\])', str_operation=lambda x: x, repeats=False), Quantity("hybrid_out", fr' HYBRID EXCHANGE - PERCENTAGE OF FOCK EXCHANGE\s+{flt_c}', repeats=False), # Geometry optimization settings Quantity('initial_trust_radius', fr' INITIAL TRUST RADIUS\s+{flt_c}', repeats=False), Quantity('maximum_trust_radius', fr' MAXIMUM TRUST RADIUS\s+{flt_c}', repeats=False), Quantity('maximum_gradient_component', fr' MAXIMUM GRADIENT COMPONENT\s+{flt_c}', repeats=False), Quantity('rms_gradient_component', fr' R\.M\.S\. OF GRADIENT COMPONENT\s+{flt_c}', repeats=False), Quantity('rms_displacement_component', fr' R\.M\.S\. 
OF DISPLACEMENT COMPONENTS\s+{flt_c}', repeats=False), Quantity('geometry_change', fr' MAXIMUM DISPLACEMENT COMPONENT\s+{flt_c}', unit=ureg.bohr, repeats=False), Quantity('energy_change', fr' THRESHOLD ON ENERGY CHANGE\s+{flt_c}', unit=ureg.hartree, repeats=False), Quantity('extrapolating_polynomial_order', fr' EXTRAPOLATING POLYNOMIAL ORDER{ws}{integer_c}', repeats=False), Quantity('max_steps', fr' MAXIMUM ALLOWED NUMBER OF STEPS\s+{integer_c}', repeats=False), Quantity('sorting_of_energy_points', fr'SORTING OF ENERGY POINTS\:\s+{word_c}', repeats=False), # System Quantity("material_type", fr' ((?:MOLECULAR|SLAB) CALCULATION){br}', str_operation=lambda x: x, repeats=False), Quantity("crystal_family", fr' CRYSTAL FAMILY\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False), Quantity("crystal_class", fr' CRYSTAL CLASS \(GROTH - 1921\)\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False), Quantity("space_group", fr' SPACE GROUP \(CENTROSYMMETRIC\)\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False), Quantity("dimensionality", fr' GEOMETRY FOR WAVE FUNCTION - DIMENSIONALITY OF THE SYSTEM\s+(\d)', repeats=False), Quantity( 'lattice_parameters', fr' (?:PRIMITIVE CELL - CENTRING CODE\s*[\s\S]*?\s*VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3{br}|PRIMITIVE CELL{br})' +\ fr' A B C ALPHA BETA GAMMA\s*' +\ fr'{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}', shape=(6), dtype=np.float64, repeats=False, ), Quantity( "labels_positions", fr' ATOMS IN THE ASYMMETRIC UNIT\s+{integer} - ATOMS IN THE UNIT CELL:\s+{integer}{br}' +\ fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\ re.escape(' *******************************************************************************') +\ fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 7), dtype=str, repeats=False, ), # Used to capture an edited geometry. 
Can contain # substitutions, supercells, deformations etc. in any order. Quantity( 'system_edited', fr' \*\s+GEOMETRY EDITING[\S\s]*?' +\ re.escape(' *******************************************************************************') + fr'{br}' +\ fr' LATTICE PARAMETERS \(ANGSTROMS AND DEGREES\) - BOHR =\s*0?\.\d+ ANGSTROM{br}' +\ fr' (?:PRIMITIVE CELL - CENTRING CODE [\s\S]*?VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3|PRIMITIVE CELL){br}' +\ fr'\s+A\s+B\s+C\s+ALPHA\s+BETA\s+GAMMA\s*{br}' +\ fr'(\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br}' +\ re.escape(' *******************************************************************************') + fr'{br}' +\ fr' ATOMS IN THE ASYMMETRIC UNIT\s+{integer} - ATOMS IN THE UNIT CELL:\s+{integer}{br}' +\ fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))(?:\s+R\(ANGS\))?\s*{br}' +\ re.escape(' *******************************************************************************') +\ fr'(?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}(?:\s+{flt})?{br})+)' +\ fr'{br}' +\ fr' T = ATOM BELONGING TO THE ASYMMETRIC UNIT', sub_parser=TextParser(quantities=[ Quantity( "lattice_parameters", fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}', shape=(6), dtype=np.float64, repeats=False, ), Quantity( "labels_positions", fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\ re.escape(' *******************************************************************************') +\ fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 7), dtype=str, repeats=False, ), Quantity( "labels_positions_nanotube", fr'\s+ATOM\s+X/A\s+Y\(ANGSTROM\)\s+Z\(ANGSTROM\)\s+R\(ANGS\)\s*{br}' +\ re.escape(' *******************************************************************************') +\ fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 8), dtype=str, repeats=False, ), ]), 
repeats=False, ), Quantity( 'lattice_vectors_restart', fr' DIRECT LATTICE VECTOR COMPONENTS \(ANGSTROM\){br}' +\ fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}' +\ fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}' +\ fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}', shape=(3, 3), dtype=np.float64, repeats=False, ), Quantity( "labels_positions_restart", fr' ATOM N\.AT\. SHELL X\(A\) Y\(A\) Z\(A\) EXAD N\.ELECT\.{br}' +\ re.escape(' *******************************************************************************') +\ fr'((?:\s+{integer}\s+{integer}\s+{word}\s+{integer}\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 9), dtype=str, repeats=False, ), Quantity("symmops", fr' NUMBER OF SYMMETRY OPERATORS\s*:\s*(\d){br}', repeats=False), # Method Quantity( 'basis_set', re.escape(r' *******************************************************************************') +\ fr'{br} LOCAL ATOMIC FUNCTIONS BASIS SET{br}' +\ re.escape(r' *******************************************************************************') +\ fr'{br} ATOM X\(AU\) Y\(AU\) Z\(AU\) N. 
TYPE EXPONENT S COEF P COEF D/F/G COEF{br}' +\ fr'([\s\S]*?){br} INFORMATION', sub_parser=TextParser(quantities=[ Quantity( "basis_sets", fr'({ws}{integer}{ws}{word}{ws}{flt}{ws}{flt}{ws}{flt}{br}(?:(?:\s+(?:\d+-\s+)?\d+\s+(?:S|P|SP|D|F|G)\s*{br}[\s\S]*?(?:{ws}{flt}(?:{ws})?{flt}(?:{ws})?{flt}(?:{ws})?{flt}{br})+)+)?)', sub_parser=TextParser(quantities=[ Quantity( "species", fr'({ws}{integer}{ws}{word}{ws}{flt}{ws}{flt}{ws}{flt}{br})', repeats=False, ), Quantity( "shells", fr'(\s+(?:\d+-\s+)?\d+\s+(?:S|P|SP|D|F|G)\s*{br}[\s\S]*?(?:{ws}{flt}(?:{ws})?{flt}(?:{ws})?{flt}(?:{ws})?{flt}{br})+)', sub_parser=TextParser(quantities=[ Quantity( "shell_range", r'(\s+(?:\d+-\s+)?\d+)', str_operation=lambda x: "".join(x.split()), repeats=False, ), Quantity( "shell_type", fr'((?:S|P|SP|D|F|G))\s*{br}', str_operation=lambda x: x.strip(), repeats=False, ), Quantity( "shell_coefficients", fr'{ws}({flt})(?:{ws})?({flt})(?:{ws})?({flt})(?:{ws})?({flt}){br}', repeats=True, dtype=np.float64, shape=(4) ), ]), repeats=True, ), ]), repeats=True, ), ]), repeats=False, ), Quantity("fock_ks_matrix_mixing", fr' INFORMATION \*+.*?\*+.*?\:\s+FOCK/KS MATRIX MIXING SET TO\s+{integer_c}\s+\%{br}', repeats=False), Quantity("coulomb_bipolar_buffer", fr' INFORMATION \*+.*?\*+.*?\:\s+COULOMB BIPOLAR BUFFER SET TO\s+{flt_c} Mb{br}', repeats=False), Quantity("exchange_bipolar_buffer", fr' INFORMATION \*+.*?\*+.*?\:\s+EXCHANGE BIPOLAR BUFFER SET TO\s+{flt_c} Mb{br}', repeats=False), Quantity("toldee", fr' INFORMATION \*+ TOLDEE \*+\s*\*+ SCF TOL ON TOTAL ENERGY SET TO\s+{flt_c}{br}', repeats=False), Quantity("n_atoms_per_cell", r' N\. OF ATOMS PER CELL\s+' + integer_c, repeats=False), Quantity("n_shells", r' NUMBER OF SHELLS\s+' + integer_c, repeats=False), Quantity("n_ao", r' NUMBER OF AO\s+' + integer_c, repeats=False), Quantity("n_electrons", r' N\. 
OF ELECTRONS PER CELL\s+' + integer_c, repeats=False), Quantity("n_core_electrons", r' CORE ELECTRONS PER CELL\s+' + integer_c, repeats=False), Quantity("n_symmops", r' N\. OF SYMMETRY OPERATORS\s+' + integer_c, repeats=False), Quantity("tol_coulomb_overlap", r' COULOMB OVERLAP TOL\s+\(T1\) ' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity("tol_coulomb_penetration", r' COULOMB PENETRATION TOL\s+\(T2\) ' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity("tol_exchange_overlap", r' EXCHANGE OVERLAP TOL\s+\(T3\) ' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity("tol_pseudo_overlap_f", r' EXCHANGE PSEUDO OVP \(F\(G\)\)\s+\(T4\) ' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity("tol_pseudo_overlap_p", r' EXCHANGE PSEUDO OVP \(P\(G\)\)\s+\(T5\) ' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity("pole_order", r' POLE ORDER IN MONO ZONE\s+' + integer_c, repeats=False), Quantity("calculation_type", fr' TYPE OF CALCULATION \:\s+(.*?{br}\s+.*?){br}', str_operation=lambda x: " ".join(x.split()), repeats=False), Quantity('xc_functional', fr' \(EXCHANGE\)\[CORRELATION\] FUNCTIONAL:(\(.+\)\[.+\]){br}', str_operation=lambda x: x, repeats=False,), Quantity("cappa", fr'CAPPA:IS1\s+{integer_c};IS2\s+{integer_c};IS3\s+{integer_c}; K PTS MONK NET\s+{integer_c}; SYMMOPS:\s*K SPACE\s+{integer_c};G SPACE\s+{integer_c}', repeats=False), Quantity('scf_max_iteration', r' MAX NUMBER OF SCF CYCLES\s+' + integer_c, repeats=False), Quantity('convergenge_deltap', r'CONVERGENCE ON DELTAP\s+' + flt_crystal_c, str_operation=to_float, repeats=False), Quantity('weight_f', r'WEIGHT OF F\(I\) IN F\(I\+1\)\s+' + integer_c, repeats=False), Quantity('scf_threshold_energy_change', r'CONVERGENCE ON ENERGY\s+' + flt_crystal_c, str_operation=to_float, repeats=False, unit=ureg.hartree), Quantity('shrink', r'SHRINK\. 
FACT\.\(MONKH\.\)\s+(' + integer + ws + integer + ws + integer + r')', repeats=False), Quantity('n_k_points_ibz', r'NUMBER OF K POINTS IN THE IBZ\s+' + integer_c, repeats=False), Quantity('shrink_gilat', r'SHRINKING FACTOR\(GILAT NET\)\s+' + integer_c, repeats=False), Quantity('n_k_points_gilat', r'NUMBER OF K POINTS\(GILAT NET\)\s+' + integer_c, repeats=False), # SCF Quantity( "scf_block", r' CHARGE NORMALIZATION FACTOR([\s\S]*?) == SCF ENDED', sub_parser=TextParser(quantities=[ Quantity( 'scf_iterations', r'( CHARGE NORMALIZATION FACTOR[\s\S]*? (?:TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT PDIG|TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT MPP_KSPA|== SCF ENDED))', sub_parser=TextParser(quantities=[ Quantity('charge_normalization_factor', fr' CHARGE NORMALIZATION FACTOR{ws}{flt}{br}', repeats=False), Quantity('total_atomic_charges', fr' TOTAL ATOMIC CHARGES:{br}(?:{ws}{flt})+{br}', repeats=False), Quantity('QGAM', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT QGAM TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False), Quantity('BIEL2', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT BIEL2 TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False), Quantity('energy_kinetic', fr' ::: KINETIC ENERGY\s+{flt_c}{br}', unit=ureg.hartree, repeats=False), Quantity('energy_ee', fr' ::: TOTAL E-E\s+{flt_c}{br}', unit=ureg.hartree, repeats=False), Quantity('energy_en_ne', fr' ::: TOTAL E-N \+ N-E\s+{flt_c}{br}', unit=ureg.hartree, repeats=False), Quantity('energy_nn', fr' ::: TOTAL N-N\s+{flt_c}{br}', unit=ureg.hartree, repeats=False), Quantity('virial_coefficient', fr' ::: VIRIAL COEFFICIENT\s+{flt_c}{br}', repeats=False), Quantity('TOTENY', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT TOTENY TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False), Quantity('integrated_density', fr' NUMERICALLY INTEGRATED DENSITY{ws}{flt}{br}', repeats=False), Quantity('NUMDFT', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT NUMDFT TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False), Quantity('energies', fr' 
CYC{ws}{integer}{ws}ETOT\(AU\){ws}{flt_c}{ws}DETOT{ws}{flt_c}{ws}tst{ws}{flt}{ws}PX{ws}{flt}{br}', repeats=False, dtype=np.float64, unit=ureg.hartree), Quantity('FDIK', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT FDIK TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False), ]), repeats=True, ), ]), repeats=False, ), Quantity('number_of_scf_iterations', fr' == SCF ENDED - CONVERGENCE ON (?:ENERGY|TESTER)\s+E\(AU\)\s*{flt}\s*CYCLES\s+{integer_c}', repeats=False), Quantity( 'energy_total', fr' TOTAL ENERGY\((?:DFT|HF)\)\(AU\)\(\s*{integer}\)\s*{flt_c} DE\s*{flt} (?:tester|tst)\s*{flt}', unit=ureg.hartree, repeats=False, ), # Geometry optimization steps Quantity( "geo_opt", fr'( (?:COORDINATE AND CELL OPTIMIZATION|COORDINATE OPTIMIZATION) - POINT\s+1{br}' +\ r'[\s\S]*?' +\ re.escape(r' ******************************************************************') + fr'{br}' +\ fr'\s*\* OPT END - CONVERGED \* E\(AU\)\:\s+{flt}\s+POINTS\s+{integer})\s+\*{br}', sub_parser=TextParser(quantities=[ Quantity( 'geo_opt_step', fr' (?:COORDINATE AND CELL OPTIMIZATION|COORDINATE OPTIMIZATION) - POINT\s+{integer}{br}' +\ fr'([\s\S]*?)' +\ fr' (?:TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT OPTI|\* OPT END)', sub_parser=TextParser(quantities=[ Quantity( 'lattice_parameters', fr' (?:PRIMITIVE CELL - CENTRING CODE [\s\S]*?VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3{br}|PRIMITIVE CELL{br})' +\ fr' A B C ALPHA BETA GAMMA\s*' +\ fr'{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}', shape=(6), dtype=np.float64, repeats=False, ), Quantity( "labels_positions", fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\ re.escape(' *******************************************************************************') +\ fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 7), dtype=str, repeats=False, ), Quantity( "labels_positions_nanotube", fr'\s+ATOM\s+X/A\s+Y\(ANGSTROM\)\s+Z\(ANGSTROM\)\s+R\(ANGS\)\s*{br}' +\ re.escape(' 
*******************************************************************************') +\ fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)', shape=(-1, 8), dtype=str, repeats=False, ), Quantity('energy', fr' TOTAL ENERGY\({word}\)\(AU\)\(\s*{integer}\)\s*{flt_c}', unit=ureg.hartree, repeats=False), ]), repeats=True, ), Quantity('converged', fr' \* OPT END - ([\s\S]*?) \* E\(AU\)\:\s+{flt}\s+POINTS\s+{integer}', repeats=False), ]), repeats=False, ), # Band structure Quantity( "band_structure", re.escape(fr' *******************************************************************************') + fr'{br}' +\ fr' \* \*{br}' +\ fr' \* BAND STRUCTURE \*{br}' +\ fr'[\s\S]*?' +\ fr' \* FROM BAND\s+{integer} TO BAND\s+{integer}\s+\*{br}' +\ fr' \* TOTAL OF\s+{integer} K-POINTS ALONG THE PATH\s+\*{br}' +\ fr' \* \*{br}' +\ re.escape(r' *******************************************************************************') + fr'{br}' +\ fr'([\s\S]*?' +\ fr' ENERGY RANGE \(A\.U\.\)\s*{flt} - \s*{flt} EFERMI\s*{flt_c}{br})', sub_parser=TextParser(quantities=[ Quantity( 'segments', fr' (LINE\s+{integer} \( {flt} {flt} {flt}: {flt} {flt} {flt}\) IN TERMS OF PRIMITIVE LATTICE VECTORS{br}' +\ fr'\s+{integer} POINTS - SHRINKING_FACTOR\s*{integer}{br}' +\ fr' CARTESIAN COORD\.\s+\( {flt} {flt} {flt}\):\( {flt} {flt} {flt}\) STEP\s+{flt}{br}{br}{br})', sub_parser=TextParser(quantities=[ Quantity( 'start_end', fr'LINE\s+{integer} \( {flt_c} {flt_c} {flt_c}: {flt_c} {flt_c} {flt_c}\) IN TERMS OF PRIMITIVE LATTICE VECTORS{br}', type=np.float64, shape=(2, 3), repeats=False, ), Quantity( 'n_steps', fr'\s+{integer_c} POINTS - ', repeats=False, ), Quantity( 'shrinking_factor', fr'SHRINKING_FACTOR\s*{integer_c}{br}', repeats=False, ), ]), repeats=True, ), Quantity("fermi_energy", fr' ENERGY RANGE \(A\.U\.\)\s*{flt} - \s*{flt} EFERMI\s*{flt_c}', repeats=False), ]), repeats=False, ), # DOS Quantity( 'dos', fr' RESTART WITH NEW K POINTS NET{br}' +\ fr'([\s\S]+?' 
+\ fr' TOTAL AND PROJECTED DENSITY OF STATES - FOURIER LEGENDRE METHOD{br}' +\ fr'[\s\S]+?)' +\ fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT DOSS TELAPSE', sub_parser=TextParser(quantities=[ Quantity( 'k_points', fr' \*\*\* K POINTS COORDINATES (OBLIQUE COORDINATES IN UNITS OF IS = {int}){br}', repeats=False, ), Quantity( 'highest_occupied', fr' TOP OF VALENCE BANDS - BAND\s*{integer}; K\s*{integer}; EIG {flt_c}\s*AU', unit=ureg.hartree, repeats=False, ), Quantity( 'lowest_unoccupied', fr' BOTTOM OF VIRTUAL BANDS - BAND\s*{integer}; K\s*{integer}; EIG\s*{flt_c}\s*AU', unit=ureg.hartree, repeats=False, ), ]), repeats=False, ), Quantity("end_timestamp", fr' EEEEEEEEEE TERMINATION DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False), # Forces Quantity( 'forces', fr' CARTESIAN FORCES IN HARTREE/BOHR \(ANALYTICAL\){br}' fr' ATOM X Y Z{br}' +\ fr'((?:' + ws + integer + ws + integer + ws + flt + ws + flt + ws + flt + fr'{br})*)', shape=(-1, 5), dtype=str, repeats=False, ), Quantity("end_timestamp", fr' EEEEEEEEEE TERMINATION DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False), # Filepaths Quantity("f25_filepath1", fr'file fort\.25 saved as ([\s\S]+?){br}', str_operation=lambda x: x, repeats=False), Quantity("f25_filepath2", fr'BAND/MAPS/DOSS data for plotting fort.25 saved as ([\s\S]+?){br}', str_operation=lambda x: x, repeats=False), ] ) return outputparser def parse_f25(self, filepath): """Parses the f25 file containing e.g. the band structure energies." 
""" f25parser = TextParser( filepath, quantities=[ # Band structure energies Quantity( 'segments', fr'(-\%-0BAND\s*{integer}\s*{integer}\s?{flt}\s?{flt}\s?{flt}{br}' +\ fr'\s*{flt}\s*{flt}{br}' +\ fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\ fr'(?:\s*{flt})+)', sub_parser=TextParser(quantities=[ Quantity( 'first_row', fr'-\%-0BAND\s*{integer_c}\s*{integer_c}\s?{flt_c}\s?{flt_c}\s?{flt_c}{br}', repeats=False, ), Quantity( 'second_row', fr'\s?{flt_c}\s?{flt_c}{br}', repeats=False, ), Quantity( 'energies', fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\ fr'((?:{flt}\s?)+)', str_operation=lambda x: x, repeats=False, ), ]), repeats=True, ), # DOS values Quantity( "dos", fr'(-\%-0DOSS\s*{integer}\s*{integer}\s?{flt}\s?{flt}\s?{flt}{br}' +\ fr'\s*{flt}\s?{flt}{br}' +\ fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\ fr'(?:\s*{flt})+)', sub_parser=TextParser(quantities=[ Quantity( 'first_row', fr'-\%-0DOSS\s*{integer_c}\s*{integer_c}\s?{flt_c}\s?{flt_c}\s?{flt_c}{br}', repeats=False, ), Quantity( 'second_row', fr'\s?{flt_c}\s?{flt_c}{br}', repeats=False, ), Quantity( 'values', fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\ fr'((?:\s*{flt})+)', str_operation=lambda x: x, repeats=False, ), ]), repeats=False, ), ] ) return f25parser def parse(self, filepath, archive, logger): # Read files out = self.parse_output(filepath) wrkdir, _ = os.path.split(filepath) f25_filepath1 = out["f25_filepath1"] f25_filepath2 = out["f25_filepath2"] f25_filepath_original = f25_filepath1 if f25_filepath1 else f25_filepath2 f25 = None if f25_filepath_original is not None: _, f25_filename = os.path.split(f25_filepath_original) f25_filepath = os.path.join(wrkdir, f25_filename) if os.path.exists(f25_filepath): f25 = self.parse_f25(f25_filepath) # Run run = archive.m_create(section_run) run.program_name = 'Crystal' run.program_version = 
out["program_version"] run.program_basis_set_type = 'gaussians' run.x_crystal_datetime = out["datetime"] run.x_crystal_hostname = out["hostname"] run.x_crystal_user = out["user"] run.x_crystal_os = out["os"] run.x_crystal_input_path = out["input_path"] run.x_crystal_output_path = out["output_path"] run.x_crystal_tmpdir = out["tmpdir"] run.x_crystal_executable_path = out["executable_path"] distribution = out["distribution"] if distribution is not None: dist, minor = distribution.split(" : ", 1) run.x_crystal_distribution = dist run.x_crystal_version_minor = minor title = out["title"] if title is not None: run.x_crystal_run_title = title.strip() run.time_run_date_start = to_unix_time(out["start_timestamp"]) run.time_run_date_end = to_unix_time(out["end_timestamp"]) # System. There are several alternative sources for this information # depending on the run type. system = run.m_create(section_system) material_type = out["material_type"] system_edited = out["system_edited"] labels_positions = out["labels_positions"] lattice_vectors_restart = out["lattice_vectors_restart"] pbc = None if material_type == "MOLECULAR CALCULATION" else np.array([True, True, True]) # By default the system is read from the configuration at the beginning # of the file: it may come from restart or clean start pos_type = { "MOLECULAR CALCULATION": "cartesian", "SLAB CALCULATION": "slab", None: "scaled", }.get(material_type) if labels_positions is not None: atomic_numbers = labels_positions[:, 2] atom_labels = labels_positions[:, 3] atom_pos = labels_positions[:, 4:7] lattice = out["lattice_parameters"] elif lattice_vectors_restart is not None: labels_positions = out["labels_positions_restart"] atomic_numbers = labels_positions[:, 1] atom_labels = labels_positions[:, 2] atom_pos = labels_positions[:, 4:7] lattice = lattice_vectors_restart pos_type = "cartesian" # If any geometry edits (supercells, substitutions, dispplacements, # deformations, nanotube construction, etc.) 
are done on top of the # original system, they override the original system. if system_edited is not None: if system_edited["labels_positions_nanotube"] is not None: pos_type = "nanotube" labels_positions = system_edited["labels_positions_nanotube"] else: labels_positions = system_edited["labels_positions"] atomic_numbers = labels_positions[:, 2] atom_labels = labels_positions[:, 3] atom_pos = labels_positions[:, 4:7] lattice = system_edited["lattice_parameters"] cart_pos, atomic_numbers, atom_labels, lattice_vectors = to_system( atomic_numbers, atom_labels, atom_pos, lattice, pos_type=pos_type, ) system.lattice_vectors = lattice_vectors system.configuration_periodic_dimensions = pbc system.atom_positions = cart_pos system.atom_species = atomic_numbers system.atom_labels = atom_labels dimensionality = out["dimensionality"] system.x_crystal_dimensionality = dimensionality crystal_family = out["crystal_family"] system.x_crystal_family = crystal_family crystal_class = out["crystal_class"] system.x_crystal_class = crystal_class n_symmops = out["n_symmops"] system.x_crystal_n_symmops = n_symmops space_group = out["space_group"] system.x_crystal_space_group = space_group # Method method = run.m_create(section_method) method.electronic_structure_method = 'DFT' method.scf_max_iteration = out["scf_max_iteration"] method.scf_threshold_energy_change = out["scf_threshold_energy_change"] dftd3 = out["dftd3"] if dftd3: if dftd3["version"] == "VERSION 2": method.van_der_Waals_method = "G06" else: method.van_der_Waals_method = "DFT-D3" if out["grimme"]: method.van_der_Waals_method = "G06" # Try to primarily read the methodology from input dft = out["dft"] if dft: exchange = dft["exchange"] correlation = dft["correlation"] exchange_correlation = dft["exchange_correlation"] functionals = to_libxc(exchange, correlation, exchange_correlation) if functionals: for xc in functionals: method.m_add_sub_section(section_method.section_XC_functionals, xc) method.XC_functional = 
to_libxc_name(functionals) # If methodology not reported in input, try to read from output if dft is None or not functionals: hamiltonian_type = out["hamiltonian_type"] if hamiltonian_type == "HARTREE-FOCK HAMILTONIAN": xc = section_XC_functionals() xc.XC_functional_name = "HF_X" xc.XC_functional_weight = 1.0 method.m_add_sub_section(section_method.section_XC_functionals, xc) method.XC_functional = to_libxc_name([xc]) elif hamiltonian_type == "KOHN-SHAM HAMILTONIAN": xc_output = out["xc_out"] hybrid = out["hybrid_out"] functionals = to_libxc_out(xc_output, hybrid) if functionals: for xc in functionals: method.m_add_sub_section(section_method.section_XC_functionals, xc) method.XC_functional = to_libxc_name(functionals) method.x_crystal_fock_ks_matrix_mixing = out["fock_ks_matrix_mixing"] method.x_crystal_coulomb_bipolar_buffer = out["coulomb_bipolar_buffer"] method.x_crystal_exchange_bipolar_buffer = out["exchange_bipolar_buffer"] method.x_crystal_toldee = out["toldee"] method.x_crystal_n_atoms = out["n_atoms_per_cell"] method.x_crystal_n_shells = out["n_shells"] method.x_crystal_n_orbitals = out["n_ao"] method.x_crystal_n_electrons = out["n_electrons"] method.x_crystal_n_core_electrons = out["n_core_electrons"] method.x_crystal_n_symmops = out["n_symmops"] method.x_crystal_tol_coulomb_overlap = out["tol_coulomb_overlap"] method.x_crystal_tol_coulomb_penetration = out["tol_coulomb_penetration"] method.x_crystal_tol_exchange_overlap = out["tol_exchange_overlap"] method.x_crystal_tol_pseudo_overlap_f = out["tol_pseudo_overlap_f"] method.x_crystal_tol_pseudo_overlap_p = out["tol_pseudo_overlap_p"] method.x_crystal_pole_order = out["pole_order"] method.x_crystal_type_of_calculation = out["calculation_type"] cappa = out["cappa"] if cappa is not None: method.x_crystal_is1 = cappa[0] method.x_crystal_is2 = cappa[1] method.x_crystal_is3 = cappa[2] method.x_crystal_k_pts_monk_net = cappa[3] method.x_crystal_symmops_k = cappa[4] method.x_crystal_symmops_g = cappa[5] 
method.x_crystal_weight_f = out["weight_f"] method.x_crystal_shrink = out["shrink"] method.x_crystal_shrink_gilat = out["shrink_gilat"] method.x_crystal_convergence_deltap = out["convergenge_deltap"] method.x_crystal_n_k_points_ibz = out["n_k_points_ibz"] method.x_crystal_n_k_points_gilat = out["n_k_points_gilat"] basis_set = out["basis_set"] covered_species = set() if basis_set is not None: for bs in basis_set["basis_sets"]: atomic_number = label_to_atomic_number(bs["species"][1]) shells = bs["shells"] if atomic_number != covered_species and shells is not None: section_basis_set = section_basis_set_atom_centered() section_basis_set.basis_set_atom_number = atomic_number run.m_add_sub_section(section_run.section_basis_set_atom_centered, section_basis_set) covered_species.add(atomic_number) for shell in shells: section_shell = x_crystal_section_shell() section_shell.x_crystal_shell_range = str(shell["shell_range"]) section_shell.x_crystal_shell_type = shell["shell_type"] section_shell.x_crystal_shell_coefficients = np.array(shell["shell_coefficients"]) section_basis_set.m_add_sub_section(section_basis_set_atom_centered.x_crystal_section_shell, section_shell) # SCC scc = run.m_create(section_single_configuration_calculation) scf_block = out["scf_block"] if scf_block is not None: number_of_scf_iterations = out["number_of_scf_iterations"] scc.single_configuration_calculation_converged = number_of_scf_iterations is not None for scf in scf_block["scf_iterations"]: energies = scf["energies"] section_scf = section_scf_iteration() section_scf.energy_total_scf_iteration = energies[0] section_scf.energy_change_scf_iteration = energies[1] energy_kinetic = scf["energy_kinetic"] section_scf.electronic_kinetic_energy_scf_iteration = energy_kinetic energy_ee = scf["energy_ee"] section_scf.x_crystal_scf_energy_ee = energy_ee energy_en_ne = scf["energy_en_ne"] section_scf.x_crystal_scf_energy_en_ne = energy_en_ne energy_nn = scf["energy_nn"] section_scf.x_crystal_scf_energy_nn = 
energy_nn virial_coefficient = scf["virial_coefficient"] section_scf.x_crystal_scf_virial_coefficient = virial_coefficient scc.m_add_sub_section(section_single_configuration_calculation.section_scf_iteration, section_scf) scc.number_of_scf_iterations = len(scc.section_scf_iteration) if out["energy_total"] is not None: # If the final energy is found, replace the final SCF step energy # with it, as it is more accurate. if scc.section_scf_iteration: scc.section_scf_iteration[-1].energy_total_scf_iteration = out["energy_total"] scc.energy_total = out["energy_total"] forces = out["forces"] if forces is not None: scc.atom_forces = forces[:, 2:].astype(float) * ureg.hartree / ureg.bohr scc.single_configuration_calculation_to_system_ref = system scc.single_configuration_to_calculation_method_ref = method # Band structure band_structure = out["band_structure"] if band_structure is not None: section_band = section_k_band() section_band.band_structure_kind = "electronic" section_band.reciprocal_cell = atomutils.reciprocal_cell(system.lattice_vectors.magnitude) * 1 / ureg.meter segments = band_structure["segments"] k_points = to_k_points(segments) for i_seg, segment in enumerate(segments): section_segment = section_k_band_segment() start_end = segment["start_end"] section_segment.band_k_points = k_points[i_seg] section_segment.band_segm_start_end = start_end section_segment.number_of_k_points_per_segment = k_points[i_seg].shape[0] section_band.m_add_sub_section(section_k_band.section_k_band_segment, section_segment) # Read energies from the f25-file. If the file is not found, the # band structure is not written in the archive. The meaning of the # values is given in an appendix of the Crystal manual. 
if f25 is not None: segments = f25["segments"] prev_energy = None prev_k_point = None first_row = segments[0]["first_row"] fermi_energy = first_row[4] scc.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree for i_seg, segment in enumerate(segments): first_row = segment["first_row"] cols = int(first_row[0]) rows = int(first_row[1]) energies = segment["energies"] energies = to_array(cols, rows, energies) # If a segment starts from the previous point, then # re-report the energy. This way segments get the same # treatment in the metainfo whether they are continuous # or not. start_k_point = section_band.section_k_band_segment[i_seg].band_k_points[0] end_k_point = section_band.section_k_band_segment[i_seg].band_k_points[-1] if prev_k_point is not None and np.allclose(prev_k_point, start_k_point): energies = np.concatenate(([prev_energy], energies), axis=0) section_band.section_k_band_segment[i_seg].band_energies = energies[None, :] * ureg.hartree prev_energy = energies[-1] prev_k_point = end_k_point scc.m_add_sub_section(section_single_configuration_calculation.section_k_band, section_band) # DOS dos = out["dos"] if dos is not None: # Read values and energies from the f25-file. If the file is not # found, the dos is not written in the archive. The meaning of the # values is given in an appendix of the Crystal manual. 
if f25 is not None: dos_f25 = f25["dos"] if dos_f25 is not None: scc_dos = section_single_configuration_calculation() scc_dos.single_configuration_calculation_to_system_ref = system scc_dos.single_configuration_to_calculation_method_ref = method sec_dos = section_dos() first_row = dos_f25["first_row"] cols = int(first_row[0]) rows = int(first_row[1]) de = first_row[3] fermi_energy = first_row[4] scc_dos.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree second_row = dos_f25["second_row"] start_energy = second_row[1] sec_dos.dos_energies = (start_energy + np.arange(rows) * de) * ureg.hartree dos_values = dos_f25["values"] dos_values = to_array(cols, rows, dos_values) sec_dos.dos_values = dos_values.T sec_dos.dos_kind = "electronical" sec_dos.number_of_dos_values = sec_dos.dos_values.shape[1] scc_dos.m_add_sub_section(section_single_configuration_calculation.section_dos, sec_dos) run.m_add_sub_section(section_run.section_single_configuration_calculation, scc_dos) # Sampling geo_opt = out["geo_opt"] if geo_opt is not None: steps = geo_opt["geo_opt_step"] if steps is not None: sampling_method = section_sampling_method() sampling_method.sampling_method = "geometry_optimization" sampling_method.geometry_optimization_energy_change = out["energy_change"] sampling_method.geometry_optimization_geometry_change = out["geometry_change"] run.m_add_sub_section(section_run.section_sampling_method, sampling_method) fs = section_frame_sequence() run.m_add_sub_section(section_run.section_frame_sequence, fs) # First step is special: it refers to the initial system which # was printed before entering the geometry optimization loop. 
i_system = system i_energy = steps[0]["energy"] scc.energy_total = i_energy frames = [] for step in steps[1:]: i_scc = section_single_configuration_calculation() i_system = section_system() i_energy = step["energy"] if step["labels_positions_nanotube"] is not None: i_labels_positions = step["labels_positions_nanotube"] else: i_labels_positions = step["labels_positions"] i_atomic_numbers = i_labels_positions[:, 2] i_atom_labels = i_labels_positions[:, 3] i_atom_pos = i_labels_positions[:, 4:7] i_lattice_parameters = step["lattice_parameters"] i_cart_pos, i_atomic_numbers, i_atom_labels, i_lattice_vectors = to_system( i_atomic_numbers, i_atom_labels, i_atom_pos, i_lattice_parameters, pos_type, ) i_system.atom_species = i_atomic_numbers i_system.atom_labels = i_atom_labels i_system.atom_positions = i_cart_pos i_system.lattice_vectors = i_lattice_vectors i_system.configuration_periodic_dimensions = pbc i_scc.energy_total = i_energy i_scc.single_configuration_calculation_to_system_ref = i_system i_scc.single_configuration_to_calculation_method_ref = method run.m_add_sub_section(section_run.section_system, i_system) run.m_add_sub_section(section_run.section_single_configuration_calculation, i_scc) frames.append(i_scc) fs.frame_sequence_local_frames_ref = frames fs.number_of_frames_in_sequence = len(fs.frame_sequence_local_frames_ref) fs.frame_sequence_to_sampling_ref = sampling_method fs.geometry_optimization_converged = geo_opt["converged"] == "CONVERGED" # Remove ghost atom information. The metainfo does not provide a very # good way to deal with them currently so they are simply removed. remove_ghosts(run) def to_k_points(segments): """Converts the given start and end points, the shrinking factor and the number of steps into a list of concrete sampling points in k-space. The shrinking factor tells to how many portions one reciprocal basis vector is divided into. This needs to be done manually as sometimes the k-points are not reported in the output. 
""" all_k_points = [] prev_point = None for segment in segments: start = segment["start_end"][0, :] end = segment["start_end"][1, :] shrinking_factor = segment["shrinking_factor"] n_steps = segment["n_steps"] # Segments that do not start from a previous segment get special # treatment. end_idx = n_steps + 1 if prev_point is None or not np.allclose(prev_point, start): end_idx = n_steps n_steps = n_steps - 1 delta = end - start start_step = (shrinking_factor * start).astype(np.int) step_size = (shrinking_factor * delta / n_steps).astype(np.int) steps = (start_step + step_size * np.arange(0, end_idx)[:, None]) k_points = steps / shrinking_factor all_k_points.append(k_points) prev_point = end return all_k_points def to_system(atomic_numbers, labels, positions, lattice, pos_type="scaled", wrap=False): """Converts a Crystal-specific structure format into cartesian positions and lattice vectors (if present). The conversion depends on the material type. """ atomic_numbers = std_atomic_number(atomic_numbers.astype(np.int)) atom_labels = std_label(labels) positions = positions.astype(np.float64) # Get the lattice vectors if lattice is not None: if lattice.shape == (6,): lattice_vectors = atomutils.cellpar_to_cell(lattice, degrees=True) elif lattice.shape == (3, 3): lattice_vectors = lattice else: lattice_vectors = None # Convert positions based on the given type if pos_type == "cartesian": if lattice_vectors is not None and wrap: cart_pos = atomutils.wrap_positions(positions, lattice_vectors) else: cart_pos = positions elif pos_type == "slab": n_atoms = atomic_numbers.shape[0] scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64) scaled_pos[:, 0:2] = positions[:, 0:2] if wrap: wrapped_pos = atomutils.wrap_positions(scaled_pos) else: wrapped_pos = scaled_pos cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors) cart_pos[:, 2:3] = positions[:, 2:3] elif pos_type == "nanotube": n_atoms = atomic_numbers.shape[0] scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64) 
scaled_pos[:, 0:1] = positions[:, 0:1] if wrap: wrapped_pos = atomutils.wrap_positions(scaled_pos) else: wrapped_pos = scaled_pos cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors) cart_pos[:, 1:3] = positions[:, 1:3] elif pos_type == "scaled": scaled_pos = atomutils.wrap_positions(positions) if wrap else positions cart_pos = atomutils.to_cartesian(scaled_pos, lattice_vectors) if lattice_vectors is not None: lattice_vectors *= ureg.angstrom return cart_pos * ureg.angstrom, atomic_numbers, atom_labels, lattice_vectors def to_float(value): """Transforms the Crystal-specific float notation into a floating point number. """ base, exponent = value.split("**") base = int(base) exponent = int("".join(exponent.split())) return pow(base, exponent) def to_array(cols, rows, values): """Transforms the Crystal-specific f25 array syntax into a numpy array. """ values.replace("\n", "") values = textwrap.wrap(values, 12) values = np.array(values, dtype=np.float64) values = values.reshape((rows, cols)) return values def std_atomic_number(value): """Given an atomic numer in the NAT form (conventional atomic number, where the real atomic number is the remainder when divided by 100), return the actual atomic number. """ return value % 100 def remove_ghosts(run): """Removes ghost atoms from the given section_system. In Crystal ghost atoms are indicated by the atomic number 0. """ for system in run.section_system: ghosts_mask = system.atom_species == 0 if np.any(ghosts_mask): system.atom_species = np.delete(system.atom_species, ghosts_mask) system.atom_labels = np.delete(system.atom_labels, ghosts_mask) system.atom_positions = np.delete(system.atom_positions.magnitude, ghosts_mask, axis=0) def label_to_atomic_number(value): """Given a Crystal specific uppercase species name, returns the corresponding atomic number. 
""" symbol = value.lower().capitalize() atomic_number = ase.data.atomic_numbers[symbol] return atomic_number def atomic_numbers_to_labels(value): """Given a NAT atomic number, returns the corresponding label. """ atomic_numbers = std_atomic_number(value) labels = np.array(ase.data.chemical_symbols)[atomic_numbers] return labels def std_label(value): """Given Crystal specific uppercase species names, returns the capitalized versions. """ labels = [] for label in value: labels.append(label.lower().capitalize()) return labels def to_unix_time(value): """Transforms the Crystal-specific float notation into a floating point number. """ if value is None: return None value = value.strip() date_time_obj = datetime.datetime.strptime(value, '%d %m %Y TIME %H:%M:%S.%f') return date_time_obj.timestamp() def to_libxc(exchange, correlation, exchange_correlation): """Transforms the Crystal-specific XC naming into a list of section_XC_functionals. """ xc_list = [] # Handle the XC's defined with single shortcut if exchange_correlation: exchange_correlation = exchange_correlation.upper() shortcut_map = { "PBEXC": ["GGA_C_PBE", "GGA_X_PBE"], "PBE0": ["HYB_GGA_XC_PBEH"], "B3LYP": ["HYB_GGA_XC_B3LYP"], "HSE06": ["HYB_GGA_XC_HSE06"], "M06": ["HYB_MGGA_XC_M06"], "M05-2X": ["HYB_MGGA_XC_M05_2X"], "LC-WPBE": ["HYB_GGA_XC_LRC_WPBE"], } norm_xc = shortcut_map.get(exchange_correlation) if norm_xc: xc_list.extend(norm_xc) # Handle the exchange part if exchange: exchange = exchange.upper() exchange_map = { "PBE": "GGA_X_PBE", "PBESOL": "GGA_X_PBE_SOL", "BECKE": "GGA_X_B88", "LDA": "LDA_X", "PWGGA": "GGA_X_PW91", } norm_x = exchange_map.get(exchange) if norm_x: xc_list.append(norm_x) # Handle the correlation part if correlation: correlation = correlation.upper() correlation_map = { "PBE": "GGA_C_PBE", "PBESOL": "GGA_C_PBE_SOL", "PZ": "LDA_C_PZ", "WFN": "LDA_C_VWN", "PWGGA": "GGA_C_PW91", } norm_c = correlation_map.get(correlation) if norm_c: xc_list.append(norm_c) # Go throught the XC list and 
add the sections and gather a summary functionals = [] for xc in xc_list: section = section_XC_functionals() weight = 1.0 section.XC_functional_name = xc section.XC_functional_weight = weight functionals.append(section) return functionals def to_libxc_out(xc, hybridization): """Transforms the Crystal-specific XC naming in the output into a list of section_XC_functionals. """ xc_list = [] exchange, correlation = xc[1:-1].split(")[") # Handle the exchange part if exchange: exchange = exchange.upper() exchange_map = { "PERDEW-BURKE-ERNZERHOF": "GGA_X_PBE", "PERDEW-WANG GGA": "GGA_X_PW91", "WU-COHEN GGA": "GGA_X_WC", } norm_x = exchange_map.get(exchange) if norm_x: xc_list.append(norm_x) # Handle the correlation part if correlation: correlation = correlation.upper() correlation_map = { "PERDEW-BURKE-ERNZERHOF": "GGA_C_PBE", "PERDEW-WANG GGA": "GGA_C_PW91", "LEE-YANG-PARR": "GGA_C_LYP", } norm_c = correlation_map.get(correlation) if norm_c: xc_list.append(norm_c) # Shortcuts if norm_x == "GGA_X_PBE" and norm_c == "GGA_C_PBE" and hybridization == 25.00: section = section_XC_functionals() section.XC_functional_name = "HYB_GGA_XC_PBEH" section.XC_functional_weight = 1 return [section] # Go throught the XC list and add the sections and gather a summary functionals = [] if hybridization: section = section_XC_functionals() section.XC_functional_name = "HF_X" section.XC_functional_weight = float(hybridization) / 100 functionals.append(section) for xc in xc_list: section = section_XC_functionals() weight = 1.0 if hybridization and "_X_" in xc: weight = 1.0 - float(hybridization) / 100 section.XC_functional_name = xc section.XC_functional_weight = weight functionals.append(section) return functionals def to_libxc_name(functionals): """Given a list of section_XC_functionals, returns the single string that represents them all. """ return "+".join("{}*{}".format(x.XC_functional_weight, x.XC_functional_name) for x in sorted(functionals, key=lambda x: x.XC_functional_name))
64,626
20,538
import traceback

from functools import wraps
from collections import namedtuple

from .constants import HttpStatusCodes
from . import lambda_errors
from .lambda_proxy_response import LambdaProxyResponse

# Error classes whose own status_code/message are trusted to map directly
# onto the HTTP response; everything else is reported as a 500.
KNOWN_ERRORS = [lambda_errors.AlreadyExistsError,
                lambda_errors.NotFoundError,
                lambda_errors.ValidationError,
                lambda_errors.MissingParameterError,
                lambda_errors.InvalidParameterError,
                lambda_errors.InvalidFunctionRequestError,
                lambda_errors.InvalidUserError,
                lambda_errors.RelatedRecordsExistError,
                lambda_errors.GeneralError]

# The (status_code, payload, url) triple a wrapped handler returns.
FunctionResponse = namedtuple('FunctionResponse', ['status_code', 'payload', 'url'])


def handle_exception(e):
    """Translate an exception into a (status_code, message, repr) triple.

    Exceptions whose exact type appears in KNOWN_ERRORS supply their own
    status code and message; any other exception becomes a generic
    Internal Server Error.
    """
    if type(e) in KNOWN_ERRORS:
        return e.status_code, e.message, repr(e)
    # TODO: check for SNS notification environment vars and dispatch accordingly
    # TODO: include traceback information in the notification
    # traceback_info = traceback.format_exc().splitlines()
    return HttpStatusCodes.INTERNAL_SERVER_ERROR, f"An unhandled exception was raised: {e}", repr(e)


def lambda_proxy_response_wrapper():
    """
    A service wrapper that handles error handling and correct formatting of our Lambda function responses.

    Lambda function handlers can add this as a wrapper so they only need to make calls to the relevant methods and
    return a FunctionResponse object. This wrapper will handle the correct response formatting.

    Status code for errors are contained within the assigned error class itself. Unhandled exceptions will always
    raise a 500 Internal Server Error
    :return:
    """
    def wrapper(func):
        @wraps(func)
        def decorated_view(*args, **kwargs):
            # Every outcome -- success or failure -- goes through one
            # LambdaProxyResponse so the API Gateway format is uniform.
            resp = LambdaProxyResponse()
            try:
                resp.status, resp.payload, resp.location = func(*args, **kwargs)
            except Exception as e:
                resp.status, resp.error, resp.error_type = handle_exception(e)
            return resp.make_response()
        return decorated_view
    return wrapper
2,164
538
import os

import numpy as np
from PIL import Image
import cv2


class ImageMaskUtil:
    """Utility over a directory of images and a parallel directory of
    segmentation masks: loads pairs by index, colorizes masks and overlays
    them on the source image.
    """

    # RGB color used to tint the binary mask for each label class.
    label_color_map = {
        'background': (0, 0, 0),  # background
        'apple': (224, 0, 224),  # apple
    }

    def __init__(self, img_dir=None, mask_dir=None, transforms=None):
        """Index the image and mask directories.

        :param img_dir: directory containing the source images.
        :param mask_dir: directory containing the corresponding masks.
        :param transforms: optional transform object, stored but not applied
            here (used by subclasses/callers -- TODO confirm).
        :raises FileNotFoundError: if either directory is missing.
        :raises ValueError: if image and mask counts differ.
        """
        if not img_dir or not os.path.exists(img_dir):
            raise FileNotFoundError("Image path does not exist")
        if not mask_dir or not os.path.exists(mask_dir):
            raise FileNotFoundError("Mask path does not exist")
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.transforms = transforms
        # Load all image and mask files, sorting them to ensure they are
        # aligned by filename. str.endswith accepts a tuple of suffixes.
        file_types = ("png", "jpg", "jpeg")
        self.imgs = [i for i in sorted(os.listdir(img_dir)) if i.endswith(file_types)]
        self.masks = [i for i in sorted(os.listdir(mask_dir)) if i.endswith(file_types)]
        if len(self.imgs) != len(self.masks):
            raise ValueError("Number of images must be equal to number of masks")

    @staticmethod
    def overlay_mask_on_image(image: np.ndarray, mask: np.ndarray, alpha=1.0, beta=0.9, gamma=0.0) -> np.ndarray:
        """Blend a colorized mask onto an image.

        alpha = 1    # transparency for the original image
        beta = 0.9   # transparency for the segmentation map
        gamma = 0    # scalar added to each sum
        """
        # mask = cv2.cvtColor(mask, cv2.COLOR_RGB2BGR)
        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        return cv2.addWeighted(image, alpha, mask, beta, gamma)

    def __getitem__(self, idx):
        # Placeholder: subclasses are expected to implement item access.
        pass

    def get_img_mask_overlay(self, idx, n_channels=3, cls="apple"):
        """Return a PIL image of image ``idx`` with its mask colorized as
        ``cls`` and blended on top.
        """
        if idx >= self.__len__():
            raise IndexError(f"requested image index {idx} must be less than {self.__len__()}")
        # Load image and mask
        img_path = os.path.join(self.img_dir, self.imgs[idx])
        mask_path = os.path.join(self.mask_dir, self.masks[idx])
        img: Image.Image = Image.open(img_path).convert("RGB")
        img_res = np.array(img)
        mask: Image.Image = Image.open(mask_path)
        # Convert the PIL image to np array
        mask_np = np.array(mask)
        # Convert non-zero values to constant 1, retains 0 (background)
        mask_np = np.minimum(1, mask_np)
        # Tint the binary mask with the class color, channel by channel.
        mask_col = []
        for i in range(0, n_channels):
            res = np.multiply(mask_np, np.full_like(mask_np, self.label_color_map[cls][i], dtype=np.uint8))
            mask_col.append(res)
        mask_res = np.stack(mask_col, axis=2)
        return Image.fromarray(ImageMaskUtil.overlay_mask_on_image(img_res, mask_res))

    def __len__(self):
        """Number of image/mask pairs."""
        return len(self.imgs)

    def get_img_name(self, idx):
        """Filename of image ``idx``."""
        return self.imgs[idx]

    def show_image(self, idx):
        """Open image ``idx`` in the default viewer."""
        if idx >= self.__len__():
            raise IndexError(f"requested image index {idx} must be less than {self.__len__()}")
        img_path = os.path.join(self.img_dir, self.imgs[idx])
        img: Image.Image = Image.open(img_path).convert("RGB")
        img.show()

    def show_mask(self, idx):
        """Open mask ``idx`` in the default viewer."""
        if idx >= self.__len__():
            raise IndexError(f"requested mask index {idx} must be less than {self.__len__()}")
        mask_path = os.path.join(self.mask_dir, self.masks[idx])
        mask: Image.Image = Image.open(mask_path)
        mask.show()
3,540
1,193
def catalan_direct(n):
    """Catalan number C(n) via the closed form (2n)! / ((n+1)! n!).

    Uses integer (floor) division: the quotient is always exact, and true
    division would lose precision (and overflow to float for large n).
    """
    return factorial(2 * n) // (factorial(n + 1) * factorial(n))


def catalan_binomial(n):
    """Catalan number C(n) via the binomial identity C(2n,n) - C(2n,n-1)."""
    if n == 0:
        return 1
    return binomial_coefficient(2 * n, n) - binomial_coefficient(2 * n, n - 1)


def catalan_sum(n):
    """Catalan number C(n) via the sum-of-squared-binomials identity."""
    return sum(binomial_coefficient(n, i) ** 2 for i in range(n + 1)) // (n + 1)


def binomial_coefficient(n, k):
    """Binomial coefficient C(n, k) as an exact integer."""
    return factorial(n) // (factorial(k) * factorial(n - k))


def factorial(n):
    """n! computed iteratively (avoids recursion-depth limits for large n)."""
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result


if __name__ == '__main__':
    # Make sure all implementations give the same results.
    # range() replaces the Python-2-only xrange().
    print(all(catalan_direct(n) == catalan_binomial(n) == catalan_sum(n) for n in range(100)))
685
267
from az.cli import az
import os, configparser


def checkVars(config):
    """Validate that every section of the config defines all required options.

    :raises ValueError: naming the first missing option and its section.
    """
    required_vars = {
        'AADclientID': False,
        'AADtenantID': False,
        'AADsecret': False,
        'AADGroupName': False,
        'VoltTenantApiToken': False,
        'VoltTenantTokenName': False,
        'VoltTenantName': False,
        'Region': False,
        'ResourceGroupName': False,
        'StorageName': False,
        'KeyVaultName': False,
        'FunctionAppName': False,
        'TeamsWebhookUrl': False
    }
    for s in config.sections():
        for v in required_vars:
            required_vars[v] = config.has_option(s, v)
            if not required_vars[v]:
                raise ValueError("A value must be provided for: {0} in section: {1}".format(v, s))


def kvSecret(vault: str, name: str, value: str):
    """Store a secret in the given Key Vault and return the CLI result."""
    return azCommand("keyvault secret set --vault-name {0} --name {1} --value {2}".format(vault, name, value))


def appSetting(name: str, vault: str, function: str, resourceGroup: str):
    """Point a function-app setting at a Key Vault secret reference."""
    settingURI = azCommand("keyvault secret show --vault-name {0} --name {1} --query id".format(vault, name))
    return azCommand('functionapp config appsettings set --name {0} --resource-group {1} --settings "{2}=@Microsoft.KeyVault(SecretUri={3})"'.format(function, resourceGroup, name, settingURI))


def azCommand(command: str):
    """Run an Azure CLI command; raise RuntimeError on a non-zero exit code."""
    res = az(command)
    if res[0]:
        raise RuntimeError(res[2])
    return res[1]


def azCmdNoError(command: str):
    """Run an Azure CLI command without raising on failure."""
    res = az(command)
    #NOTE:: Intentionally returning the entire dict response (in case we need to do something else with it)
    return res


def deployBase(section):
    """Deploy the base resources (resource group, storage account, Key Vault,
    function app and its settings) described by one config section.
    """
    secrets = {
        "VoltTenantName": section['VoltTenantName'],
        "VoltTenantApiToken": section['VoltTenantApiToken'],
        "VoltTenantTokenName": section['VoltTenantTokenName'],
        "AADclientID": section['AADclientID'],
        "AADtenantID": section['AADtenantID'],
        "AADsecret": section['AADsecret'],
        "AADGroupName": section['AADGroupName'],
        "TeamsWebhookUrl": section['TeamsWebhookUrl']
    }
    createRG = "group create --name {0} --location {1}" \
        .format(section['ResourceGroupName'], section['Region'])
    azCommand(createRG)

    createSA = "storage account create --name {0} --location {1} --resource-group {2} --sku Standard_LRS" \
        .format(section['StorageName'], section['Region'], section['ResourceGroupName'])
    azCommand(createSA)

    # KeyVaults are, evidently, **not** idempotent in the Azure CLI. We need
    # treat them differently.
    createKV = "keyvault create --name {0} --resource-group {1} --location {2}" \
        .format(section['KeyVaultName'], section['ResourceGroupName'], section['Region'])
    try:
        azCommand(createKV)
    except RuntimeError:
        # Catch only the error azCommand raises; the previous bare except
        # would also have swallowed KeyboardInterrupt/SystemExit.
        print("KeyVault likely already exists. Skipping creation.")

    for s in secrets:
        kvSecret(section['KeyVaultName'], s, secrets[s])

    createApp = "functionapp create --name {0} --storage-account {1} --consumption-plan-location {2} --resource-group {3} --os-type linux --functions-version 3 --runtime python" \
        .format(section['FunctionAppName'], section['StorageName'], section['Region'], section['ResourceGroupName'])
    azCommand(createApp)

    appId = "functionapp identity assign --resource-group {0} --name {1}" \
        .format(section['ResourceGroupName'], section['FunctionAppName'])
    azCommand(appId)

    principalId = azCommand("functionapp identity show --resource-group {0} --name {1} --query principalId".format(section['ResourceGroupName'], section['FunctionAppName']))

    # NOTE(review): current az CLI spells this flag --secret-permissions
    # (plural) -- confirm against the CLI version in use before changing.
    kvPolicy = "keyvault set-policy --name {0} --resource-group {1} --object-id {2} --secret-permission get list" \
        .format(section['KeyVaultName'], section['ResourceGroupName'], principalId)
    azCommand(kvPolicy)

    for a in secrets:
        appSetting(a, section['KeyVaultName'], section['FunctionAppName'], section['ResourceGroupName'])


def main():
    """Read funcConfig.ini next to this script and deploy every section."""
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), 'funcConfig.ini'))
    checkVars(config)
    for section in config.sections():
        deployBase(config[section])
        print("Deployment for {0} complete.".format(section))
    print("All Deployments Complete.")


if __name__ == "__main__":
    main()
4,340
1,318
# Exercise 109: demo for the currency-formatting helper package.
# Reads a value, asks whether results should be formatted as currency,
# then prints half / double / +10% / -13% via the ex109 helpers.
from pacotes import ex109

print('\033[36;40mExercício Python #109​ - Formatando Moedas em Python\033[m\n')

v = float(input('Digite o valor: '))
escolhaMoeda = False  # True -> helpers return currency-formatted strings
while True:
    # Keep asking until the first character of the answer is 's' or 'n'.
    escolha = str(input('Ele devera ser formatado como moeda? [s/n]: ')).strip().lower()[0]
    if escolha == 's' or escolha == 'n':
        if escolha == 's':
            escolhaMoeda = True
        break
    else:
        print('\nVALOR INVÁLIDO. TENTE NOVAMENTE!\n')

print(f'\nA metade de {v} é {ex109.metade(v, escolhaMoeda)}')
print(f'O dobro de {v} é {ex109.dobro(v, escolhaMoeda)}')
print(f'Aumentando 10%, temos {ex109.aumentar(v, 10, escolhaMoeda)}')
print(f'Reduzindo 13%, temos {ex109.diminuir(v, 13, escolhaMoeda)}')
710
311
from django import template
from django.utils.translation import ugettext as _
from cv.views import education_view_context, experience_view_context, interests_view_context, skills_view_context
from cv.views import skill_view_context, school_view_context, hobby_view_context, job_view_context
from cv.views import date_view_context
from root.utils import divide_index

# Template-tag library: each tag renders one CV fragment, delegating
# context construction to the matching helper in cv.views.
register = template.Library()


@register.inclusion_tag('components/cv/education.html')
def education():
    """Render the education section."""
    return education_view_context()


@register.inclusion_tag('components/cv/experience.html')
def experience():
    """Render the work-experience section."""
    return experience_view_context()


@register.inclusion_tag('components/cv/interests.html')
def interests():
    """Render the interests section."""
    return interests_view_context()


@register.inclusion_tag('components/cv/skills.html')
def skills():
    """Render the skills section."""
    return skills_view_context()


@register.inclusion_tag('components/cv/single/school.html')
def school(item, wow=2):
    """Render one school entry.

    ``wow`` is scaled to a fraction via divide_index(wow, 10) —
    presumably an animation offset; TODO confirm against the template.
    """
    return school_view_context(item, divide_index(wow, 10))


@register.inclusion_tag('components/cv/single/job.html')
def job(item, wow=2):
    """Render one job entry (same ``wow`` scaling as ``school``)."""
    return job_view_context(item, divide_index(wow, 10))


@register.inclusion_tag('components/cv/single/hobby.html')
def hobby(item):
    """Render one hobby entry."""
    return hobby_view_context(item)


@register.inclusion_tag('components/cv/single/skill.html')
def skill(item):
    """Render one skill entry."""
    return skill_view_context(item)


@register.inclusion_tag('components/cv/utils/date.html')
def date(start, end):
    """Render a start/end date range."""
    return date_view_context(start, end)
1,473
477
# Cinema ticket revenue: seats (rows * columns) times the per-seat price
# of the screening type. Unknown types produce no output, as before.
screeing_type = input()
rows = int(input())
colums = int(input())

seat_prices = {'Premiere': 12, 'Normal': 7.5, 'Discount': 5}

if screeing_type in seat_prices:
    total_income = rows * colums * seat_prices[screeing_type]
    print(f'{total_income:.2f} leva')
296
130
#! /usr/bin/env python3 # -*- coding: utf-8 -*- """ Contains list of APIs for Development model """ __author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)' # Index INDEX = '/index' # Environment params NUM_UES = '/num_ues' NUM_APS = '/num_aps' AP_LIST = '/ap_list' AP_INFO = '/ap_info/' BR_LIST = '/br_list' BR_INFO = '/br_info/' UE_LIST = '/ue_list' UE_INFO = '/ue_info/' RESET_NETWORK = '/reset_network' RESET_NETWORK_AFTER_MOVE = '/reset_network_after_move' NEIGHBORING_APS = '/neighboring_aps/' UE_THROUGHPUT = '/ue_throughput/' UE_SLA = '/ue_sla/' UE_SIGNAL_POWER = '/ue_signal_power/' AP_SLAS = '/ap_slas/' HANDOFF = '/handoff/'
662
298
#!/usr/bin/env python3
"""Emit a fixed batch of ExaBGP route announcements on stdout, then idle
forever so the supervising process keeps the session open."""
import sys
import time
import struct
import socket

messages = [
    'announce route 66.185.112.0/24 next-hop self as-path [174,2914,42]',
    'announce route 144.41.0.0/16 next-hop self as-path [174,553]',
    'announce route 193.16.4.0/22 next-hop self as-path [174,1299,680]',
    'announce route 23.162.96.0/24 next-hop self as-path [174]',
    'announce route 1.36.160.0/19 next-hop self as-path [174]',
    'announce route 131.196.192.0/24 next-hop self as-path [174 16735 16735 28158 1]',
    'announce route 194.20.8.0/21 next-hop self as-path [8968 3313]',
    'announce route 151.17.0.0/16 next-hop self as-path [8968 1267]',
    'announce route 211.13.0.0/17 next-hop self as-path [1299 2518 2518]',
]

# Write each announcement and flush immediately so the peer process
# sees it without buffering delays.
for message in messages:
    sys.stdout.write(message + '\n')
    sys.stdout.flush()

# Stay alive; exiting would tear down the announced routes.
while True:
    time.sleep(1)
961
463
from optparse import OptionParser
import os
import tempfile
from git import Repo
from python_terraform import *
import logging
import json
import re
import copy

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)

tf = Terraform()

default_git_user = os.getenv('GIT_USER')
default_git_token = os.getenv('GIT_TOKEN')
default_git_repo = os.getenv('GIT_REPO')
default_git_domain = os.getenv('GIT_DOMAIN')

# Marker embedded in trigger commits so tf runs can recover parameters.
COMMIT_TAG = "[terraspanner]"


def _parse_vars(var_options):
    """Turn repeated ``--var "name value"`` options into the dict that
    python-terraform expects, or None when no vars were given."""
    if var_options is None:
        return None
    return {var.split()[0]: var.split()[1] for var in var_options}


def tf_plan(arguments, options):
    """Run ``terraform plan`` on the directory given as arguments[0].

    Logs terraform's stdout; raises on a non-zero exit code.
    """
    path = arguments[0]
    return_code, stdout, stderr = tf.plan(path,
                                          compact_warnings=options.compact_warnings,
                                          destroy=options.destroy,
                                          detailed_exitcode=options.detailed_exitcode,
                                          lock=options.lock,
                                          no_color=options.no_color,
                                          lock_timeout=options.lock_timeout,
                                          out=options.out,
                                          state=options.state,
                                          parallelism=options.parallelism,
                                          refresh=options.refresh,
                                          target=options.target,
                                          var_file=options.var_file,
                                          var=_parse_vars(options.var))
    if stdout is not None:
        logging.info(stdout)
    if return_code != 0:
        raise Exception(stderr)


def tf_apply(arguments, options):
    """Run ``terraform apply`` on the directory given as arguments[0].

    BUG FIX: this previously called ``tf.plan`` instead of ``tf.apply``,
    crashed when ``--var`` was not supplied (no None guard), and raised
    whenever stderr was non-empty even on success; it now mirrors
    tf_plan and fails only on a non-zero exit code.
    """
    path = arguments[0]
    return_code, stdout, stderr = tf.apply(path,
                                           auto_approve=options.auto_approve,
                                           backup=options.backup,
                                           state_out=options.state_out,
                                           compact_warnings=options.compact_warnings,
                                           lock=options.lock,
                                           no_color=options.no_color,
                                           lock_timeout=options.lock_timeout,
                                           state=options.state,
                                           parallelism=options.parallelism,
                                           refresh=options.refresh,
                                           target=options.target,
                                           var_file=options.var_file,
                                           var=_parse_vars(options.var))
    if stdout is not None:
        logging.info(stdout)
    if return_code != 0:
        raise Exception(stderr)


def _commit_parameter(local_repo_path, key):
    """Extract one parameter from the HEAD commit of ``local_repo_path``.

    Trigger commits have the quoted form '"[terraspanner]{json}"' (see
    trigger_terraform_repo); returns the value for ``key`` or None when
    the commit is not a trigger commit or cannot be parsed.
    """
    try:
        repo = Repo(local_repo_path)
        message = repo.head.commit.message
        if COMMIT_TAG not in message:
            return None
        # Strip the leading quote + tag and the trailing quote/newline.
        payload = message[len(COMMIT_TAG) + 1:len(message) - 2]
        return json.loads(payload)[key]
    except Exception as ex:
        logging.debug(ex)
        return None


def try_get_target_from_commit(local_repo_path):
    """Recover the --target list encoded in the last trigger commit."""
    return _commit_parameter(local_repo_path, 'target')


def try_get_var_from_commit(local_repo_path):
    """Recover the --var list encoded in the last trigger commit."""
    return _commit_parameter(local_repo_path, 'var')


tf_commands = {
    'plan': tf_plan,
    'apply': tf_apply
}


def run_tf_command(arguments, options):
    """Dispatch ``tf <plan|apply> <path>``; missing --target/--var fall
    back to values encoded in the latest trigger commit."""
    if options.target is None:
        options.target = try_get_target_from_commit(options.local_repo_path)
    if options.var is None:
        options.var = try_get_var_from_commit(options.local_repo_path)
    command = arguments[0]
    command_arguments = arguments[1:]
    tf_commands[command](command_arguments, options)


def validate_trigger_terraform_repo(options):
    """Fail fast when the git options required by 'trigger' are missing."""
    if options.token is None or options.domain is None or options.repo is None:
        raise Exception('some parameters are missing')


def trigger_terraform_repo(_, options):
    """Push an empty commit that encodes target/var, so a CI watcher on
    the repo re-runs terraform with those parameters."""
    validate_trigger_terraform_repo(options)
    with tempfile.TemporaryDirectory() as temp_repo_dir:
        git_url = f'https://{options.token}@{options.domain}/{options.repo}.git'
        repo = Repo.clone_from(git_url, temp_repo_dir)
        repo.git.commit('--allow-empty', '-m',
                        f'"{COMMIT_TAG}{json.dumps({ "target": options.target, "var": options.var })}"')
        repo.git.push()


commands = {
    'trigger': trigger_terraform_repo,
    'tf': run_tf_command
}


def main():
    """CLI entry point: ``terraspanner <trigger|tf> [args...]``."""
    parser = OptionParser()
    # trigger options
    parser.add_option('-t', '--git-token', dest='token', help='git token', metavar='GIT_TOKEN', default=default_git_token)
    parser.add_option('-d', '--git-domain', dest='domain', help='git domain (ex. github.com)', metavar='GIT_DOMAIN', default=default_git_domain)
    # BUG FIX: --git-repo previously defaulted to the git *domain*.
    parser.add_option('-r', '--git-repo', dest='repo', help='git repo (ex. )', metavar='GIT_REPO', default=default_git_repo)
    # repo finder
    parser.add_option('-l', '--local-repo-path', dest='local_repo_path', help='local repository path (defaults to local folder)', metavar='LOCAL_REPO_PATH', default=os.getcwd())
    # terraform plan options
    parser.add_option('--compact-warnings', dest='compact_warnings', action='store_true', help='If Terraform produces any warnings that are not accompanied by errors, show them in a more compact form that includes only the summary messages.')
    parser.add_option('--destroy', dest='destroy', action='store_true', help='If set, a plan will be generated to destroy all resources managed by the given configuration and state.')
    parser.add_option('--detailed-exitcode', dest='detailed_exitcode', action='store_true', help='return detailed exit codes when the command exits.')
    parser.add_option('--lock', dest='lock', action='store_true', help='Lock the state file when locking is supported.')
    parser.add_option('--no-color', dest='no_color', action='store_true', help='If specified, output won\'t contain any color.')
    parser.add_option('--lock-timeout', dest='lock_timeout', help='Duration to retry a state lock.', metavar='TF_LOCK_TIMEOUT')
    parser.add_option('--out', dest='out', help='Write a plan file to the given path. This can be used as input to the "apply" command.', metavar='TF_OUT')
    parser.add_option('--state', dest='state', help='Path to a Terraform state file to use to look up Terraform-managed resources. By default it will use the state "terraform.tfstate" if it exists.', metavar='TF_STATE')
    parser.add_option('--parallelism', dest='parallelism', help='Limit the number of concurrent operations. Defaults to 10.', metavar='TF_PARALLELISM')
    parser.add_option('--refresh', dest='refresh', action='store_true', help='Update state prior to checking for differences.', metavar='TF_REFRESH')
    parser.add_option('--target', dest='target', action='append', help='Resource to target. Operation will be limited to this resource and its dependencies. This flag can be used multiple times.', metavar='TF_TARGET')
    parser.add_option('--var', dest='var', action='append', help='Set a variable in the Terraform configuration. This flag can be set multiple times.', metavar='TF_VAR')
    parser.add_option('--var-file', dest='var_file', help='Set variables in the Terraform configuration from a file.', metavar='TF_VAR_FILE')
    # terraform apply options (the remaining apply flags are shared with
    # plan above, so they are declared only once)
    parser.add_option('--auto-approve', dest='auto_approve', action='store_true', help='Skip interactive approval of plan before applying.', default=True)
    parser.add_option('--backup', dest='backup', help='Path to backup the existing state file before modifying. Defaults to the "-state-out" path with ".backup" extension. Set to "-" to disable backup.')
    parser.add_option('--state-out', dest='state_out', help='Path to write state to that is different than "-state". This can be used to preserve the old state.')

    (options, arguments) = parser.parse_args()
    command = arguments[0]
    command_args = arguments[1:]
    try:
        commands[command](command_args, options)
    except Exception as ex:
        # BUG FIX: logging.error was called with ex as an extra positional
        # argument but no format placeholder for it.
        logging.error('command does not exist or failed with: %s', ex)


if __name__ == "__main__":
    main()
9,349
2,777
# Get a string, print a list of chars and the indices of those chars.
# Output: one line per distinct character, in order of first appearance,
# as "<char>:<idx>/<idx>/...".
# Improvements: removed the dead `str_dict = {}` assignment and replaced
# the repeated O(n^2) str.index scans with a single enumerate pass that
# produces the same key order (first occurrence) and the same indices.
user_input = input()

indices = {}
for pos, char in enumerate(user_input):
    # dicts preserve insertion order, so keys appear in first-seen order.
    indices.setdefault(char, []).append(str(pos))

result = [char + ':' + '/'.join(positions) for char, positions in indices.items()]
print(*result, sep='\n')
543
178
# Plot the claimed time-complexity curves of six scheduling +
# deadlock-handling algorithm combinations on a log-scaled y axis.
import numpy as np
import matplotlib.pyplot as plt

# Global font size for all figure text.
plt.rcParams.update({'font.size': 22})
# marker size
n = 10
# Number of resources on the x axis: 1, 11, 21, ..., 91.
x = np.arange(1, 100, 10)
# x = np.arange(1, 7)
# Legend labels: each combination is shown as an abstract name ALG_1..ALG_6.
algo_dict = {'RMS+Bankers': r'$ALG_1$', 'EDF+Bankers': r'$ALG_2$', 'RMS+wound wait': r'$ALG_3$', 'RMS+wait die': r'$ALG_4$', 'EDF+wound wait': r'$ALG_5$', 'EDF+wait die': r'$ALG_6$'}
# Component cost curves; units are abstract operation counts —
# TODO confirm against the accompanying analysis.
y1 = x ** 2
y2 = x * (2 ** (1 / x) - 1)  # resembles the RMS utilization bound term
y3 = x * np.log2(x)
y4 = x
y5 = x
# Each algorithm's total cost is the sum of its two component curves.
case1 = y1 + y2
case2 = y3 + y1
case3 = y2 + y4
case4 = y2 + y5
case5 = y3 + y4
case6 = y3 + y5
plt.grid(True)
# Log scale: the six curves span several orders of magnitude.
plt.yscale('log')
plt.plot(x, case1, 'r--+', label=algo_dict['RMS+Bankers'], markersize=n)
plt.plot(x, case2, 'g-->', label=algo_dict['EDF+Bankers'], markersize=n)
plt.plot(x, case3, 'y--o', label=algo_dict['RMS+wound wait'], markersize=n)
plt.plot(x, case4, 'b--*', label=algo_dict['RMS+wait die'], markersize=n)
plt.plot(x, case5, 'c--s', label=algo_dict['EDF+wound wait'], markersize=n)
plt.plot(x, case6, 'k--^', label=algo_dict['EDF+wait die'], markersize=n)
# NOTE(review): the axis labels look swapped relative to the data
# (y holds cost curves, not a process count) — confirm intent.
plt.ylabel('No of Process')
plt.xlabel('No of Resources')
plt.title('Time Complexity Analysis')
# ax_dl.set_title('Deadlock Prevention/Avoidence Algorithms')
plt.legend()
# ax_rt.legend()
plt.show()
#https://matplotlib.org/3.1.1/tutorials/text/mathtext.html
1,323
620
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ykdl extractor for Qixiu (奇秀) live streams on m-glider-xiu.pps.tv.

from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.util.html import get_content, add_header
from ykdl.util.match import match1
from ykdl.compact import urlencode

from .iqiyi.util import get_macid

import json
import time
import random
import hashlib


def gsign(params):
    """Compute the request signature: SHA-1 over the sorted 'key:value'
    pairs concatenated with a fixed app secret."""
    s = []
    for key in sorted(params.keys()):
        s.append('{}:{}'.format(key, params[key]))
    # Hard-coded signing secret appended last.
    s.append('w!ytDgy#lEXWoJmN4HPf')
    s = ''.join(s)
    return hashlib.sha1(s.encode('utf8')).hexdigest()


def getlive(uid, rate='source'):
    """POST a signed stream-info request for anchor ``uid`` at the given
    bitrate and return the decoded JSON response."""
    tm = int(time.time())
    api = 'https://m-glider-xiu.pps.tv/v2/stream/get.json'
    params = {
        'type_id': 1,
        'vid': 1,
        'anchor_id': uid,
        'app_key': 'show_web_h5',
        'version': '1.0.0',
        'platform': '1_10_101',
        'time': tm,
        'netstat': 'wifi',
        'device_id': get_macid(),
        'bit_rate_type': rate,
        'protocol': 5,
    }
    params['sign'] = gsign(params)
    data = urlencode(params)
    if not isinstance(data, bytes):
        data = data.encode()
    html = get_content(api, data=data)
    return json.loads(html)


class PPS(VideoExtractor):
    name = u"奇秀(Qixiu)"

    # Stream ids in descending quality order (used for final sorting).
    ids = ['TD', 'HD', 'SD']
    # API bitrate name -> internal stream id.
    rate_2_id = {
        'source': 'TD',
        'high': 'HD',
        'smooth': 'SD'
    }
    # API bitrate name -> human-readable profile label.
    rate_2_profile = {
        'source': u'超清',
        'high': u'高清',
        'smooth': u'标清'
    }

    def prepare(self):
        """Scrape the room page, query each available bitrate, and build
        the VideoInfo with one flv stream per bitrate."""
        info = VideoInfo(self.name, True)
        html = get_content(self.url)
        # Anchor id and room metadata are embedded as JSON fragments in
        # the page source.
        self.vid = match1(html, '"user_id":"([^"]+)",')
        title = json.loads(match1(html, '"room_name":("[^"]*"),'))
        artist = json.loads(match1(html, '"nick_name":("[^"]+"),'))
        info.title = u'{} - {}'.format(title, artist)
        info.artist = artist

        def get_live_info(rate='source'):
            # Returns an error message string on failure, None on success.
            data = getlive(self.vid, rate)
            self.logger.debug('data:\n' + str(data))
            if data['code'] != 'A00000':
                return data.get('msg')
            data = data['data']
            # Prefer https flv, then plain flv, then rtmp.
            url = data.get('https_flv') or data.get('flv') or data.get('rtmp')
            if url:
                url = url.replace('rtmp://', 'http://')
                # Cache-busting query parameter.
                # NOTE(review): randrange with a float is deprecated since
                # Python 3.10 — should be 10**4; confirm target version.
                ran = random.randrange(1e4)
                if '?' in url:
                    url = '{}&ran={}'.format(url, ran)
                else:
                    url = '{}?ran={}'.format(url, ran)
                stream_profile = self.rate_2_profile[rate]
                stream_id = self.rate_2_id[rate]
                info.stream_types.append(stream_id)
                info.streams[stream_id] = {
                    'video_profile': stream_profile,
                    'container': 'flv',
                    'src' : [url],
                    'size': float('inf')
                }
            error_msges = []
            if rate == 'source':
                # First (source) call also fans out to every other
                # advertised bitrate, collecting their error messages.
                rate_list = data['rate_list']
                if 'source' in rate_list:
                    rate_list.remove('source')
                for rate in rate_list:
                    error_msg = get_live_info(rate)
                    if error_msg:
                        error_msges.append(error_msg)
            if error_msges:
                return ', '.join(error_msges)

        error_msg = get_live_info()
        if error_msg:
            self.logger.debug('error_msg:\n' + error_msg)
        assert len(info.stream_types), error_msg or 'can\'t play this live video!!'
        # Order streams best-quality first.
        info.stream_types = sorted(info.stream_types, key=self.ids.index)
        return info

site = PPS()
3,633
1,210
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from home.models import CPerson, student
from django.contrib.auth.models import User
from .models import Tutorial

# Create your views here

_NOT_ALLOWED_MSG = "You're not allowed to view this page........."


def _active_student(request):
    """Resolve the request's user to an active student record.

    Returns a ``(activeuser, error_response)`` pair where exactly one
    element is None.  ``error_response`` is what the view should return
    when the user is anonymous, unknown, not a student, or inactive.

    Refactoring note: this logic was duplicated (with a latent NameError
    on the helper flag ``s`` for non-student users) across every view in
    this module; it is now defined once.
    """
    if not request.user.is_authenticated:
        return None, redirect('accounts:login')
    try:
        cperson = CPerson.objects.get(user=request.user)
    except Exception:
        # No CPerson row for this user -> not a member of the site.
        return None, HttpResponse(_NOT_ALLOWED_MSG)
    if cperson.type != 'student' or not request.user.is_active:
        return None, HttpResponse(_NOT_ALLOWED_MSG)
    return student.objects.get(cperson=cperson), None


def _render_example(request, temp_name):
    """Shared body of the static example/statement tutorial pages."""
    activeuser, error = _active_student(request)
    if error is not None:
        return error
    context = {
        'tut_list': Tutorial.objects.all(),
        'stu_progress': activeuser.progress,
    }
    return render(request, temp_name, context)


@never_cache
def stu_home(request):
    """Student dashboard: profile data plus the full tutorial list."""
    activeuser, error = _active_student(request)
    if error is not None:
        return error
    context = {
        'stu_first_name': request.user.first_name,
        'stu_last_name': request.user.last_name,
        'stu_email': request.user.email,
        'stu_collagename': activeuser.collage_name,
        'stu_progress': activeuser.progress,
        'tut_list': Tutorial.objects.all(),
        'stu': True,
    }
    return render(request, 'tutorials/stu_home.html', context)


def show_tutorial(request, tutorial_id):
    """Render one tutorial page with previous/next navigation links."""
    activeuser, error = _active_student(request)
    if error is not None:
        return error
    tutorial = Tutorial.objects.get(pk=tutorial_id)
    stu_progress = activeuser.progress
    context = {
        'tut_id': tutorial_id,
        'tut_name': tutorial.name,
        'pre_link': str(get_pretut(tut_id=tutorial_id).id),
        'next_link': str(get_nexttut(tut_id=tutorial_id).id),
        'tut_list': Tutorial.objects.all(),
        'stu_progress': stu_progress,
        # Navigation is disabled until the student's progress has
        # reached this tutorial.
        'enable': not is_progresstemplate(tutorial_id, stu_progress),
        'tut': True,
    }
    return render(request, tutorial.html_name, context)


def get_pretut(tut_id):
    """Return the closest existing Tutorial with a pk below ``tut_id``;
    the first tutorial is its own predecessor."""
    if tut_id == 1:
        return Tutorial.objects.get(pk=1)
    while True:
        tut_id = tut_id - 1
        pre_tut = Tutorial.objects.filter(pk=tut_id).first()
        if pre_tut:
            return pre_tut


def get_nexttut(tut_id):
    """Return the closest existing Tutorial with a pk above ``tut_id``.

    NOTE(review): loops forever when no higher pk exists — presumably the
    templates never link past the last tutorial; confirm.
    """
    while True:
        tut_id = tut_id + 1
        next_tut = Tutorial.objects.filter(pk=tut_id).first()
        if next_tut:
            return next_tut


def is_progresstemplate(tut_id, progress):
    """True when the number of existing tutorials with pk <= ``tut_id``
    equals the student's ``progress`` (i.e. this is their frontier)."""
    count = 0
    temp_id = 1
    while temp_id <= tut_id:
        if Tutorial.objects.filter(pk=temp_id).first():
            count = count + 1
        temp_id = temp_id + 1
    return count == progress


# Static example/statement pages: all share _render_example; only the
# template differs.

def arithmetic_example(request):
    return _render_example(request, "tutorials/arithmetic_example.html")


def relational_example(request):
    return _render_example(request, "tutorials/relational_example.html")


def logical_example(request):
    return _render_example(request, "tutorials/logical_example.html")


def bitwise_example(request):
    return _render_example(request, "tutorials/bitwise_example.html")


def assignment_example(request):
    return _render_example(request, "tutorials/assignment_example.html")


def misc_example(request):
    return _render_example(request, "tutorials/misc_example.html")


def precedence_example(request):
    return _render_example(request, "tutorials/precedence_example.html")


def if_statement(request):
    return _render_example(request, "tutorials/if_statement.html")


def ifelse_statement(request):
    return _render_example(request, "tutorials/ifelse_statement.html")


def nestedif_statement(request):
    return _render_example(request, "tutorials/nestedif_statement.html")


def switch_statement(request):
    return _render_example(request, "tutorials/switch_statement.html")


def nestedswitch_statement(request):
    return _render_example(request, "tutorials/nestedswitch_statement.html")
12,825
3,667
import dash_bootstrap_components as dbc
import dash_html_components as html


def Navbar(current_program=None):
    """Build the shared top navigation bar.

    :param current_program: optional program name appended to the brand
        title ("Tree House Explorer: <name>"); falsy -> plain brand.
    :returns: a dbc.NavbarSimple component with a program-switcher
        dropdown linking to the app routes.
    """
    if not current_program:
        brand_title = "Tree House Explorer"
    else:
        brand_title = f"Tree House Explorer: {current_program}"
    navbar = dbc.NavbarSimple(
        children=[
            # Empty spacer div (black background).
            html.Div(
                children=[],
                style={'background-color': 'black'}
            ),
            html.Div(
                children=[
                    # Dropdown that switches between the three sub-apps.
                    dbc.DropdownMenu(
                        nav=True,
                        in_navbar=True,
                        label="Program Toggle",
                        color="warning",
                        right=True,
                        children=[
                            dbc.DropdownMenuItem(
                                "Tree Viewer",
                                href="/apps/tree_viewer"
                            ),
                            dbc.DropdownMenuItem(
                                "p-Distance Tracer",
                                href="/apps/p_distance_tracer",
                            ),
                            dbc.DropdownMenuItem(
                                "Data Preparation",
                                href="/apps/data_prep"
                            ),
                        ],
                        style={
                            "color": 'black',
                            "font-size": "20px",
                            "border": "2px black solid",
                            "border-radius": "5px",
                        },
                    ),
                ],
            ),
        ],
        brand=brand_title,
        brand_href="/",
        brand_style={
            "color": 'black',
            "font-size": "35px",
            "border": "2px black solid",
            "border-radius": "5px",
            "padding-left": "10px",
            "padding-right": "10px",
            "padding-top": "0px",
            "padding-bottom": "0px",
        },
        sticky="top",
        fluid=True,
        color="warning"
    )
    return navbar
2,086
497
def mat_exp(mat, p, mod):
    """
    Fast Matrix Exponentiation with modulo

    Parameters
    ----------
    mat: numpy matrix
        The matrix to exponentiate. It must be a matrix that supports
        the * (matrix multiply), ** and % operators.
    p: int
        The power to raise the matrix to. Negative powers use the
        matrix inverse (``mat ** -1``) of ``mat``.
    mod: int
        The modulo value used to calculate elements of the matrix

    Returns
    -------
    out: numpy matrix
        Exponentiated matrix, elementwise reduced mod ``mod``.
    """
    if p < 0:
        # A^(-p) == (A^-1)^p
        mat = mat ** (-1)
        p = -p
    if p == 0:
        # BUG FIX: A^0 is the identity matrix, not A itself.
        # ``mat ** 0`` yields the identity for matrix types.
        return (mat ** 0) % mod
    # Binary exponentiation: square-and-multiply, reducing mod `mod`
    # at every step to keep entries small.
    result = 1
    while p > 1:
        if p % 2 == 0:
            mat = (mat * mat) % mod
            p //= 2
        else:
            result = (result * mat) % mod
            mat = (mat * mat) % mod
            p = (p - 1) // 2
    return (mat * result) % mod
784
250
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class Unit(object):
    """The smallest element: a numeric value plus an on/off flag.

    Truthiness of a Unit reflects whether it is currently enabled.
    """

    def __init__(self, value):
        if not isinstance(value, (float, int)):
            raise TypeError(
                f"@value must be integer or float value, but {value} was applied.")
        self._val = value
        self._active = True

    def __bool__(self):
        # A Unit is truthy exactly while it is enabled.
        return self._active

    @property
    def value(self):
        """float: value of the unit"""
        return self._val

    def enable(self):
        """Switch the unit on."""
        self._active = True

    def disable(self):
        """Switch the unit off."""
        self._active = False


class Series(object):
    """An ordered collection of Unit objects."""

    def __init__(self):
        self._units = []

    def __iter__(self):
        return iter(self._units)

    def add(self, unit):
        """Append one unit to the series.

        Args:
            unit (Unit): the smallest unit

        Raises:
            TypeError: unit is not an instance of Unit
        """
        if not isinstance(unit, Unit):
            raise TypeError("@unit must be a instance of Unit")
        self._units.append(unit)

    def _check_index(self, num):
        """Reject indices that are not ints or do not address a unit.

        Args:
            num (int): index number of a unit

        Raises:
            TypeError: @num is not an integer
            IndexError: @num is not a valid index number
        """
        if not isinstance(num, int):
            raise TypeError(
                f"@num must be integer, but {num} was applied.")
        try:
            self._units[num]
        except IndexError:
            raise IndexError(f"@num must be under {len(self._units)}")

    def enable(self, num):
        """Enable the unit at index ``num``."""
        self._check_index(num)
        self._units[num].enable()

    def disable(self, num):
        """Disable the unit at index ``num``."""
        self._check_index(num)
        self._units[num].disable()


def show_enabled(series):
    """Print the values of all enabled units in ``series``."""
    if not isinstance(series, Series):
        raise TypeError("@unit must be a instance of Series")
    print([unit.value for unit in series if unit])


if __name__ == "__main__":
    # --- Unit demo: toggling changes truthiness -----------------------
    first = Unit(value=1.0)
    if first:
        print(first.value)
    first.disable()
    if first:
        print("Disabled")
        print(first.value)
    first.enable()
    if first:
        print("Enabled")
        print(first.value)
    # --- Series demo --------------------------------------------------
    series = Series()
    for i in range(6):
        series.add(Unit(i))
    show_enabled(series)
    # Disable two units
    series.disable(4)
    series.disable(5)
    show_enabled(series)
    # Enable one disabled unit
    series.enable(4)
    show_enabled(series)
3,276
950
from ansi.colour.rgb import rgb256
from ansi.colour.fx import reset


class TextImage():
    """Render an image as lines of palette characters.

    ``original`` is assumed to expose ``.size`` -> (width, height) and
    ``.getpixel((x, y))`` -> RGB 3-tuple (PIL.Image-like interface --
    TODO confirm against callers).  ``palette`` is an indexable sequence
    of characters mapped to rising luminosity.
    """

    def __init__(self, original, palette):
        self.original = original
        self.palette = palette
        self.lines = self._build_lines()
        self.text = self._build_text()

    def _build_text(self):
        # Single join instead of quadratic "+=" concatenation; every line
        # (including the last) keeps its trailing newline, as before.
        return ''.join(line + '\n' for line in self.lines)

    def _build_lines(self):
        # One string per image row, one character per pixel.
        result = []
        width, height = self.original.size
        for y in range(height):
            row = ''.join(
                self._value_to_char(self.original.getpixel((x, y)))
                for x in range(width)
            )
            result.append(row)
        return result

    def _value_to_char(self, value):
        """Map an RGB 3-tuple to a palette character by luminosity.

        Raises TypeError for bare ints (e.g. paletted/greyscale pixels)
        or tuples that are not length 3.
        """
        if type(value) is int or len(value) != 3:
            raise TypeError("Value should be a 3-tuple")
        r, g, b = value
        # NOTE(review): coefficients approximate Rec. 709 luma
        # (0.2126/0.7152/0.0722); kept as-is to preserve output.
        luminosity = 0.2 * r + 0.72 * g + 0.07 * b
        palette_range = (0, len(self.palette))
        mapped = int(_scale(luminosity, (0, 256), palette_range))
        return self.palette[mapped]


class ColourImage(TextImage):
    """TextImage variant that wraps each character in ANSI colour codes.

    With ``background=True`` the pixel colour is applied to the cell
    background instead of the foreground.
    """

    def __init__(self, *args, background=False):
        self.background = background
        super().__init__(*args)

    def _value_to_char(self, value):
        char = super()._value_to_char(value)
        colour = rgb256(*value)
        if self.background:
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so background mode silently never worked.
            # '38' (set foreground) -> '48' (set background) in the escape.
            colour = colour.replace('38', '48', 1)
        return colour + char + str(reset)


class Animation():
    """Placeholder for a frame-based animation (not yet implemented)."""

    def __init__(self, frames, speed):
        # BUG FIX: the original signature omitted ``self``, so
        # Animation(frames, speed) always raised TypeError.
        self.frames = frames
        self.speed = speed


def _scale(value, source, destination):
    """
    Linear map a value from a source to a destination range.

    :param int value: original value
    :param tuple source: source range
    :param tuple destination: destination range
    :rtype: float
    """
    return (
        ((value - source[0]) / (source[1] - source[0]))
        * (destination[1] - destination[0])
        + destination[0]
    )
2,101
650
#!/user/bin python # -*- coding:utf-8 -*- ''' @Author: xiaodong @Email: fuxd@jidongnet.com @DateTime: 2015-12-16 17:01:58 @Description: 加勒比 安卓平台遍历全区 搜索100W以上钻石的玩家 ''' import redis import sys import json redisList = [6510,6520,6530,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,6566,6567,6568,6569] hostList = ['192.168.1.190','192.168.1.187','192.168.1.188','192.168.1.189'] for redis_port in redisList: rightRole = [] host_index = int(str(redis_port)[2:]) % 4 host = hostList[host_index] print redis_port ,'---',host r = redis.StrictRedis(host=host,port=redis_port,db=3) # 过滤掉机器人 allRoles = filter(lambda x:len(x) > 6,r.keys('r_*')) # 开始过滤 钻石数大于100W for role in allRoles: roleInfo = [] roleInfo = r.hmget(role,'name','cash') if int(roleInfo[1]) > 1000000: roleInfo.append(role[-2:]) rightRole.append(roleInfo) f = file('role_100W_%s.txt'%redis_port,'w+') f.write('\n'.join([ ' -- '.join(i) for i in rightRole])) f.flush() f.close()
1,070
657
#!/usr/bin/env python3
# Integration tests for the apocrypha server: a real ServerDatabase is
# served on a background thread and exercised through a TCP client.
# pylint: disable=protected-access
# pylint: disable=no-self-use
# pylint: disable=missing-docstring
# pylint: disable=too-many-public-methods

import time
import threading
import unittest

import apocrypha.client
from apocrypha.exceptions import DatabaseError
from apocrypha.server import ServerDatabase, ServerHandler, Server
from test_node import random_query

PORT = 49999
# Module-level client shared by all tests; connects to the server
# started in TestServerBase.setUpClass.
client = apocrypha.client.Client(port=PORT)


def query(args, raw=False):
    ''' list of string -> string
    '''
    # raw=True asks the client to interpret the reply (JSON -> python).
    return client.query(args, interpret=raw)


class TestServerBase(unittest.TestCase):

    database = None
    server = None
    server_thread = None

    @classmethod
    def setUpClass(cls):
        '''
        create an Apocrypha instance and server to handle connections
        run the server in a thread so test cases may run
        '''
        # create the ServerDatabase instance, which inherits from Apocrypha
        TestServerBase.database = ServerDatabase(
            'test/test-db.json',
            stateless=True)

        # Create the tcp server
        host, port = '0.0.0.0', PORT
        TestServerBase.server = Server(
            (host, port), ServerHandler,
            TestServerBase.database,
            quiet=True)

        # start the server
        TestServerBase.server_thread = threading.Thread(
            target=TestServerBase.server.serve_forever)
        TestServerBase.server_thread.start()

        TestServerBase.db = apocrypha.client.Client(port=PORT)

    @classmethod
    def tearDownClass(cls):
        '''
        shutdown the server
        '''
        TestServerBase.server.teardown()
        TestServerBase.server.socket.close()
        TestServerBase.server_thread.join(1)


class TestServer(TestServerBase):

    # server tests

    # caching
    def test_cache_hit(self):
        # write operations don't update the cache
        query(['pizza', '=', 'sauce'])
        self.assertNotIn(('pizza',), TestServer.database.cache)

        # get operations do
        query(['pizza'])
        self.assertIn(('pizza',), TestServer.database.cache)

        result = query(['pizza'])
        self.assertEqual(result, ['sauce'])
        self.assertIn(('pizza',), TestServer.database.cache)

    def test_cache_deep_hit(self):
        query(['a', '-d'])
        query(['a', 'b', 'c', 'd', 'e', '=', 'f'])
        query(['a', 'b', 'c', 'd', 'e'])
        # cache keys are tuples of the full lookup path
        self.assertIn(
            ('a', 'b', 'c', 'd', 'e'),
            TestServer.database.cache)

    @unittest.skip('using simple caching')
    def test_cache_invalidate(self):
        query(['pizza', '=', 'sauce'])
        query(['pizza'])
        query([])
        self.assertIn(('pizza',), TestServer.database.cache)
        self.assertIn((), TestServer.database.cache)

        query(['pizza', '-d'])
        self.assertNotIn(('pizza',), TestServer.database.cache)
        self.assertNotIn((), TestServer.database.cache)

    @unittest.skip('using simple caching')
    def test_cache_invalidate_parent(self):
        '''
        changing a child key invalidates all of it's parents
        '''
        query(['one layer', 'two layer', '=', 'cake'])
        query(['one layer', 'two layer'])
        self.assertIn(('one layer', 'two layer'), TestServer.database.cache)

        query(['one layer'])
        self.assertIn(('one layer',), TestServer.database.cache)

        # both parent and child are in cache, now change the child and make
        # sure the parent is also invalidated
        query(['one layer', 'two layer', '=', 'goop'])
        self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
        self.assertNotIn(('one layer',), TestServer.database.cache)

    @unittest.skip('using simple caching')
    def test_cache_invalidate_child(self):
        '''
        changing a parent key invalidates all of it's direct children
        '''
        query(['one layer', 'two layer', '=', 'cake'])
        query(['one layer', 'two layer'])
        self.assertIn(('one layer', 'two layer'), TestServer.database.cache)

        query(['one layer'])
        self.assertIn(('one layer',), TestServer.database.cache)

        # both parent and child are in cache, now change the parent and make
        # sure the child is also invalidated
        query(['one layer', '-d'])
        self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
        self.assertNotIn(('one layer',), TestServer.database.cache)

    @unittest.skip('unknown issue')
    def test_cache_doesnt_effect_sibling(self):
        client.delete('one layer')
        client.set('one layer', 'two layer', value='cake')
        client.set('one layer', 'apple layer', value='sauce')
        print(TestServer.database.data)
        self.assertEqual(
            client.get('one layer', 'two layer'), 'cake')
        self.assertEqual(
            client.get('one layer', 'apple layer'), 'sauce')
        self.assertEqual(
            client.get('one layer'),
            {'two layer': 'cake', 'apple layer': 'sauce'})
        print(TestServer.database.cache)
        self.assertIn(('one layer',), TestServer.database.cache)
        self.assertIn(('one layer', 'two layer',), TestServer.database.cache)
        self.assertIn(('one layer', 'apple layer',), TestServer.database.cache)

    def test_cache_top_level_read_operators(self):
        '''
        make sure --keys, --edit on root are invalidated correctly
        '''
        pass

    def test_cache_top_level_write_operators(self):
        '''
        writing to root clears the entire cache
        '''
        pass

    def test_cache_write_ops_not_cached(self):
        pass

    def test_cache_read_ops_are_cached(self):
        query(['pizza', '=', 'sauce'])
        value = query(['pizza', '--edit'])
        self.assertIn(('pizza', '--edit',), TestServer.database.cache)
        # --edit returns the JSON representation of the value
        self.assertEqual(value, ['"sauce"'])

    # timing
    @unittest.skip('timing not currently supported')
    def test_timing(self):
        result = query(['-t', 'wolf', 'legs'])
        self.assertEqual(result, ['0'])

        query(['wolf', 'legs', '=', '4'])
        result = query(['-t', 'wolf', 'legs'])
        self.assertNotEqual(result, ['0'])

    # client tests - query
    def test_assign(self):
        query(['apple', '=', 'sauce'])
        result = query(['apple'])
        self.assertEqual(result, ['sauce'])

    def test_strict(self):
        # -s makes lookups of missing keys an error instead of empty
        with self.assertRaises(DatabaseError):
            query(['-s', 'gadzooks'])

    def test_context(self):
        result = query(['-c', '@', 'red'])
        self.assertEqual(result, ['sub apple = red'])

    def test_query_json_dict(self):
        result = query(['octopus'], raw=True)
        self.assertEqual(result, {'legs': 8})
        self.assertTrue(isinstance(result, dict))

    def test_query_json_list(self):
        result = query(['colors'], raw=True)
        self.assertTrue(isinstance(result, list))

    def test_query_json_string(self):
        result = query(['apple'], raw=True)
        self.assertTrue(isinstance(result, str))

    # client tests - Client
    def test_get_string(self):
        self.assertEqual(
            TestServer.db.get('green'), 'nice')
        self.assertEqual(
            TestServer.db.get('octopus', 'legs'), 8)

    # get
    def test_get_list(self):
        self.assertEqual(
            TestServer.db.get('animals'),
            ['wolf', 'octopus', 'bird'])

    def test_get_dict(self):
        self.assertEqual(
            TestServer.db.get('octopus'),
            {'legs': 8})

    def test_get_non_existant(self):
        self.assertEqual(
            TestServer.db.get('yahoo', 'foobar'),
            None)

    def test_get_default(self):
        '''
        when a key doesn't exist, default=<something> determines what to
        respond with
        '''
        self.assertEqual(
            TestServer.db.get('yahoo', 'foobar', default={}),
            {})
        self.assertEqual(
            TestServer.db.get('yahoo', 'foobar', default=[]),
            [])
        self.assertEqual(
            TestServer.db.get('yahoo', 'foobar', default='abc'),
            'abc')

    def test_get_error(self):
        # indexing into a list with a string key is an error
        with self.assertRaises(DatabaseError):
            TestServer.db.get('animals', 'octopus')

    def test_get_cast_to_list(self):
        self.assertEqual(
            TestServer.db.get('green', cast=list),
            ['nice'])

    def test_get_cast_to_str(self):
        self.assertEqual(
            TestServer.db.get('animals', cast=str),
            "['wolf', 'octopus', 'bird']")

    def test_get_cast_to_set(self):
        self.assertEqual(
            TestServer.db.get('animals', cast=set),
            {'wolf', 'octopus', 'bird'})

    def test_get_cast_to_error(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.get('animals', cast=dict)

    # keys
    def test_keys(self):
        self.assertEqual(
            TestServer.db.keys('octopus'),
            ['legs'])

    def test_keys_non_existant(self):
        self.assertEqual(
            TestServer.db.keys('does not exist', 'foobar'),
            [])

    def test_keys_error(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.keys('animals', 'octopus')

    # remove
    def test_remove(self):
        TestServer.db.set('test list', value=['a', 'b', 'c'])
        TestServer.db.remove('test list', value='a')
        self.assertEqual(
            TestServer.db.get('test list'),
            ['b', 'c'])

    def test_remove_list(self):
        TestServer.db.set('test list', value=['a', 'b', 'c'])
        TestServer.db.remove('test list', value=['a', 'b'])
        # removing all but one element collapses the list to a scalar
        self.assertEqual(
            TestServer.db.get('test list'),
            'c')

    def test_remove_error(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.remove('octopus', value='sandwich')

    def test_remove_type_error(self):
        TestServer.db.set('octopus', value={1: 2, 3: 4})
        with self.assertRaises(DatabaseError):
            TestServer.db.remove('octopus', value='sandwich')

    def test_remove_error_top_level(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.remove(value='key that does not exist')

    # append
    def test_append(self):
        TestServer.db.delete('test list')

        # appending to a missing key stores a scalar first...
        TestServer.db.append('test list', value='apple')
        self.assertEqual(
            TestServer.db.get('test list'),
            'apple')

        # ...and a second append promotes it to a list
        TestServer.db.append('test list', value='blue')
        self.assertEqual(
            TestServer.db.get('test list'),
            ['apple', 'blue'])

    def test_append_list(self):
        TestServer.db.delete('test list')

        TestServer.db.append('test list', value=['a', 'b'])
        self.assertEqual(
            TestServer.db.get('test list'),
            ['a', 'b'])

        TestServer.db.append('test list', value=['c', 'd'])
        self.assertEqual(
            TestServer.db.get('test list'),
            ['a', 'b', 'c', 'd'])

    def test_append_non_existant(self):
        TestServer.db.delete('test list')
        TestServer.db.append('test list', value=['a', 'b'])
        self.assertEqual(
            TestServer.db.get('test list'),
            ['a', 'b'])

    def test_append_error(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.append('octopus', value='sandwich')

    def test_append_type_error(self):
        with self.assertRaises(DatabaseError):
            TestServer.db.append('octopus', value={'a': 1})

    # set
    def test_set(self):
        TestServer.db.set('test item', value='hello')
        value = TestServer.db.get('test item')
        self.assertEqual(value, 'hello')

    def test_set_list(self):
        TestServer.db.set('test list', value=['hello', 'there'])
        self.assertEqual(
            TestServer.db.get('test list'),
            ['hello', 'there'])

    def test_set_error(self):
        # sets are not JSON-serializable
        with self.assertRaises(DatabaseError):
            TestServer.db.set('hello', value=set())

    # delete
    def test_delete(self):
        TestServer.db.set('test item', value='hello')
        self.assertEqual(
            TestServer.db.get('test item'),
            'hello')
        TestServer.db.delete('test item')
        self.assertEqual(
            TestServer.db.get('test item'),
            None)

    # pop
    def test_pop_cast(self):
        TestServer.db.set('item', value='hello')
        result = TestServer.db.pop('item', cast=list)
        self.assertEqual(
            result, list('hello'))

    def test_pop_bad_cast(self):
        TestServer.db.set('item', value='hello')
        with self.assertRaises(DatabaseError):
            TestServer.db.pop('item', cast=dict)

    # apply
    def test_apply(self):
        TestServer.db.set('list', value=['a', 'a', 'b', 'c'])
        TestServer.db.apply('list', func=lambda xs: list(set(xs)))
        # set() does not preserve order, so compare sorted
        self.assertEqual(
            sorted(TestServer.db.get('list')),
            sorted(['a', 'b', 'c']))

    # raw query
    def test_query(self):
        self.assertEqual(
            apocrypha.client.query(
                ['non', 'existant', '--keys'], port=PORT),
            [])

    def test_fuzz(self):
        '''
        throw a ton of junk at the server and see if it crashes
        '''
        for _ in range(0, 1000):
            random_query(client, debug=False)

    def test_lock_stress(self):
        '''
        make a ton of junk queries from several threads

        not interested in what the queries do, just that they don't crash
        the server
        '''
        num_requests = 500
        num_workers = 10

        def worker():
            # small delay so all workers start roughly together
            time.sleep(0.1)
            for _ in range(0, num_requests):
                random_query(client, debug=False)

        threads = []
        for _ in range(0, num_workers):
            threads += [
                threading.Thread(target=worker)
            ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()


if __name__ == '__main__':
    unittest.main()
14,126
4,317
#!/usr/bin/env python import unittest from unittest.mock import Mock from genie.conf import Genie from genie.conf.base import Testbed, Device, Link, Interface from genie.libs.conf.l2vpn import Xconnect from genie.libs.conf.bgp import RouteTarget class test_xconnect(unittest.TestCase): def test_init(self): testbed = Genie.testbed = Testbed() dev1 = Device(testbed=testbed, name='PE1', os='iosxr') intf1 = Interface(device=dev1, name='GigabitEthernet0/0/0/1') intf2 = Interface(device=dev1, name='GigabitEthernet0/0/0/2') dev2 = Device(testbed=testbed, name='PE2', os='iosxr') intf3 = Interface(device=dev2, name='GigabitEthernet0/0/0/3') intf4 = Interface(device=dev2, name='GigabitEthernet0/0/0/4') link1 = Link(testbed=testbed, name='link1', interfaces=(intf1, intf3)) link2 = Link(testbed=testbed, name='link2', interfaces=(intf2, intf4)) with self.assertRaises(TypeError): xc1 = Xconnect() with self.assertRaises(TypeError): xc1 = Xconnect(group_name='bg1') xc1 = Xconnect(name='xc1', group_name='bg1') self.assertIs(xc1.xconnect_type, Xconnect.Type.p2p) self.assertEqual(xc1.name, 'xc1') self.assertEqual(xc1.group_name, 'bg1') xc1 = Xconnect(name='xc1') self.assertEqual(xc1.name, 'xc1') self.assertEqual(xc1.group_name, 'xc1g') self.assertCountEqual(xc1.devices, []) self.assertCountEqual(xc1.interfaces, []) self.assertCountEqual(xc1.segments, []) self.assertCountEqual(xc1.link.interfaces, []) dev1.add_feature(xc1) self.assertCountEqual(xc1.devices, [dev1]) self.assertCountEqual(xc1.interfaces, []) self.assertCountEqual(xc1.segments, []) self.assertCountEqual(xc1.link.interfaces, []) cfgs = xc1.build_config(apply=False) self.assertCountEqual(cfgs.keys(), [dev1.name]) self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join([ 'l2vpn', ' xconnect group xc1g', ' p2p xc1', ' exit', ' exit', ' exit', ])) #xc1.add_interface(intf1) intf1.l2transport.enabled = True #self.assertCountEqual(xc1.interfaces, [intf1]) #self.assertCountEqual(xc1.devices, [dev1]) 
#self.assertCountEqual(xc1.segments, [intf1]) #self.assertCountEqual(xc1.link.interfaces, [intf3]) #self.assertCountEqual(xc1.device_attr[dev1].interfaces, [intf1]) #self.assertCountEqual(xc1.device_attr[dev2].interfaces, []) #self.assertCountEqual(xc1.device_attr[dev1].segments, [intf1]) self.assertCountEqual(xc1.device_attr[dev2].segments, []) cfgs = xc1.build_config(apply=False) self.assertCountEqual(cfgs.keys(), [dev1.name]) if False: self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join([ 'l2vpn', ' xconnect group xc1g', ' p2p xc1', ' interface GigabitEthernet0/0/0/1', ' exit', ' exit', ' exit', ])) dev2.add_feature(xc1) xc1.xconnect_type = Xconnect.Type.mp2mp xc1.autodiscovery_bgp.enabled = True xc1.autodiscovery_bgp.signaling_protocol_bgp.enabled = True xc1.autodiscovery_bgp.export_route_targets = [RouteTarget.ImportExport('1.1.1.1:1')] xc1.autodiscovery_bgp.import_route_targets = [RouteTarget.ImportExport('1.1.1.1:1')] xc1.autodiscovery_bgp.rd = '1000:1' xc1.device_attr[dev1].vpn_id = 100 xc1.device_attr[dev2].vpn_id = 101 ce_id1 = 1001 xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.add_ce_id(ce_id1) xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id1].add_interface(intf1) ce_id2 = 1000 xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.add_ce_id(ce_id1) xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id2].add_interface(intf2) xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id1].interface_attr[intf1].remote_ce_id = ce_id2 xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id2].interface_attr[intf2].remote_ce_id = ce_id1 cfgs = xc1.build_config(apply=False) # TODO print(cfgs) if __name__ == '__main__': unittest.main()
4,556
1,716
#!/usr/bin/env python
# Tests for the analytics.service module.
# NOTE: Python 2 only (sys.maxint, types.DictType, print statements).

# import libs
import unittest
import json
import sys
import os
from types import DictType
# import classes
import analytics.utils.misc as misc
import analytics.exceptions.exceptions as ex
import analytics.service as service
import projectpaths as paths
from analytics.loading.jsonloader import JsonLoader
from analytics.loading.xmlloader import XmlLoader

# A grab-bag of invalid/edge-case values fed to every API under test.
general_input = [
    None, True, False, sys.maxint, -sys.maxint-1, {}, [],
    {"1": 1, "2": 2}, [1, 2, 3, 4, 5], "abc", 0, 1, -1,
    1.233, -3.343, 0.23435, " string ", " test test test ", "1"
]


class Service_TestSequence(unittest.TestCase):

    def test_service_isUserInEmaillist(self):
        # none of the junk values are registered emails...
        for item in general_input:
            self.assertEqual(service.isUserInEmaillist(item), False)
        # ...while every configured email is recognised
        for item in service.EMAIL_LIST:
            self.assertEqual(service.isUserInEmaillist(item), True)

    def test_service_searchDatasets(self):
        # count manifest.json files on disk and compare with the service
        scanls = []
        for root, dirs, files in os.walk(paths.DATASETS_PATH):
            for file in files:
                if file == "manifest.json":
                    scanls.append(os.path.join(root, file))
        ls = service.searchDatasets()
        self.assertEqual(len(ls), len(scanls))

    def test_service_loaderForDatatype(self):
        for item in general_input:
            with self.assertRaises(ex.AnalyticsStandardError):
                service._loaderForDatatype(item, item)
        # test json loader
        loader = service._loaderForDatatype("json", "path")
        self.assertEqual(type(loader), JsonLoader)
        self.assertEqual(loader._filepath, "path")
        # test xml loader
        loader = service._loaderForDatatype("xml", "path")
        self.assertEqual(type(loader), XmlLoader)
        self.assertEqual(loader._filepath, "path")

    def test_service_generateErrorMessage(self):
        messages = ["test"]
        code = 401
        obj = service._generateErrorMessage(messages, code)
        self.assertEqual(type(obj), DictType)
        self.assertEqual(obj["messages"], messages)
        self.assertEqual(obj["code"], code)
        self.assertEqual(obj["status"], "error")

    def test_service_generateSuccessMessage(self):
        messages = ["test"]
        dataobj = {"test": "test"}
        obj = service._generateSuccessMessage(messages, dataobj)
        self.assertEqual(type(obj), DictType)
        self.assertEqual(obj["messages"], messages)
        self.assertEqual(obj["data"], dataobj)
        self.assertEqual(obj["status"], "success")


# Load test suites
def _suites():
    return [
        Service_TestSequence
    ]


# Load tests
def loadSuites():
    # global test suite for this module
    gsuite = unittest.TestSuite()
    for suite in _suites():
        gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite))
    return gsuite


if __name__ == '__main__':
    suite = loadSuites()
    print ""
    print "### Running tests ###"
    print "-" * 70
    unittest.TextTestRunner(verbosity=2).run(suite)
3,006
927
import argparse
from abc import ABC, abstractmethod
from typing import Any, Dict


class BaseSubcommand(ABC):
    """Abstract base class for CLI subcommands.

    Concrete subcommands implement :meth:`name`, :meth:`register_args`
    and :meth:`run`; a driver is expected to register each subcommand's
    parser under ``name()`` and dispatch parsed arguments to ``run()``.
    """

    @abstractmethod
    def name(self) -> str:
        """The name of the subcommand, recommended to be a single, kebab-cased word."""

    @abstractmethod
    def register_args(self, subparser: argparse.ArgumentParser) -> None:
        """Registers the arguments for this given subcommand."""

    @abstractmethod
    def run(self, args: Dict[str, Any]) -> None:
        """Runs the subcommand with the passed arguments."""
552
153
"""Input/output functions.""" import astropy.io.fits as fits from astropy.table import Table import numpy as np import astropy.units as u from astropy.coordinates import ( EarthLocation, AltAz, Angle, ICRS, GCRS, SkyCoord, get_sun, ) import os from astropy.time import Time import warnings from astropy import log import copy import re import glob from collections.abc import Iterable from scipy.interpolate import interp1d from .utils import force_move_file try: from sunpy.coordinates import frames, sun DEFAULT_SUN_FRAME = frames.Helioprojective except ImportError: DEFAULT_SUN_FRAME = None __all__ = [ "mkdir_p", "detect_data_kind", "correct_offsets", "observing_angle", "get_rest_angle", "print_obs_info_fitszilla", "read_data_fitszilla", "read_data", "root_name", "get_chan_columns", ] chan_re = re.compile( r"^Ch([0-9]+)$" r"|^Feed([0-9]+)_([a-zA-Z]+)$" r"|^Feed([0-9]+)_([a-zA-Z]+)_([0-9]+)$" ) # 'srt': EarthLocation(4865182.7660, 791922.6890, 4035137.1740, # unit=u.m) # EarthLocation(Angle("9:14:42.5764", u.deg), # Angle("39:29:34.93742", u.deg), # 600 * u.meter) # not precise enough locations = { "srt": EarthLocation(4865182.7660, 791922.6890, 4035137.1740, unit=u.m), "medicina": EarthLocation( Angle("11:38:49", u.deg), Angle("44:31:15", u.deg), 25 * u.meter ), "greenwich": EarthLocation(lat=51.477 * u.deg, lon=0 * u.deg), } def interpret_chan_name(chan_name): """Get feed, polarization and baseband info from chan name. 
Examples >>> feed, polar, baseband = interpret_chan_name('blablabal') >>> feed # None >>> polar # None >>> baseband # None >>> feed, polar, baseband = interpret_chan_name('Ch0') >>> feed 0 >>> polar # None >>> baseband # None >>> feed, polar, baseband = interpret_chan_name('Feed1_LCP') >>> feed 1 >>> polar 'LCP' >>> baseband # None >>> feed, polar, baseband = interpret_chan_name('Feed2_LCP_3') >>> feed 2 >>> polar 'LCP' >>> baseband 3 """ matchobj = chan_re.match(chan_name) if not matchobj: return None, None, None matches = [matchobj.group(i) for i in range(7)] polar, baseband = None, None if matches[6] is not None: baseband = int(matchobj.group(6)) polar = matchobj.group(5) feed = int(matchobj.group(4)) elif matches[3] is not None: polar = matchobj.group(3) feed = int(matchobj.group(2)) else: feed = int(matchobj.group(1)) return feed, polar, baseband def classify_chan_columns(chans): """Classify the name of channels per feed, polarization, baseband. Examples -------- >>> chans = ['Feed0_LCP_3', 'Feed0_RCP_3'] >>> classif = classify_chan_columns(chans) >>> classif[0][3]['LCP'] 'Feed0_LCP_3' >>> classif[0][3]['RCP'] 'Feed0_RCP_3' >>> chans = ['Ch0'] >>> classif = classify_chan_columns(chans) >>> classif[0][1]['N'] 'Ch0' >>> chans = ['Feed0_LCP'] >>> classif = classify_chan_columns(chans) >>> classif[0][1]['LCP'] 'Feed0_LCP' """ combinations = {} for ch in chans: feed, polar, baseband = interpret_chan_name(ch) if baseband is None: baseband = 1 if polar is None: polar = "N" if feed not in combinations: combinations[feed] = {} if baseband not in combinations[feed]: combinations[feed][baseband] = {} combinations[feed][baseband][polar] = ch return combinations def get_chan_columns(table): return np.array([i for i in table.columns if chan_re.match(i)]) def get_channel_feed(ch): if re.search("Feed?", ch): return int(ch[4]) def mkdir_p(path): """Safe mkdir function. 
Parameters ---------- path : str Name of the directory/ies to create Notes ----- Found at http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python """ import errno try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def _check_derotator(derot_angle): # Check that derotator angle is outside any plausible value if np.any(np.abs(derot_angle) > 2 * 360): return False return True def detect_data_kind(fname): """Placeholder for function that recognizes data format.""" if fname.endswith(".hdf5"): return "hdf5" elif "fits" in fname: return "fitszilla" else: warnings.warn("File {} is not in a known format".format(fname)) return None def correct_offsets(obs_angle, xoffset, yoffset): """Correct feed offsets for derotation angle. All angles are in radians. Examples -------- >>> x = 2 ** 0.5 >>> y = 2 ** 0.5 >>> angle = np.pi / 4 >>> xoff, yoff = correct_offsets(angle, x, y) >>> np.allclose([xoff, yoff], 2 ** 0.5) True """ sep = np.sqrt(xoffset ** 2.0 + yoffset ** 2.0) new_xoff = sep * np.cos(obs_angle) new_yoff = sep * np.sin(obs_angle) return new_xoff, new_yoff def observing_angle(rest_angle, derot_angle): """Calculate the observing angle of the multifeed. If values have no units, they are assumed in radians Parameters ---------- rest_angle : float or Astropy quantity, angle rest angle of the feeds derot_angle : float or Astropy quantity, angle derotator angle Examples -------- >>> observing_angle(0 * u.rad, 2 * np.pi * u.rad).to(u.rad).value 0.0 >>> observing_angle(0, 2 * np.pi).to(u.rad).value 0.0 """ if not hasattr(rest_angle, "unit"): rest_angle *= u.rad if not hasattr(derot_angle, "unit"): derot_angle *= u.rad return rest_angle + (2 * np.pi * u.rad - derot_angle) def _rest_angle_default(n_lat_feeds): """Default rest angles for a multifeed, in units of a circle Assumes uniform coverage. Examples -------- >>> np.allclose(_rest_angle_default(5), ... 
np.array([1., 0.8, 0.6, 0.4, 0.2])) True >>> np.allclose(_rest_angle_default(6) * 360, ... np.array([360., 300., 240., 180., 120., 60.])) True """ return np.arange(1, 0, -1 / n_lat_feeds) def get_rest_angle(xoffsets, yoffsets): """Calculate the rest angle for multifeed. The first feed is assumed to be at position 0, for it the return value is 0 Examples -------- >>> xoffsets = [0.0, -0.0382222, -0.0191226, 0.0191226, 0.0382222, ... 0.0191226, -0.0191226] >>> yoffsets = [0.0, 0.0, 0.0331014, 0.0331014, 0.0, -0.0331014, ... -0.0331014] >>> np.allclose(get_rest_angle(xoffsets, yoffsets).to(u.deg).value, ... np.array([0., 180., 120., 60., 360., 300., 240.])) True """ if len(xoffsets) <= 2: return np.array([0] * len(xoffsets)) xoffsets = np.asarray(xoffsets) yoffsets = np.asarray(yoffsets) n_lat_feeds = len(xoffsets) - 1 rest_angle_default = _rest_angle_default(n_lat_feeds) * 2 * np.pi * u.rad w_0 = np.where((xoffsets[1:] > 0) & (yoffsets[1:] == 0.0))[0][0] return ( np.concatenate(([0], np.roll(rest_angle_default.to(u.rad).value, w_0))) * u.rad ) def infer_skydip_from_elevation(elevation, azimuth=None): if azimuth is None: azimuth = np.array([0, 0]) el_condition = np.max(elevation) - np.min(elevation) > np.pi / 3.0 az_condition = np.max(azimuth) - np.min(azimuth) < 0.1 / 180.0 * np.pi return az_condition & el_condition def get_sun_coords_from_radec(obstimes, ra, dec, sun_frame=None): if sun_frame is None: # pragma: no cover sun_frame = DEFAULT_SUN_FRAME coords = GCRS( ra=Angle(ra), dec=Angle(dec), obstime=obstimes, distance=sun.earth_distance(obstimes), ) coords_asec = coords.transform_to( sun_frame(obstime=obstimes, observer="earth") ) lon = [ca.Tx.value for ca in coords_asec] * coords_asec.Tx.unit lat = [ca.Ty.value for ca in coords_asec] * coords_asec.Ty.unit dist = [ ca.distance.value for ca in coords_asec ] * coords_asec.distance.unit return lon.to(u.radian), lat.to(u.radian), dist.to(u.m).value def update_table_with_sun_coords(new_table, sun_frame=None): lon_str, 
lat_str = "hpln", "hplt" if not ("dsun" in new_table.colnames): new_table[lon_str] = np.zeros_like(new_table["el"]) new_table[lat_str] = np.zeros_like(new_table["az"]) new_table["dsun"] = np.zeros(len(new_table["az"])) for i in range(0, new_table["el"].shape[1]): obstimes = Time(new_table["time"] * u.day, format="mjd", scale="utc") lon, lat, dist = get_sun_coords_from_radec( obstimes, new_table["ra"][:, i], new_table["dec"][:, i], sun_frame=sun_frame, ) new_table[lon_str][:, i] = lon new_table[lat_str][:, i] = lat if i == 0: new_table["dsun"][:] = dist return new_table def get_coords_from_altaz_offset( obstimes, el, az, xoffs, yoffs, location, inplace=False ): """""" # Calculate observing angle if not inplace: el = copy.deepcopy(el) az = copy.deepcopy(az) el += yoffs.to(u.rad).value az += xoffs.to(u.rad).value / np.cos(el) coords = AltAz( az=Angle(az), alt=Angle(el), location=location, obstime=obstimes ) # According to line_profiler, coords.icrs is *by far* the longest # operation in this function, taking between 80 and 90% of the # execution time. Need to study a way to avoid this. coords_deg = coords.transform_to(ICRS()) ra = np.radians(coords_deg.ra) dec = np.radians(coords_deg.dec) return ra, dec def is_close_to_sun(ra, dec, obstime, tolerance=3 * u.deg): """Test if current source is close to the Sun. 
Examples -------- >>> ra, dec = 131.13535699 * u.deg, 18.08202663 * u.deg >>> obstime = Time("2017-08-01") >>> is_close_to_sun(ra, dec, obstime, tolerance=3 * u.deg) True >>> is_close_to_sun(ra, dec + 4 * u.deg, obstime, tolerance=3 * u.deg) False """ coords = SkyCoord(ra=ra, dec=dec, frame=GCRS(obstime=obstime)) sun_position = get_sun(obstime).transform_to(GCRS(obstime=obstime)) return (coords.separation(sun_position)).to(u.deg).value < tolerance.value def update_table_with_offsets(new_table, xoffsets, yoffsets, inplace=False): rest_angles = get_rest_angle(xoffsets, yoffsets) if not inplace: new_table = copy.deepcopy(new_table) lon_str, lat_str = "ra", "dec" if not (lon_str in new_table.colnames): new_table[lon_str] = np.zeros_like(new_table["el"]) new_table[lat_str] = np.zeros_like(new_table["az"]) for i in range(0, new_table["el"].shape[1]): obs_angle = observing_angle(rest_angles[i], new_table["derot_angle"]) # offsets < 0.001 arcseconds: don't correct (usually feed 0) if ( np.abs(xoffsets[i]) < np.radians(0.001 / 60.0) * u.rad and np.abs(yoffsets[i]) < np.radians(0.001 / 60.0) * u.rad ): continue xoffs, yoffs = correct_offsets(obs_angle, xoffsets[i], yoffsets[i]) obstimes = Time(new_table["time"] * u.day, format="mjd", scale="utc") location = locations[new_table.meta["site"]] lon, lat = get_coords_from_altaz_offset( obstimes, new_table["el"][:, i], new_table["az"][:, i], xoffs, yoffs, location=location, inplace=inplace, ) new_table[lon_str][:, i] = lon new_table[lat_str][:, i] = lat return new_table def print_obs_info_fitszilla(fname): """Placeholder for function that prints out oberving information.""" with fits.open(fname, memmap=False) as lchdulist: section_table_data = lchdulist["SECTION TABLE"].data sample_rates = get_value_with_units(section_table_data, "sampleRate") print("Sample rates:", sample_rates) rf_input_data = lchdulist["RF INPUTS"].data print("Feeds :", get_value_with_units(rf_input_data, "feed")) print( "IFs :", 
get_value_with_units(rf_input_data, "ifChain") ) print( "Polarizations :", get_value_with_units(rf_input_data, "polarization"), ) print( "Frequencies :", get_value_with_units(rf_input_data, "frequency"), ) print( "Bandwidths :", get_value_with_units(rf_input_data, "bandWidth"), ) def _chan_name(f, p, c=None): if c is not None: return "Feed{}_{}_{}".format(f, p, c) else: return "Feed{}_{}".format(f, p) def read_data_fitszilla(fname): with fits.open(fname, memmap=False) as lchdulist: retval = _read_data_fitszilla(lchdulist) return retval def get_value_with_units(fitsext, keyword, default=""): if isinstance(fitsext, fits.BinTableHDU): fitsext = fitsext.data unitstr = fitsext.columns[keyword].unit if unitstr is None: if default not in ["", None]: unit = u.Unit(default) else: unit = 1 else: unit = u.Unit(unitstr) value = fitsext[keyword] is_string = isinstance(value, str) is_iterable = isinstance(value, Iterable) if is_string or (is_iterable and isinstance(value[0], str)): return value else: return value * unit def adjust_temperature_size_rough(temp, comparison_array): """Adjust the size of the temperature array. 
Examples -------- >>> temp = [1, 2, 3, 4] >>> adjust_temperature_size_rough(temp, [5, 6, 7]) array([1, 2, 3]) >>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4]) array([1, 2, 3, 4, 4]) >>> adjust_temperature_size_rough(temp, [5, 6]) array([2, 3]) >>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4, 6]) array([1, 1, 2, 3, 4, 4]) """ import copy temp = np.asarray(temp) comparison_array = np.asarray(comparison_array) temp_save = copy.deepcopy(temp) sizediff = temp.size - comparison_array.size if sizediff > 0: temp = temp[sizediff // 2 : sizediff // 2 + comparison_array.size] elif sizediff < 0: # make it positive sizediff = -sizediff temp = np.zeros_like(comparison_array) temp[sizediff // 2 : sizediff // 2 + temp_save.size] = temp_save temp[: sizediff // 2] = temp_save[0] temp[sizediff // 2 + temp_save.size - 1 :] = temp_save[-1] return temp def adjust_temperature_size(temp, comparison_array): """Adjust the size of the temperature array. Examples -------- >>> temp = [1, 2, 3, 4] >>> np.allclose(adjust_temperature_size(temp, [5, 6]), [1.0, 4.0]) True >>> temp = [1, 2, 3, 4] >>> np.allclose(adjust_temperature_size(temp, [5, 6, 4, 5]), temp) True """ temp = np.asarray(temp) comparison_array = np.asarray(comparison_array) Ntemp = temp.shape[0] Ndata = comparison_array.shape[0] if Ntemp == Ndata: return temp temp_func = interp1d(np.linspace(0, 1, Ntemp), temp) newtemp = temp_func(np.linspace(0, 1, Ndata)) return newtemp # from memory_profiler import profile # @profile def _read_data_fitszilla(lchdulist): """Open a fitszilla FITS file and read all relevant information.""" is_new_fitszilla = np.any(["coord" in i.name.lower() for i in lchdulist]) # ----------- Extract generic observation information ------------------ headerdict = dict(lchdulist[0].header.items()) source = lchdulist[0].header["SOURCE"] site = lchdulist[0].header["ANTENNA"].lower() receiver = lchdulist[0].header["RECEIVER CODE"] ra = lchdulist[0].header["RIGHTASCENSION"] * u.rad dec = 
def _read_data_fitszilla(lchdulist):
    """Open a fitszilla FITS file and read all relevant information.

    Parameters
    ----------
    lchdulist : astropy.io.fits.HDUList
        An already-open fitszilla HDU list (see ``read_data_fitszilla``).

    Returns
    -------
    astropy.table.Table
        One column per channel (``Feed{f}_{pol}[_{c}]``), plus pointing,
        temperature and flag columns; rich per-channel metadata in
        ``column.meta`` and observation-level metadata in ``table.meta``.
    """
    # New-style files carry per-feed "CoordN" extensions with precomputed
    # coordinates; old-style files need offsets applied by hand.
    is_new_fitszilla = np.any(["coord" in i.name.lower() for i in lchdulist])

    # ----------- Extract generic observation information ------------------
    headerdict = dict(lchdulist[0].header.items())
    source = lchdulist[0].header["SOURCE"]
    site = lchdulist[0].header["ANTENNA"].lower()
    receiver = lchdulist[0].header["RECEIVER CODE"]

    ra = lchdulist[0].header["RIGHTASCENSION"] * u.rad
    dec = lchdulist[0].header["DECLINATION"] * u.rad
    # Pointing offsets are optional header cards; default to zero.
    ra_offset = dec_offset = az_offset = el_offset = 0 * u.rad
    if "RightAscension Offset" in lchdulist[0].header:
        ra_offset = lchdulist[0].header["RightAscension Offset"] * u.rad
    if "Declination Offset" in lchdulist[0].header:
        dec_offset = lchdulist[0].header["Declination Offset"] * u.rad
    if "Azimuth Offset" in lchdulist[0].header:
        az_offset = lchdulist[0].header["Azimuth Offset"] * u.rad
    if "Elevation Offset" in lchdulist[0].header:
        el_offset = lchdulist[0].header["Elevation Offset"] * u.rad

    # ----------- Read the list of channel ids ------------------
    section_table_data = lchdulist["SECTION TABLE"].data
    chan_ids = get_value_with_units(section_table_data, "id")
    nbin_per_chan = get_value_with_units(section_table_data, "bins")
    sample_rate = get_value_with_units(section_table_data, "sampleRate")
    # Frequency/bandwidth columns only exist in newer section tables.
    try:
        bw_section = get_value_with_units(section_table_data, "bandWidth")
        fr_section = get_value_with_units(section_table_data, "frequency")
    except KeyError:
        bw_section = None
        fr_section = None
    integration_time = lchdulist["SECTION TABLE"].header["Integration"] * u.ms

    if len(list(set(nbin_per_chan))) > 1:
        raise ValueError(
            "Only datasets with the same nbin per channel are "
            "supported at the moment"
        )
    # All sections share the same bin count; collapse to a scalar.
    nbin_per_chan = list(set(nbin_per_chan))[0]
    types = get_value_with_units(section_table_data, "type")
    if "stokes" in types:
        is_polarized = True
    else:
        is_polarized = False

    # Check. If backend is not specified, use Total Power
    # (guess from the section type / bin count otherwise).
    try:
        backend = lchdulist[0].header["BACKEND NAME"]
    except Exception:
        if "stokes" in types:
            if nbin_per_chan == 2048:
                backend = "XARCOS"
            else:
                backend = "SARDARA"
        elif "spectra" in types:
            backend = "SARDARA"
        else:
            backend = "TP"

    # ----------- Read the list of RF inputs, feeds, polarization, etc. --
    rf_input_data = lchdulist["RF INPUTS"].data
    feeds = get_value_with_units(rf_input_data, "feed")
    IFs = get_value_with_units(rf_input_data, "ifChain")
    polarizations = get_value_with_units(rf_input_data, "polarization")
    sections = get_value_with_units(rf_input_data, "section")
    frequencies_rf = get_value_with_units(rf_input_data, "frequency")
    bandwidths_rf = get_value_with_units(rf_input_data, "bandWidth")
    local_oscillator = get_value_with_units(rf_input_data, "localOscillator")

    try:
        cal_mark_temp = get_value_with_units(rf_input_data, "calibrationMark")
    except KeyError:
        # Old, stupid typo
        cal_mark_temp = get_value_with_units(rf_input_data, "calibratonMark")

    # Map section-table frequencies/bandwidths onto each RF input and add
    # the local oscillator to get sky frequencies.
    if bw_section is not None:
        bandwidths_section = [bw_section[i] for i in sections]
        frequencies_section = [fr_section[i] for i in sections]
        frequencies_section = [
            f + l for (f, l) in zip(frequencies_section, local_oscillator)
        ]

    # Total Power (or old files): trust the RF INPUTS table instead.
    if backend == "TP" or bw_section is None:
        frequencies, bandwidths = frequencies_rf, bandwidths_rf
    else:
        frequencies, bandwidths = frequencies_section, bandwidths_section

    combinations = list(zip(frequencies, bandwidths))
    combination_idx = np.arange(len(combinations))

    # Solve stupid problem with old CCB data
    if receiver.lower() == "ccb":
        feeds[:] = 0

    # Disambiguate channel names with the combination index only when
    # multiple (frequency, bandwidth) setups coexist.
    if len(set(combinations)) > 1:
        chan_names = [
            _chan_name(f, p, c)
            for f, p, c in zip(feeds, polarizations, combination_idx)
        ]
    else:
        chan_names = [_chan_name(f, p) for f, p in zip(feeds, polarizations)]

    # ----- Read the offsets of different feeds (nonzero only if multifeed)--
    feed_input_data = lchdulist["FEED TABLE"].data
    # Add management of historical offsets.
    # Note that we need to add the units by hand in this case.
    xoffsets = get_value_with_units(feed_input_data, "xOffset", default="rad")
    yoffsets = get_value_with_units(feed_input_data, "yOffset", default="rad")
    relpowers = get_value_with_units(feed_input_data, "relativePower")

    # -------------- Read data!-----------------------------------------
    datahdu = lchdulist["DATA TABLE"]
    # N.B.: there is an increase in memory usage here. This is just because
    # data are being read from the file at this point, not before.
    data_table_data = Table(datahdu.data)
    tempdata = Table(lchdulist["ANTENNA TEMP TABLE"].data)

    # Normalize all column names to lower case.
    for col in data_table_data.colnames:
        if col == col.lower():
            continue
        data_table_data.rename_column(col, col.lower())
    for col in tempdata.colnames:
        if col == col.lower():
            continue
        tempdata.rename_column(col, col.lower())

    # Very old files store a single "SPECTRUM" column instead of chN columns.
    is_old_spectrum = "SPECTRUM" in list(datahdu.header.values())
    if is_old_spectrum:
        data_table_data.rename_column("spectrum", "ch0")
        sections = np.array([0, 0])

    # 2-D temperature tables hold one column per feed with one row per IF;
    # reshape into one flat column per RF input.
    unsupported_temperature = False
    if len(tempdata[tempdata.colnames[0]].shape) == 2:
        try:
            tempdata_new = Table()
            for i, (feed, ifnum) in enumerate(zip(feeds, IFs)):
                tempdata_new[f"ch{i}"] = tempdata[f"ch{feed}"][:, ifnum]
            tempdata = tempdata_new
        except Exception:  # pragma: no cover
            warnings.warn("Temperature format not supported", UserWarning)
            unsupported_temperature = True
            pass

    existing_columns = [
        chn for chn in data_table_data.colnames if chn.startswith("ch")
    ]
    if existing_columns == []:
        raise ValueError("Invalid data")
    is_spectrum = nbin_per_chan > 1

    is_single_channel = len(set(combinations)) == 1

    # Keep only RF inputs whose section column actually exists in the data.
    good = np.ones(len(feeds), dtype=bool)
    for i, s in enumerate(sections):
        section_name = "ch{}".format(s)
        if section_name not in existing_columns:
            good[i] = False
    allfeeds = feeds
    feeds = allfeeds[good]
    IFs = IFs[good]
    polarizations = polarizations[good]
    sections = sections[good]

    if is_spectrum:
        nchan = len(chan_ids)
        sample_channel = existing_columns[0]
        _, nbins = data_table_data[sample_channel].shape
        # Development version of SARDARA -- will it remain the same?
        if nbin_per_chan == nbins:
            IFs = np.zeros_like(IFs)
        if nbin_per_chan * nchan * 2 == nbins and not is_polarized:
            warnings.warn(
                "Data appear to contain polarization information "
                "but are classified as simple, not stokes, in the "
                "Section table."
            )
            is_polarized = True
        if (
            nbin_per_chan != nbins
            and nbin_per_chan * nchan != nbins
            and nbin_per_chan * nchan * 2 != nbins
            and not is_polarized
        ):
            raise ValueError(
                "Something wrong with channel subdivision: "
                "{} bins/channel, {} channels, "
                "{} total bins".format(nbin_per_chan, nchan, nbins)
            )
        # Split each section column into per-IF channel columns.
        for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            c = s
            if is_single_channel:
                c = None
            section_name = "ch{}".format(s)
            ch = _chan_name(f, p, c)
            start, end = ic * nbin_per_chan, (ic + 1) * nbin_per_chan
            data_table_data[ch] = data_table_data[section_name][:, start:end]
        if is_polarized:
            # Stokes sections store [L, R, Q, U] blocks; extract Q and U.
            # for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            for s in list(set(sections)):
                f = feeds[sections == s][0]
                c = s
                if is_single_channel:
                    c = None
                section_name = "ch{}".format(s)
                qname, uname = _chan_name(f, "Q", c), _chan_name(f, "U", c)
                qstart, qend = 2 * nbin_per_chan, 3 * nbin_per_chan
                ustart, uend = 3 * nbin_per_chan, 4 * nbin_per_chan
                data_table_data[qname] = data_table_data[section_name][
                    :, qstart:qend
                ]
                data_table_data[uname] = data_table_data[section_name][
                    :, ustart:uend
                ]
                chan_names += [qname, uname]
        # Drop the now-redundant raw section columns.
        for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            section_name = "ch{}".format(s)
            if section_name in data_table_data.colnames:
                data_table_data.remove_column(section_name)
    else:
        # Total-power data: one value per sample; just alias the columns.
        for ic, ch in enumerate(chan_names):
            data_table_data[ch] = data_table_data["ch{}".format(chan_ids[ic])]

    # ----------- Read temperature data, if possible ----------------
    for ic, ch in enumerate(chan_names):
        data_table_data[ch + "-Temp"] = 0.0
        if unsupported_temperature:
            continue
        if len(chan_ids) <= ic:
            continue
        ch_string = f"ch{chan_ids[ic]}"
        if ch_string not in tempdata.colnames:
            continue
        td = np.asarray(tempdata[ch_string])
        data_table_data[ch + "-Temp"] = adjust_temperature_size(
            td, data_table_data[ch + "-Temp"]
        )

    info_to_retrieve = [
        "time",
        "derot_angle",
        "weather",
        "par_angle",
        "flag_track",
        "flag_cal",
    ] + [ch + "-Temp" for ch in chan_names]

    new_table = Table()
    new_table.meta.update(headerdict)
    new_table.meta["SOURCE"] = source
    new_table.meta["site"] = site
    new_table.meta["backend"] = backend
    new_table.meta["receiver"] = receiver
    new_table.meta["RA"] = ra
    new_table.meta["Dec"] = dec
    new_table.meta["channels"] = nbin_per_chan
    new_table.meta["VLSR"] = new_table.meta["VLSR"] * u.Unit("km/s")

    for i, off in zip(
        "ra,dec,el,az".split(","),
        [ra_offset, dec_offset, el_offset, az_offset],
    ):
        new_table.meta[i + "_offset"] = off

    for info in info_to_retrieve:
        new_table[info] = data_table_data[info]

    if not _check_derotator(new_table["derot_angle"]):
        log.debug("Derotator angle looks weird. Setting to 0")
        new_table["derot_angle"][:] = 0

    # Duplicate raj and decj columns (in order to be corrected later)
    Nfeeds = np.max(allfeeds) + 1
    new_table["ra"] = np.tile(
        data_table_data["raj2000"], (Nfeeds, 1)
    ).transpose()
    new_table["dec"] = np.tile(
        data_table_data["decj2000"], (Nfeeds, 1)
    ).transpose()
    new_table["el"] = np.tile(data_table_data["el"], (Nfeeds, 1)).transpose()
    new_table["az"] = np.tile(data_table_data["az"], (Nfeeds, 1)).transpose()

    new_table.meta["is_skydip"] = infer_skydip_from_elevation(
        data_table_data["el"], data_table_data["az"]
    )

    for info in ["ra", "dec", "az", "el", "derot_angle"]:
        new_table[info].unit = u.radian

    # Old files: apply the feed offsets ourselves; new files: read the
    # per-feed precomputed coordinates from the CoordN extensions.
    if not is_new_fitszilla:
        update_table_with_offsets(new_table, xoffsets, yoffsets, inplace=True)
    else:
        for i in range(len(xoffsets)):
            try:
                ext = lchdulist["Coord{}".format(i)]
                extdata = ext.data
                ra, dec = extdata["raj2000"], extdata["decj2000"]
                el, az = extdata["el"], extdata["az"]
            except KeyError:
                # Missing CoordN extension: fall back to feed 0's coords.
                ra, dec = new_table["ra"][:, 0], new_table["dec"][:, 0]
                el, az = new_table["el"][:, 0], new_table["az"][:, 0]
            new_table["ra"][:, i] = ra
            new_table["dec"][:, i] = dec
            new_table["el"][:, i] = el
            new_table["az"][:, i] = az

    # Don't know if better euristics is needed
    obstime = Time(
        np.mean(new_table["time"]) * u.day, format="mjd", scale="utc"
    )
    if is_close_to_sun(
        new_table.meta["RA"],
        new_table.meta["Dec"],
        obstime,
        tolerance=3 * u.deg,
    ):
        if DEFAULT_SUN_FRAME is None:
            raise ValueError("You need Sunpy to process Sun observations.")
        update_table_with_sun_coords(
            new_table,
            sun_frame=DEFAULT_SUN_FRAME,
        )

    # NOTE(review): the caller opens the file with a ``with`` statement,
    # so this explicit close is redundant (though harmless).
    lchdulist.close()

    # So ugly. But it works
    filtered_frequencies = [f for (f, g) in zip(frequencies, good) if g]
    for i, fr in enumerate(filtered_frequencies):
        f = feeds[i]
        s = sections[i]
        ic = IFs[i]
        p = polarizations[i]
        b = bandwidths[i]
        lo = local_oscillator[i]
        cal = cal_mark_temp[i]
        c = s
        if is_single_channel:
            c = None
        chan_name = _chan_name(f, p, c)
        # NOTE(review): the next block indexes ``bandwidths``/``frequencies``
        # with ``ic`` (the IF number) while everything above uses ``i`` (the
        # filtered RF-input index) — and the inner loop iterates ``i`` but
        # always reverses row ``f``, so the same row is flipped repeatedly
        # (net effect depends on the row count's parity). Both look like
        # indexing bugs; confirm against datasets with negative bandwidth.
        if bandwidths[ic] < 0:
            frequencies[ic] -= bandwidths[ic]
            bandwidths[ic] *= -1
            for i in range(data_table_data[chan_name].shape[0]):
                data_table_data[chan_name][f, :] = data_table_data[chan_name][
                    f, ::-1
                ]
        # NOTE(review): ``relpowers[feeds[ic]]`` vs ``relpowers[f]`` below —
        # mixed ``ic``/``f`` indexing, verify intended.
        new_table[chan_name] = (
            data_table_data[chan_name] * relpowers[feeds[ic]]
        )
        new_table[chan_name + "-filt"] = np.ones(
            len(data_table_data[chan_name]), dtype=bool
        )
        data_table_data.remove_column(chan_name)
        newmeta = {
            "polarization": polarizations[ic],
            "feed": int(f),
            "IF": int(ic),
            "frequency": fr.to("MHz"),
            "bandwidth": b.to("MHz"),
            "sample_rate": sample_rate[s],
            "sample_time": (1 / (sample_rate[s].to(u.Hz))).to("s"),
            "local_oscillator": lo.to("MHz"),
            "cal_mark_temp": cal.to("K"),
            "integration_time": integration_time.to("s"),
            "xoffset": xoffsets[f].to(u.rad),
            "yoffset": yoffsets[f].to(u.rad),
            "relpower": float(relpowers[f]),
        }
        new_table[chan_name].meta.update(headerdict)
        new_table[chan_name].meta.update(new_table.meta)
        new_table[chan_name].meta.update(newmeta)

    if is_polarized:
        # Attach the Stokes Q/U columns extracted earlier, with metadata.
        for s in list(set(sections)):
            feed = feeds[sections == s][0]
            c = s
            if is_single_channel:
                c = None
            for stokes_par in "QU":
                chan_name = _chan_name(feed, stokes_par, c)
                try:
                    new_table[chan_name] = data_table_data[chan_name]
                except KeyError:
                    continue
                sample_time = 1 / (sample_rate[s].to(u.Hz))
                newmeta = {
                    "polarization": stokes_par,
                    "feed": int(feed),
                    "IF": -1,  # There are two IFs for each section
                    "frequency": frequencies[2 * s].to("MHz"),
                    "bandwidth": bandwidths[2 * s].to("MHz"),
                    "sample_rate": sample_rate[s],
                    "sample_time": sample_time.to("s"),
                    "local_oscillator": local_oscillator[2 * s].to("MHz"),
                    "cal_mark_temp": cal_mark_temp[2 * s].to("K"),
                    "integration_time": integration_time.to("s"),
                    "xoffset": xoffsets[feed].to(u.rad),
                    "yoffset": yoffsets[feed].to(u.rad),
                    "relpower": 1.0,
                }
                new_table[chan_name].meta.update(headerdict)
                new_table[chan_name].meta.update(new_table.meta)
                new_table[chan_name].meta.update(newmeta)
                new_table[chan_name + "-filt"] = np.ones(
                    len(data_table_data[chan_name]), dtype=bool
                )
                data_table_data.remove_column(chan_name)

    return new_table
data_table_data[chan_name] except KeyError: continue sample_time = 1 / (sample_rate[s].to(u.Hz)) newmeta = { "polarization": stokes_par, "feed": int(feed), "IF": -1, # There are two IFs for each section "frequency": frequencies[2 * s].to("MHz"), "bandwidth": bandwidths[2 * s].to("MHz"), "sample_rate": sample_rate[s], "sample_time": sample_time.to("s"), "local_oscillator": local_oscillator[2 * s].to("MHz"), "cal_mark_temp": cal_mark_temp[2 * s].to("K"), "integration_time": integration_time.to("s"), "xoffset": xoffsets[feed].to(u.rad), "yoffset": yoffsets[feed].to(u.rad), "relpower": 1.0, } new_table[chan_name].meta.update(headerdict) new_table[chan_name].meta.update(new_table.meta) new_table[chan_name].meta.update(newmeta) new_table[chan_name + "-filt"] = np.ones( len(data_table_data[chan_name]), dtype=bool ) data_table_data.remove_column(chan_name) return new_table def read_data(fname): """Read the data, whatever the format, and return them.""" kind = detect_data_kind(fname) if kind == "fitszilla": return read_data_fitszilla(fname) elif kind == "hdf5": return Table.read(fname) else: return None def root_name(fname): """Return the file name without extension.""" fn, ext = os.path.splitext(fname) if "fits" in ext and not ext.endswith("fits"): fn += ext.replace("fits", "").replace(".", "") return fn def _try_type(value, dtype): """ Examples -------- >>> _try_type("1", int) 1 >>> _try_type(1.0, int) 1 >>> _try_type("ab", float) 'ab' """ try: return dtype(value) except ValueError: return value def label_from_chan_name(ch): """ Examples -------- >>> label_from_chan_name('Feed0_LCP_1') 'LL' >>> label_from_chan_name('Feed0_Q_2') 'LR' >>> label_from_chan_name('Feed3_RCP_1') 'RR' >>> label_from_chan_name('Feed2_U_3') 'RL' """ _, polar, _ = interpret_chan_name(ch) if polar.startswith("L"): return "LL" elif polar.startswith("R"): return "RR" elif polar.startswith("Q"): return "LR" elif polar.startswith("U"): return "RL" else: raise ValueError("Unrecognized polarization") def 
def bulk_change(file, path, value):
    """Bulk change keyword or column values in FITS file.

    Parameters
    ----------
    file : str
        Input file
    path : str
        it has to be formatted as EXT,data,COLUMN or EXT,header,KEY
        depending on what is being changed (a data column or a header
        key resp.). Ex. 1,TIME to change the values of column TIME in
        ext. n. 1
    value : any acceptable type
        Value to be filled in
    """
    with fits.open(file, memmap=False) as hdul:
        ext, attr, key = path.split(",")
        # The extension may be addressed by number or by name.
        ext = _try_type(ext, int)
        data = getattr(hdul[ext], attr)
        data[key] = value
        setattr(hdul[ext], attr, data)
        # Write to a scratch file, then atomically replace the original.
        hdul.writeto("tmp.fits", overwrite=True)
    force_move_file("tmp.fits", file)


def main_bulk_change(args=None):
    """Preprocess the data.

    Command-line entry point: parse arguments, resolve the file list
    (optionally searching subdirectories), and apply :func:`bulk_change`
    to every file.
    """
    import argparse

    description = (
        "Change all values of a given column or header keyword in "
        "fits files"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "files",
        nargs="*",
        help="Single files to preprocess",
        default=None,
        type=str,
    )
    parser.add_argument(
        "-k",
        "--key",
        type=str,
        default=None,
        help="Path to key or data column. E.g. "
        '"EXT,header,KEY" to change key KEY in the header'
        "in extension EXT; EXT,data,COL to change column"
        "COL in the data of extension EXT",
    )
    # NOTE(review): --value is kept as a string (type=str); numeric columns
    # will receive string values unless the caller converts — confirm.
    parser.add_argument(
        "-v", "--value", default=None, type=str, help="Value to be written"
    )
    parser.add_argument(
        "--apply-cal-mark",
        action="store_true",
        default=False,
        help='Short for -k "DATA TABLE,data,flag_cal" -v 1',
    )
    parser.add_argument(
        "--recursive",
        action="store_true",
        default=False,
        help="Look for file in up to two subdirectories",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="Plot stuff and be verbose",
    )

    args = parser.parse_args(args)

    # Convenience flag: mark all samples as calibration-mark-on.
    if args.apply_cal_mark:
        args.key = "DATA TABLE,data,flag_cal"
        args.value = 1

    if args.key is None:
        raise ValueError(
            "What should I do? Please specify either key and "
            "value, or apply-cal-mark"
        )
    fnames = []
    for fname in args.files:
        if args.recursive:
            # Recursive mode expects bare file names and globs for them.
            if not fname == os.path.basename(fname):
                raise ValueError(
                    "Options recursive requires a file name, not "
                    "a full path: {}".format(fname)
                )
            fs = glob.glob(os.path.join("**", fname), recursive=True)
            fnames.extend(fs)
        else:
            fnames.append(fname)

    for fname in fnames:
        print("Updating", fname, "...", end="")
        bulk_change(fname, args.key, args.value)
        print(fname, "  Done.")
36,287
12,833
from torch.utils.tensorboard import SummaryWriter TENSORBOARD_LOGGER : SummaryWriter TRAINING_LOG_STEP = 0 AUGMENTATION_LOG_STEP = 0 TI_LOG_STEP = 0 DEBUG_LOG_STEP = 0 LOG_INTERVAL = -1
187
83
#!/usr/bin/env python3
# coding=utf-8
# Evaluation script: loads "taskgroups" dumps from one or more experiments
# and plots placement-latency CCDF curves per scheduler.
import gc
import logging
import os
import sys

sys.path.append(
    # Figure out the path of the evaluation directory
    os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
)

from evals.base_evaluate import experiments_directory, get_evaluation_params, finish_evaluation, EvalParams, \
    foreach_sweep_constellation
from helpers.data_table_mappers import eval_after_24h_mapper, remove_withdrawn_jobs_from_tenant_statistics, \
    eval_tasks_not_started_mapper, \
    summarize_cdf
from helpers.experiment_loader import load_multiple_experiments
from helpers.visualization_helpers import set_paper_aesthetics, plot_rel_multiple

logging.basicConfig(level=logging.INFO, format='%(levelname)8s: %(message)s -- in %(filename)s:%(lineno)d')


def str_to_safe_filename(s):
    """Strip every character that is not alphanumeric or a space."""
    return "".join([c for c in s if c.isalpha() or c.isdigit() or c == ' ']).rstrip()


# Axis label constant; NOTE(review): unused in this script — kept for parity
# with sibling evaluation scripts, confirm before removing.
label_x_inp = "Jobs with INC, µ [%]"

if __name__ == '__main__':
    # Load the experiment name and output directory from program args
    params: EvalParams = get_evaluation_params(
        tmp_directory_suffix="paper-latency-cdf",
        name="paper-latency-cdf"
    )

    # This plot only makes sense for a single mu-inp value; force it if absent.
    if 'mu-inp' not in params.filter:
        logging.warning("strictly enforce filter of mu-inp=1.0")
        params.filter['mu-inp'] = ["1.0"]

    # We are interested in the cell and scheduler dumps
    types = [
        "taskgroups"
    ]
    # Mappers applied to every loaded run, in order.
    mappers = {
        "taskgroups": [eval_after_24h_mapper,
                       remove_withdrawn_jobs_from_tenant_statistics,
                       eval_tasks_not_started_mapper(params),
                       summarize_cdf(params, 'PlacementLatency'),
                       ],
    }
    # Which columns should be kept for each type
    columns = {
        "taskgroups": ["scheduler", "JobStatus", "TaskGroupID", "TaskGroupType",
                       "ValidForJob", "Duration", "run", "SubmissionTime",
                       "TotalTasks", "TasksStarted", "PlacementLatency",
                       "PlacementLatency_cdf_bucket", "PlacementLatency_cdf_value",
                       "PlacementLatency_cdf_cvalue",
                       params.sweep_column]
    }

    # Load the evaluation experiment
    (data, config) = load_multiple_experiments(
        directory=experiments_directory,
        names=[params.experiment_name] + params.additional_experiments,
        types=types,
        run_data_mappers=mappers,
        run_data_columns=columns,
        filter_data=params.filter,
        ignore_cols=params.ignore_cols,
        done_required=params.load_running,
        keep_sweep_columns={params.sweep_column} if params.drop_unused_sweep_cols else {"*"},
        sweep_column=params.sweep_column)

    # set_paper_aesthetics(font_scale=2, line_width=2.5)
    # NOTE(review): the four values below are unused in this script — confirm
    # whether they were meant to be passed to the plot helpers.
    fig_width = 7.5
    fig_height = 6.5
    linewidth = 2.2
    show_legend = True

    gc.collect()

    set_paper_aesthetics()

    def evaluate(dataframes: dict, i):
        """Plot one CCDF figure per mu-inp value found in the taskgroups data."""
        df = dataframes["taskgroups"]

        for mu in df["mu-inp"].unique():
            plot_rel_multiple(
                x="PlacementLatency_cdf_bucket",
                y="PlacementLatency_cdf_cvalue",
                x_label=f'Placement latency ' + r' [ms; $log_{10}$]',
                y_label=r"Probability ($> x$)",
                hue="scheduler",
                data=df[df["mu-inp"] == mu],
                name=f"latency-{mu}-ccdf",
                params=params,
                reverse_hue_order=True
            )

    # Run the evaluation callback for every sweep constellation, then clean up.
    foreach_sweep_constellation(
        data=data,
        sweeps=config['sweeps'],
        config=config,
        params=params,
        sink=evaluate
    )

    finish_evaluation(
        params=params
    )
3,946
1,256
import sys sys.path.append('/home/user/path/to/0-defs/') # sys.path.append('/home/xneb/Skrivbord/DTbkp.d/0-defs/') import icons_pycons icons_pycons.SAVE_current_layout()
177
85
# -*- coding: utf-8 -*- """ Functions in order to make complex overpass query through osmnx. Use get_filter_graphs to get graph for specific filter, then create the union with compose_graph. Is a proxy of the lack of "or" syntax between keys, only able to make "or" inside keys or "and" between keys. """ import osmnx as ox import networkx as nx import shapely from tqdm import tqdm def get_filtered_graphs(polygon, filter_dict): """ Get every filtered graph coming from a polygon, based on value given as a dictionary. Parameters ---------- polygon : shapely.MultiPolygon Polygon where we will search for with osmnx.graph_from_polygon. filter_dict : dict Dictionary composed of dictionaries with the following structure : {'name1' : {'custom_filter' : filter1}, 'name2' : {'network_type'} : type2} The dictionary for every key have either 'custom_filter' or 'network_type' as a key, and the following filter or type as a value. Returns ------- graph_dict : dict Dictionary of every graph. """ graph_dict = dict() for filter_info in filter_dict.values(): # add useful tag way into if 'custom_filter' in filter_info: # the osmnx settings to get them _add_tag_way(filter_info['custom_filter']) for filter_id, filter_info in tqdm(filter_dict.items(), #tqdm show progress desc='Networks', leave=True): for i in range(0, 10): # retry try: # either custom_filter or network_type, get graph with osmnx if 'custom_filter' in filter_info: graph_dict[filter_id] = ox.graph_from_polygon( polygon, custom_filter=(filter_info['custom_filter']), retain_all=True, simplify=False ) elif 'network_type' in filter_info: graph_dict[filter_id] = ox.graph_from_polygon( polygon, network_type = (filter_info['network_type']), retain_all=True, simplify=False ) except ValueError: # for empty graph because of the filter used graph_dict[filter_id] = nx.empty_graph( create_using=nx.MultiDiGraph ) break except: continue break return graph_dict def _add_tag_way(filter_string): """Add way's tag if not present in the osmnx 
def _add_tag_way(filter_string):
    """Add way's tag if not present in the osmnx settings.

    Parses tag names out of an Overpass custom filter string like
    ``'["cycleway"~"track"]'`` and appends any missing ones to
    ``ox.settings.useful_tags_way`` (a module-level side effect).
    """
    split_string = filter_string.split(']')  # mark end of one attribute
    for i in range(len(split_string) - 1):  # avoid last void value
        split_string[i] = split_string[i].split('"')
        split_string[i] = split_string[i][1]  # avoid first [ string
    for tag_name in split_string[:-1]:  # avoid last void string
        if tag_name not in ox.settings.useful_tags_way:
            ox.settings.useful_tags_way += [tag_name]


def compose_graph(graph_dict, composed_name, name_list):
    """
    Compose multiple graph together coming from a dictionary under a new
    entry of the dictionary.

    Parameters
    ----------
    graph_dict : dict
        Dictionary of every graph.
    composed_name : str
        Name of the new key of graph_dict with the composed graph.
    name_list : list of str
        Keys of the graph we want to merge.

    Raises
    ------
    ValueError
        If the number of key is inferior to 2, can't merge one graph.

    Returns
    -------
    graph_dict : dict
        Dictionary of every graph (same object, mutated in place).
    """
    if len(name_list) < 2:  # can't compose
        raise ValueError('Not enough subgraph to compose, need at least 2')
    elif len(name_list) == 2:  # if exactly 2 use networkx.compose
        graph_dict[composed_name] = nx.compose(graph_dict[name_list[0]],
                                               graph_dict[name_list[1]])
    else:  # more than 2 use networkx.compose_all
        subgraph_list = []
        for name in name_list:
            subgraph_list.append(graph_dict[name])
        graph_dict[composed_name] = nx.compose_all(subgraph_list)
    return graph_dict


if __name__ == "__main__":
    # Build the union of Copenhagen + Frederiksberg as the query polygon.
    cop = ox.geocode_to_gdf("Copenhagen Municipality")
    fre = ox.geocode_to_gdf("Frederiksberg Municipality")
    location = shapely.ops.unary_union([cop['geometry'][0],
                                        fre['geometry'][0]])
    # One Overpass filter (or osmnx network type) per sub-network of interest.
    osmnxparameters = {'car30': {'custom_filter': '["maxspeed"~"^30$|^20$|^15$|^10$|^5$|^20 mph|^15 mph|^10 mph|^5 mph"]'},
                       'carall': {'network_type': 'drive'},
                       'bike_cyclewaytrack': {'custom_filter': '["cycleway"~"track"]'},
                       'bike_highwaycycleway': {'custom_filter': '["highway"~"cycleway"]'},
                       'bike_designatedpath': {'custom_filter': '["highway"~"path"]["bicycle"~"designated"]'},
                       'bike_cyclewayrighttrack': {'custom_filter': '["cycleway:right"~"track"]'},
                       'bike_cyclewaylefttrack': {'custom_filter': '["cycleway:left"~"track"]'},
                       'bike_cyclestreet': {'custom_filter': '["cyclestreet"]'},
                       'bike_bicycleroad': {'custom_filter': '["bicycle_road"]'},
                       'bike_livingstreet': {'custom_filter': '["highway"~"living_street"]'}
                       }
    Gs = get_filtered_graphs(location, osmnxparameters)
    # Union of all dedicated cycling infrastructure.
    Gs = compose_graph(Gs, 'biketrack',
                       ['bike_cyclewaylefttrack', 'bike_cyclewaytrack',
                        'bike_highwaycycleway', 'bike_bicycleroad',
                        'bike_cyclewayrighttrack', 'bike_designatedpath',
                        'bike_cyclestreet'])
    # Everything reasonably bikeable (tracks + low-speed + living streets).
    Gs = compose_graph(Gs, 'bikeable',
                       ['biketrack', 'car30', 'bike_livingstreet'])
    Gs = compose_graph(Gs, 'biketrackcarall', ['biketrack', 'carall'])
    #nx.write_graphml(Gs['biketrack'], 'copenhagen_biketrack.graphml')
    # NOTE(review): nx.write_gpickle was removed in networkx 3.0 — confirm
    # the pinned networkx version still provides it.
    nx.write_gpickle(Gs['biketrack'], 'copenhagen_biketrack.pickle')
6,814
1,909
"""POST URLS""" from django.urls import path # Views from posts import views urlpatterns = [ path(route = '', view=views.PostsFeedView.as_view(), name='feed'), path(route = 'posts/new/', view = views.PostCreateView.as_view(), name='create'), path(route='posts<int:pk>', view=views.PostDetailView.as_view(), name='detail') ]
340
117
# -*- coding: utf-8 -*- """ HGVS Variants ~~~~~~~~~~~~~ For example, the node :code:`p(HGNC:GSK3B, var(p.Gly123Arg))` is represented with the following: .. code:: { FUNCTION: PROTEIN, NAMESPACE: 'HGNC', NAME: 'GSK3B', VARIANTS: [ { KIND: HGVS, IDENTIFIER: 'p.Gly123Arg' } ] } .. seealso:: - BEL 2.0 specification on `variants <http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#_variant_var>`_ - HVGS `conventions <http://www.hgvs.org/mutnomen/recs.html>`_ - PyBEL module :py:class:`pybel.parser.modifiers.get_hgvs_language` """ from pyparsing import Word, alphanums from ..utils import nest, one_of_tags, quote from ...constants import HGVS, IDENTIFIER, KIND __all__ = [ 'get_hgvs_language', ] variant_tags = one_of_tags(tags=['var', 'variant'], canonical_tag=HGVS, name=KIND) variant_characters = Word(alphanums + '._*=?>') def get_hgvs_language(): """ :rtype: pyparsing.ParseElement """ hgvs = (variant_characters | quote)(IDENTIFIER) language = variant_tags + nest(hgvs) return language
1,168
457
from rest_framework import status from rest_framework.views import APIView from rest_framework.response import Response from campi import serializers class GetSerializerClassMixin(object): def get_queryset(self): try: return self.queryset_action_classes[self.action] except (KeyError, AttributeError): return super().get_queryset() def get_serializer_class(self): try: return self.serializer_action_classes[self.action] except (KeyError, AttributeError): return super().get_serializer_class() class CurrentUserView(APIView): def get(self, request, format=None): current_user = request.user serialized_user = serializers.UserSerializer( current_user, context={"request": request} ) return Response(serialized_user.data, status=status.HTTP_200_OK)
887
238
class N1qlException(object):
    """Substrings used to recognize N1QL/KV error conditions in server messages."""

    # CAS / durability conflicts
    CasMismatchException = "CAS mismatch"
    SyncWriteInProgressException = "durable write in progress"
    # Document existence
    DocumentExistsException = "document already exists"
    DocumentNotFoundException = "document not found"
    DocumentAlreadyExistsException = "Duplicate Key"
    # Transactions / resource limits
    AtrNotFoundException = "atr not found"
    MemoryQuotaError = "Request has exceeded memory quota"
397
102
# Copyright (c) 2015 Jonathan M. Lange <jml@mumak.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Simple example client.

Registers two users, starts a two-player game, and plays it to completion
by choosing random valid plays for whichever player's turn it is.
"""

import random

from pyrsistent import pmap

# TODO: Export from public names.
from hazard._client import (
    get_game_info,
    get_round_info,
    join_game,
    play_turn,
    register_game,
    register_user,
)
from hazard._rules import iter_valid_plays
from hazard._client import _make_credentials


# The server running Hazard. https://haverer.jml.io/ in production.
BASE_URL = 'http://localhost:3000'

# TODO: These endpoints ought to be in the library, rather than something that
# users need to know.
USERS_ENDPOINT = BASE_URL + '/users'
GAMES_ENDPOINT = BASE_URL + '/games'


def get_game_endpoint(game):
    # TODO: This also should be in the library.
    return BASE_URL + game['url']


def get_round_endpoint(round_url):
    # TODO: This also should be in the library.
    return BASE_URL + round_url


def player_info(round_info, player_id):
    """Return the entry for ``player_id`` in the round's player list, or None."""
    for info in round_info['players']:
        if info['id'] == player_id:
            return info


def get_status(player):
    """Return a human-readable suffix describing a player's state."""
    if player['active']:
        if player['protected']:
            return ' (protected)'
        else:
            return ''
    else:
        return ' (eliminated)'


def print_round_info(round_info):
    """Print each player's status, marking the current player with '*'."""
    current_player = round_info['currentPlayer']
    print('Players:')
    for player in round_info['players']:
        status = get_status(player)
        if player['id'] == current_player:
            current = '* '
        else:
            current = '  '
        print('{}{}{}: {}'.format(
            current, player['id'], status, player['discards']))
    print()


def choose_play(hand, dealt_card, myself, others):
    """Pick a random valid play, or None when no valid play exists."""
    valid_plays = list(iter_valid_plays(hand, dealt_card, myself, others))
    try:
        return random.choice(valid_plays)
    except IndexError:
        return None


def play_round(users, round_url):
    """Play one round to completion, returning the winners list."""
    while True:
        # Figure out whose turn it is
        round_info = get_round_info(None, round_url)
        print_round_info(round_info)
        current_player_id = round_info.get('currentPlayer', None)
        if not current_player_id:
            return round_info['winners']
        # Play as that person
        current_player = users[current_player_id]
        current_player_creds = _make_credentials(current_player)
        current_player_view = get_round_info(current_player_creds, round_url)
        # Figure out their hand
        dealt_card = current_player_view['dealtCard']
        hand = player_info(current_player_view, current_player_id)['hand']
        others = [
            p['id'] for p in round_info['players']
            if p['id'] != current_player_id]
        # Choose a play at random.
        # BUG FIX: the original passed (dealt_card, hand, ...) but
        # choose_play's signature is (hand, dealt_card, ...); the first two
        # arguments were swapped.
        play = choose_play(hand, dealt_card, current_player_id, others)
        print('Playing: {}'.format(play))
        response = play_turn(current_player_creds, round_url, play)
        print('Result: {}'.format(response))


def main():
    # Register two users, 'foo' and 'bar'.
    foo = register_user(USERS_ENDPOINT, 'foo')
    foo_creds = _make_credentials(foo)
    bar = register_user(USERS_ENDPOINT, 'bar')
    bar_creds = _make_credentials(bar)
    users = pmap({
        foo['id']: foo,
        bar['id']: bar,
    })
    # 'foo' creates a 2-player game
    game = register_game(foo_creds, GAMES_ENDPOINT, 2)
    game_url = get_game_endpoint(game)
    # 'bar' joins the game, and the game begins.
    join_game(bar_creds, game_url)
    while True:
        game = get_game_info(None, game_url)
        print('Game: {}'.format(game))
        if game['state'] != 'in-progress':
            break
        current_round_url = get_round_endpoint(game['currentRound'])
        winners = play_round(users, current_round_url)
        print('Round over. Winners: {}'.format(winners))
    print('Game over. Winners: {}'.format(game['winners']))


if __name__ == '__main__':
    main()
4,453
1,425
# -*- Mode: Python3; coding: utf-8; indent-tabs-mpythoode: nil; tab-width: 4 -*-

import os

import numpy as np
from matplotlib import pyplot as plt, colors as colors

PATH = "../Images/"

# Channel indices into the last axis of the image array.
RED = 0
GREEN = 1
BLUE = 2


def view(data, X, Y, title="2D histogram"):
    """Show a log-scaled 2D histogram of channel X vs channel Y."""
    c = ["RED", "GREEN", "BLUE"]

    dataX = data[:, :, X].flatten()
    dataY = data[:, :, Y].flatten()
    bins = np.arange(0, 256)

    # plot
    plt.hist2d(dataX, dataY, bins, norm=colors.LogNorm())
    plt.title(title)
    plt.xlabel(c[X])
    plt.ylabel(c[Y])
    plt.xlim([0, 255])
    plt.ylim([0, 255])
    plt.colorbar()
    plt.show()


def test(filename):
    """Load <PATH><filename>.npy and plot all three channel-pair histograms."""
    img_np = PATH + filename + ".npy"
    print("Data: ", img_np)

    if not os.path.exists(img_np):
        print("File not found!")
        return

    data = np.load(img_np)
    h, w, c = data.shape
    if c > 3:
        # Drop the alpha channel (keep RGB only).
        data = data[:, :, :3]

    for first, second in ((RED, GREEN), (RED, BLUE), (GREEN, BLUE)):
        view(data, first, second)


if __name__ == '__main__':
    # ndArray (Imagem)
    test("folha_croton")
1,044
453
"""Config Flow to configure WeatherFlow Integration."""
from __future__ import annotations

import logging

import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_TOKEN, CONF_ID
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from pyweatherflowrest import (
    BadRequest,
    Invalid,
    NotAuthorized,
    WeatherFlowApiClient,
    WrongStationID,
)
from pyweatherflowrest.data import StationDescription

from .const import (
    CONF_FORECAST_HOURS,
    CONF_INTERVAL_FORECAST,
    CONF_INTERVAL_OBSERVATION,
    CONF_STATION_ID,
    DEFAULT_FORECAST_HOURS,
    DEFAULT_FORECAST_INTERVAL,
    DEFAULT_OBSERVATION_INTERVAL,
    DOMAIN,
)

_LOGGER = logging.getLogger(__name__)


class WeatherFlowFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a WeatherFlow config flow."""

    VERSION = 1

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        # First pass: nothing submitted yet, just render the empty form.
        if user_input is None:
            return await self._show_setup_form(user_input)

        errors = {}

        session = async_create_clientsession(self.hass)

        weatherflow = WeatherFlowApiClient(
            user_input[CONF_STATION_ID], user_input[CONF_API_TOKEN], session=session
        )
        # Validate the station id / token by hitting the API once; each
        # failure mode maps to a distinct error key re-shown in the form.
        try:
            await weatherflow.initialize()
            station_data: StationDescription = weatherflow.station_data
        except WrongStationID as err:
            _LOGGER.debug(err)
            errors["base"] = "wrong_station_id"
            return await self._show_setup_form(errors)
        except Invalid as err:
            _LOGGER.debug(err)
            errors["base"] = "invalid_data"
            return await self._show_setup_form(errors)
        except NotAuthorized as err:
            _LOGGER.debug(err)
            errors["base"] = "wrong_token"
            return await self._show_setup_form(errors)
        except BadRequest as err:
            _LOGGER.debug(err)
            errors["base"] = "bad_request"
            return await self._show_setup_form(errors)

        # Use the station key as the unique id so the same station cannot be
        # configured twice.
        unique_id = str(station_data.key)
        await self.async_set_unique_id(unique_id)
        self._abort_if_unique_id_configured()

        return self.async_create_entry(
            title=station_data.name,
            data={
                CONF_ID: station_data.name,
                CONF_STATION_ID: user_input[CONF_STATION_ID],
                CONF_API_TOKEN: user_input[CONF_API_TOKEN],
            },
            # Options start at integration defaults; adjustable later via the
            # options flow below.
            options={
                CONF_FORECAST_HOURS: DEFAULT_FORECAST_HOURS,
                CONF_INTERVAL_OBSERVATION: DEFAULT_OBSERVATION_INTERVAL,
                CONF_INTERVAL_FORECAST: DEFAULT_FORECAST_INTERVAL,
            },
        )

    async def _show_setup_form(self, errors=None):
        """Show the setup form to the user."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_STATION_ID): int,
                    vol.Required(CONF_API_TOKEN): str,
                }
            ),
            errors=errors or {},
        )


class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle options."""

    def __init__(self, config_entry):
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Pre-populate each field with the stored option (falling back to the
        # integration default) and clamp inputs to the supported ranges.
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_INTERVAL_OBSERVATION,
                        default=self.config_entry.options.get(
                            CONF_INTERVAL_OBSERVATION, DEFAULT_OBSERVATION_INTERVAL
                        ),
                    ): vol.All(vol.Coerce(int), vol.Range(min=1, max=30)),
                    vol.Optional(
                        CONF_FORECAST_HOURS,
                        default=self.config_entry.options.get(
                            CONF_FORECAST_HOURS, DEFAULT_FORECAST_HOURS
                        ),
                    ): vol.All(vol.Coerce(int), vol.Range(min=24, max=240)),
                    vol.Optional(
                        CONF_INTERVAL_FORECAST,
                        default=self.config_entry.options.get(
                            CONF_INTERVAL_FORECAST, DEFAULT_FORECAST_INTERVAL
                        ),
                    ): vol.All(vol.Coerce(int), vol.Range(min=15, max=120)),
                }
            ),
        )
4,931
1,461
import mysql.connector
import configparser
import os
import uuid


class Manifest:
    """Thin DAO over the MANIFEST MySQL table (one row per whole-slide image).

    Connection settings come from ``config.ini`` (section ``[db]``) in the
    current working directory. Every method opens a fresh connection and
    closes it before returning.
    """

    def __init__(self):
        # NOTE: config path is relative to the process CWD, not this file.
        config_file = os.getcwd() + '/config.ini'
        self.config = configparser.ConfigParser()
        self.config.read(config_file, encoding='utf-8')
        # Smoke-test the connection settings at construction time.
        db = self.db_connect()
        db.close()

    def db_connect(self):
        """Open and return a new MySQL connection from [db] config values."""
        return mysql.connector.connect(
            host=self.config['db']['host'],
            user=self.config['db']['user'],
            passwd=self.config['db']['passwd'],
            database=self.config['db']['database']
        )

    def insert(self, slide_uuid=None, svs_file=None, smaller_image=None,
               background_mask=None):
        """Insert a new MANIFEST row unless one already exists for svs_file.

        A random UUID is generated when slide_uuid is not supplied.
        """
        db = self.db_connect()
        cursor = db.cursor()
        if slide_uuid is None:
            slide_uuid = str(uuid.uuid4())
        cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file = %s)", (svs_file,))
        if len(cursor.fetchall()) == 0:
            sql = "INSERT INTO MANIFEST( UUID, SVS_file, Smaller_image, Background_mask) VALUES (%s, %s, %s, %s)"
            val = (slide_uuid, svs_file, smaller_image, background_mask)
            cursor.execute(sql, val)
        # NOTE(review): the original (whitespace-mangled) source is ambiguous
        # about whether the cleanup below sat inside the `if`; it is placed at
        # method level here so the connection is closed on the duplicate path
        # too — confirm against version control.
        cursor.close()
        db.commit()
        db.close()

    def get_projects(self):
        """Return all MANIFEST rows."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM MANIFEST")
        result = cursor.fetchall()
        db.close()
        return result

    def get_project_by_id(self, slide_id):
        """Return the row with the given numeric ID.

        Raises IndexError when no row matches (fetchall()[0]).
        """
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM MANIFEST WHERE (ID = %s)", (slide_id,))
        result = cursor.fetchall()[0]
        cursor.close()
        db.close()
        return result

    def get_project_by_uuid(self, slide_uuid):
        """Return the row with the given UUID (IndexError when absent)."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM MANIFEST WHERE (UUID = %s)", (slide_uuid,))
        result = cursor.fetchall()[0]
        cursor.close()
        db.close()
        return result

    def get_project_by_svs_file(self, svs_file):
        """Return the row whose SVS_file matches exactly (IndexError when absent)."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file = %s)", (svs_file,))
        result = cursor.fetchall()[0]
        cursor.close()
        db.close()
        return result

    def get_project_by_similar_svs_file(self, svs_file):
        """Return all rows whose SVS_file contains the given substring."""
        db = self.db_connect()
        cursor = db.cursor()
        # Parameterized LIKE: wildcards are added to the value, not the SQL.
        svs_file = '%' + svs_file + '%'
        cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file like %s)", (svs_file,))
        result = cursor.fetchall()
        cursor.close()
        db.close()
        return result

    def update_svs_file_by_id(self, slide_id, svs_file):
        """Set SVS_file for the row with the given ID."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("UPDATE MANIFEST SET SVS_file= %s WHERE (ID = %s)", (svs_file, slide_id))
        cursor.close()
        db.commit()
        db.close()

    def update_smaller_image_by_id(self, slide_id, smaller_image):
        """Set Smaller_image for the row with the given ID."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("UPDATE MANIFEST SET Smaller_image = %s WHERE (ID = %s)", (smaller_image, slide_id))
        cursor.close()
        db.commit()
        db.close()

    def update_background_mask_by_id(self, slide_id, background_mask):
        """Set Background_mask for the row with the given ID."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("UPDATE MANIFEST SET Background_mask = %s WHERE (ID = %s)", (background_mask, slide_id))
        cursor.close()
        db.commit()
        db.close()

    def delete_project_by_id(self, slide_id):
        """Delete the row with the given ID."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("DELETE FROM MANIFEST WHERE (ID = %s)", (slide_id,))
        cursor.close()
        db.commit()
        db.close()

    def delete_all_projects(self):
        """Remove every row (TRUNCATE also resets AUTO_INCREMENT)."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("TRUNCATE TABLE MANIFEST")
        cursor.close()
        db.commit()
        db.close()

    def continue_id(self):
        """Renumber IDs into a gapless 1..N sequence in current row order."""
        db = self.db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM MANIFEST")
        result = cursor.fetchall()
        slide_id = 0
        for wsi in result:
            slide_id = slide_id + 1
            # wsi[0] is assumed to be the ID column — TODO confirm schema order.
            if wsi[0] != slide_id:
                cursor.execute("UPDATE MANIFEST SET ID = %s WHERE (ID = %s)", (slide_id, wsi[0]))
        # NOTE(review): as in insert(), the mangled source is ambiguous about
        # whether this cleanup sat inside the loop; placed after it here.
        cursor.close()
        db.commit()
        db.close()
4,436
1,427
# Opens the NWB conversion GUI
# authors: Luiz Tauffer and Ben Dichter
# written for Giocomo Lab
# ------------------------------------------------------------------------------
from nwbn_conversion_tools.gui.nwbn_conversion_gui import nwbn_conversion_gui

# Conversion assets.
metafile = 'metafile.yml'
conversion_module = 'conversion_module.py'

# Input files the conversion module expects; paths are filled in via the GUI.
source_paths = {
    'spikeglx data': {'type': 'file', 'path': ''},
    'processed data': {'type': 'file', 'path': ''},
}

# Other options: which conversion steps are enabled by default.
kwargs = {'spikeglx': True, 'processed': False}

nwbn_conversion_gui(
    metafile=metafile,
    conversion_module=conversion_module,
    source_paths=source_paths,
    kwargs_fields=kwargs,
)
678
222
from cosrlib.document.html import HTMLDocument


def test_get_title():
    # <title> is honoured whether it sits in <head>, directly under <html>,
    # or is shadowed by a later <title> in <body> (first one wins).
    assert HTMLDocument(
        """<html><head><title>Test title</title></head><body>x</body></html>"""
    ).parse().get_title() == "Test title"

    assert HTMLDocument(
        """<html><title>Test title</title>XX</html>"""
    ).parse().get_title() == "Test title"

    assert HTMLDocument(
        """<html><head><title>Test title</title></head><body><title>x</title></body></html>"""
    ).parse().get_title() == "Test title"


def test_get_url_words():
    # Words are split from the host and path, lowercased; query string and
    # fragment are dropped.
    doc = HTMLDocument("", url="http://www.nytimes.com/2011/10/06/arts/music/maceo-parker.html?print=true#hash").parse()
    assert doc.get_url_words() == [
        "nytimes", "com", "2011", "10", "06", "arts", "music", "maceo", "parker", "html"
    ]

    doc = HTMLDocument("", url="https://en.wikipedia.org/wiki/Nine_Inch_Nails").parse()
    assert doc.get_url_words() == [
        "en", "wikipedia", "org", "wiki", "nine", "inch", "nails"
    ]


def test_get_domain_paid_words():
    # Only the registrable ("paid-level") part of the domain, minus the
    # public suffix, is returned.
    doc = HTMLDocument("", url="http://www.bbc.co.uk/2011/10/06/arts/music/maceo-parker.html?print=true")
    assert doc.get_domain_paid_words() == ["bbc"]


def test_get_url():
    # When none is given, we take the URL
    html = """<html><head></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.get_url().url == "http://example.com/page.html"

    # But when a tag is present, it has precedence
    html = """<html><head><link rel="canonical" href="http://example.com/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.get_url().url == "http://example.com/page2.html"

    # Including with strange caps
    html = """<htmL><heaD><linK reL="CANonical" hreF="http://example.com/Page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.get_url().url == "http://example.com/Page2.html"


def test_get_canonical_url():
    # No <link rel=canonical> at all -> None.
    html = """<html><head></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.parse_canonical_url() is None

    # Empty href -> None.
    html = """<html><head><link rel="canonical" href="" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.parse_canonical_url() is None

    html = """<html><head><link rel="canonical" href="http://example.com/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.parse_canonical_url().url == "http://example.com/page2.html"

    # Attribute names/values are matched case-insensitively.
    html = """<html><head><linK reL="caNonical" hreF="http://example.com/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.parse_canonical_url().url == "http://example.com/page2.html"

    # Cross domain blocked for now
    html = """<html><head><linK reL="caNonical" hreF="http://example2.com/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()
    assert page.parse_canonical_url() is None

    # Relative URLs
    html = """<html><head><linK reL="caNonical" hreF="/dir2/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
    assert page.parse_canonical_url().url == "http://example.com/dir2/page2.html"

    html = """<html><head><linK reL="caNonical" hreF="dir2/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
    assert page.parse_canonical_url().url == "http://example.com/dir/dir2/page2.html"

    # Protocol-relative URLs inherit the page's scheme.
    html = """<html><head><linK reL="caNonical" hreF="//example.com/dir2/page2.html" /></head><body>x</body></html>"""
    page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
    assert page.parse_canonical_url().url == "http://example.com/dir2/page2.html"


def test_hidden_text():
    # Script/style/comment content and elements hidden via CSS, the HTML5
    # `hidden` attribute, or aria-hidden="true" must not contribute words.
    html = """<html><head></head><body>
    <script> hello(); </script>
    <style> style { good } </style>
    <!-- comment -->
    text
    <p>p</p>
    <div style='display: none;'>hidden by display</div>
    <div hidden>hidden by html5 attribute</div>
    <div aria-hidden="true">hidden by aria</div>
    <div aria-hidden="false">not_aria</div>
    <div style='visibility: hidden;'>hidden by visibility</div>
    </body></html>"""
    page = HTMLDocument(html).parse()
    assert page.get_all_words() == set(["text", "p", "not_aria"])


def test_get_hyperlinks():
    html = """<html><head></head><body>
    before <a href="http://example.com/page1">link text</a> after
    <a href="/page2">relative2</a>
    <a href="page3?q=1#d">relative3</a>
    <a href="http://other.example.com/page4">absolute4</a>
    <a href="//other.example.com/page5?q=1#d">absolute5</a>
    <a href="https://other.example.com/page6?q=1#d">absolute6</a>
    <a href="javascript:func()">js1</a>
    </body></html>"""
    page = HTMLDocument(html, url="http://example.com/page.html").parse()

    # External = different host; javascript: links are dropped entirely.
    links = page.get_external_hyperlinks()
    assert len(links) == 3
    assert links[0]["href"].url == "http://other.example.com/page4"
    assert links[0]["text"] == "absolute4"
    assert links[1]["href"].url == "http://other.example.com/page5?q=1#d"
    assert links[1]["text"] == "absolute5"
    assert links[2]["href"].url == "https://other.example.com/page6?q=1#d"
    assert links[2]["text"] == "absolute6"

    # This doesn't return URLs, it returns strings (they are paths)
    links = page.get_internal_hyperlinks()
    assert len(links) == 3
    assert links[0]["path"] == "/page1"
    assert links[0]["text"] == "link text"
    assert links[1]["path"] == "/page2"
    assert links[1]["text"] == "relative2"
    assert links[2]["path"] == "page3?q=1#d"
    assert links[2]["text"] == "relative3"

    # All links in absolute
    links = page.get_hyperlinks()
    assert len(links) == 6
    assert links[2]["href"].url == "http://example.com/page3?q=1#d"
6,276
2,234
from .cphttp import cphttp
26
8
import os
import json
import uuid
import pathlib

import redis
import pandas as pd
from nltk.corpus import stopwords
from tqdm import tqdm

from .conftest import RANDOM
from ..api.src.apis.utils import get_key

# connect via nginx to APIs and submit tests

# NOTE(review): `redis_store` is used throughout this module but never
# defined or imported here — presumably it is expected to come from test
# setup/fixtures. Verify, otherwise every push/get below raises NameError.


def get_stopwords(lang):
    """Return NLTK stopwords for `lang` merged with the project stoplist.

    :param lang: "english" or "german"
    :raises AssertionError: for any other language
    """
    # `__path__` only exists in packages; the NameError fallback keeps this
    # working when run as a plain module.
    try:
        loc = pathlib.Path(__path__).absolute().parent.parent.parent
    except NameError:
        loc = pathlib.Path.cwd().absolute().parent.parent
    assert lang in ["english", "german"]
    resourcedir = os.path.join(loc, "server", "preprocessing", "resources")
    # BUG FIX: the original always loaded stopwords.words('english') even
    # when lang == "german", while still reading the German project stoplist.
    stops = set(stopwords.words(lang))
    with open(os.path.join(resourcedir, "%s.stop" % lang), "r") as infile:
        add_stops = set(infile.read().splitlines())
    return stops.union(add_stops)


def get_cases(folder):
    """Load all testcase*.json files from tests/<folder>, sorted by filename.

    :return: list of {"caseid": <basename>, "casedata": <parsed JSON>}
    """
    try:
        loc = pathlib.Path(__path__).parent
    except NameError:
        loc = pathlib.Path.cwd()
    testdatadir = os.path.join(loc, "tests", folder)
    casefiles = sorted(
        f for f in os.listdir(testdatadir) if f.startswith("testcase")
    )
    cases = []
    for casefile in casefiles:
        with open(os.path.join(testdatadir, casefile)) as infile:
            testcase_ = json.load(infile)
        casename, _ = os.path.splitext(casefile)
        cases.append({"caseid": casename, "casedata": testcase_})
    return cases


def retrieve_results(casedata):
    """Submit a search job to the case's service queue and await its result."""
    k = str(uuid.uuid4())
    casedata["params"]["raw"] = True
    service = casedata["params"]["service"]
    d = {"id": k, "params": casedata["params"], "endpoint": "search"}
    redis_store.rpush(service, json.dumps(d))
    result = get_key(redis_store, k)
    return result


def get_dataprocessing_result(testcase_):
    """Push a testcase through the input_data queue; return a DataFrame."""
    k = str(uuid.uuid4())
    params = testcase_["params"]
    input_data = testcase_["input_data"]
    res = {"id": k, "params": params, "input_data": input_data}
    redis_store.rpush("input_data", json.dumps(res).encode('utf8'))
    result = get_key(redis_store, k)
    return pd.DataFrame.from_records(json.loads(result))


def data_generation(KNOWNCASES, RANDOMCASES):
    """Assemble case names and case data; random cases only when RANDOM is set."""
    CASENAMES = []
    CASEDATA = {}
    print("collecting known test cases")
    for c in tqdm(KNOWNCASES):
        CASENAMES.append(c["caseid"])
        CASEDATA[c["caseid"]] = c["casedata"]
    if RANDOM:
        print("collecting random test cases")
        for c in tqdm(RANDOMCASES):
            CASENAMES.append(c["caseid"])
            # Random cases are materialized by querying the live service.
            CASEDATA[c["caseid"]] = {
                "input_data": retrieve_results(c["casedata"])["input_data"],
                "params": c["casedata"]["params"]}
    return CASENAMES, CASEDATA


# Module-level fixture data, built once at import time.
KNOWNCASES = get_cases("knowncases")
RANDOMCASES = get_cases("randomcases")
#TRIPLE = get_cases("triple")
CASENAMES, CASEDATA = data_generation(KNOWNCASES, RANDOMCASES)
CASENAMES.sort()

RESULTS = {}
print("collecting dataprocessing results")
for c in tqdm(CASEDATA):
    RESULTS[c] = get_dataprocessing_result(CASEDATA[c])
2,975
1,038
import re

from regex import sub as rsub

from all_snippet_helpers import *


# Produce roxygen documentation
def render_roxygen(snip, args, signature, line):
    """Expand an anonymous roxygen skeleton above an R function definition.

    Tabstop numbers 1-5 cover the header; each parameter gets its own
    tabstop starting at 6, and the trailing sections are shifted past them.
    """
    mark = "#'"
    # Only generate argument tabstops if function actually has them
    args_text = (
        [mark + " @param " + arg + " $" + str(num + 6) for num, arg in enumerate(args)]
        if (offset := len(args))
        else []
    )
    text = (
        [
            mark + " ${1:${2:@inheritParams ${3:}}}",
            mark,
            mark + " @title ${4:}",
            mark + " @description ${5:}",
        ]
        + args_text
        + [mark + " @details $" + str(offset + 6)]
        + [
            mark,
            mark + " @return $" + str(offset + 7),
            mark + " ${" + str(offset + 8) + ":@export}",
            mark,
            mark + " @examples",
            mark,
        ]
    )
    # Place the cursor on the function's line before expanding the snippet.
    vim.current.window.cursor = [max(line, 1), 0]
    snip.expand_anon("\n" + "\n".join(text) + "\n")


def get_function_name(snip, line):
    """Return the assigned name of the function at/above `line`, or None.

    If the current buffer line is itself an `name <- ...` assignment, read
    from it; otherwise assume the assignment is one line up.
    """
    fun_line = line if re.match("[^<]+<-", snip.buffer[snip.line]) else line - 1
    fun = re.match(r"^\s*([^ ]+).*", snip.buffer[fun_line])
    return fun.group(1) if fun else None


def extract_args(snip, start, fun):
    """Return (argument names, printable signature) for the function at `start`.

    Handles signatures spanning multiple lines by joining up to the line that
    closes the parameter list (`) {`).
    """
    line = snip.buffer[start]
    # Handle signatures that flow over multiple lines: not as annoying as feared!
    if not re.match(r"\)\s*\{\s*$", line):
        # stop = int(vim.eval("searchpairpos('function(', '', ')\\s*{\\s*$')")[0])
        stop = int(vim.eval("search(')\\s*{\\s*$')"))
        vim.command("echom '" + str(stop) + "'")
        line = " ".join(
            [line] + [snip.buffer[i].lstrip() for i in range(start + 1, stop)]
        )
        # snip.cursor.set(*orig)
    # Strip the `name <- function(` prefix, keeping only the parameter text.
    line = re.sub(r"^[\w.]*\s*(?:<-)?\s*function\s*\(", "", line)
    signature = str(fun) + "(" + re.sub(r"\s*\{\s*$", "", line)
    # Recursive regex (regex module) removes nested call(...) default values.
    line = rsub(r"[\w.]*\((((?>[^()]+)|(?R))*)\)", "", line)
    line = re.sub(r"\).*$", "", line)
    # vim.command('echo "' + str(line) + '"')
    args = re.findall(r"(?:^|,)\s*([\w\.]+)", line)
    return args, signature


# Helper to compose these tasks
def document(snip):
    """Entry point: generate roxygen docs for the function under the trigger."""
    # Get rid of trigger
    line = get_definition(snip)
    # Bail out if no match
    if line is None:
        snip.cursor.set(snip.line, snip.column)
        return None
    fun = get_function_name(snip, line)
    args, signature = extract_args(snip, line, fun)
    render_roxygen(snip, args, signature, line)
2,446
872
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 from os import path import tempfile from textwrap import dedent from c7n import loader from .common import BaseTest class TestSourceLocator(BaseTest): def test_yaml_file(self): with tempfile.TemporaryDirectory() as tmpdirname: filename = path.join(tmpdirname, "testfile.yaml") with open(filename, "w") as f: f.write(dedent("""\ policies: - name: foo resource: s3 # One where name isn't the first element. - resource: ec2 name: bar """)) locator = loader.SourceLocator(filename) self.assertEqual(locator.find("foo"), "testfile.yaml:2") self.assertEqual(locator.find("bar"), "testfile.yaml:7") self.assertEqual(locator.find("non-existent"), "")
982
271
import threading
import time

import mod_measure_list
import mod_sense_hat


# Class that drives periodic sensor acquisition on a background thread.
class ThreadManager(threading.Thread):

    def __init__(self, channel, delay, source, measure_list):
        threading.Thread.__init__(self)
        self.channel = channel            # Acquisition channel
        self.delay = delay                # Acquisition period (seconds, passed to time.sleep)
        self.source = source              # Acquisition source (exposes read_channel)
        self.measure_list = measure_list  # Shared measure-list to append readings to
        self.exit_flag = False            # Set True to stop the acquisition loop

    # Sensor-reading loop: sample the channel every `delay` until stopped.
    #
    # NOTE(review): this method is named `acquisition_thread`, not `run`, so
    # Thread.start() will NOT execute it — callers must invoke it explicitly
    # (or it should be renamed to `run`). Confirm intent before renaming, as
    # existing callers may target `acquisition_thread` directly.
    def acquisition_thread(self):
        while not self.exit_flag:
            # Timestamp the sample at read time.
            ts = time.time()
            # Append (channel, value, timestamp) to the shared list.
            self.measure_list.add_details(self.channel,
                                          self.source.read_channel(self.channel),
                                          ts)
            time.sleep(self.delay)

    def stop_thread(self):
        """Request loop termination; takes effect after the current sleep."""
        self.exit_flag = True

    # A large commented-out `parse_measures` averaging/JSON-reporting routine
    # was removed here; recover it from version control if needed.
2,635
803
# Drop records with no species label; .copy() detaches the result from
# survey_data_unique so later column writes don't raise pandas'
# SettingWithCopyWarning. (survey_data_unique is defined elsewhere.)
survey_data = survey_data_unique.dropna(subset=['species']).copy()
66
22
import ClusterCreator
import math

# Build clusters: total items, cluster size parameters, and per-leader
# capacity derived from the 131072-byte budget at 132 bytes per entry.
creator = ClusterCreator.ClusterCreator(
    35484770, 100, 30, 30, 20, math.floor(131072 / 132)
)
leaders, clusters, l, c_id = creator.create_clusters()

print(len(leaders))
print(len(clusters))
print("---")
print(clusters[0].count)

# Sanity check: total item count across all clusters.
total = sum(cluster.count for cluster in clusters)
print(total)
316
155
# copy from Leetcode, really a good way!
class Solution:
    def singleNumber(self, nums):
        """Return the one element of `nums` that appears exactly once.

        Relies on XOR: it is associative and commutative, ``x ^ x == 0``,
        and ``0`` is its identity — so folding the whole list with XOR
        cancels every paired value and leaves only the lone one. Order of
        the elements therefore does not matter.
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc
770
260
"""Top-level of library designed to ease use of Selenium targetting Galaxy.

galaxy_selenium is purposes being designed to depend on Python selenium, six,
pyyaml, and optional pyvirtualdisplay but not galaxy-lib (or any of Galaxy or
Galaxy's test stuff) currently.
"""

__version__ = '17.9.0.dev0'

PROJECT_NAME = "galaxy-selenium"
# NOTE(review): "USERAME" looks like a typo for "USERNAME", but both
# PROJECT_USERAME and RAW_CONTENT_URL below depend on it and external code
# may import it, so it is left unchanged here.
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy-selenium"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = 'jmchilton@gmail.com'
# Base URL for fetching raw files from the project's master branch.
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
    PROJECT_USERAME, PROJECT_NAME
)
630
217
# coding: utf-8

"""
    THORChain API

    This documentation outlines the API for THORChain. NOTE: This document is a **work in progress**.  # noqa: E501

    OpenAPI spec version:

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from thornode_client.api_client import ApiClient


class TxApi(object):
    """NOTE: This class is auto generated by the swagger code generator
    program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_a_tx_with_given_hash(self, hash, **kwargs):  # noqa: E501
        """Get a tx with given hash  # noqa: E501

        Retrieve a tx with the given hash from THORChain  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_a_tx_with_given_hash(hash, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str hash: Tx hash of an inbound transaction or outbound transaction (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_a_tx_with_given_hash_with_http_info(hash, **kwargs)  # noqa: E501
        else:
            (data) = self.get_a_tx_with_given_hash_with_http_info(hash, **kwargs)  # noqa: E501
            return data

    def get_a_tx_with_given_hash_with_http_info(self, hash, **kwargs):  # noqa: E501
        """Get a tx with given hash  # noqa: E501

        Retrieve a tx with the given hash from THORChain  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_a_tx_with_given_hash_with_http_info(hash, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str hash: Tx hash of an inbound transaction or outbound transaction (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['hash']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not define.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_a_tx_with_given_hash" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'hash' is set
        if self.api_client.client_side_validation and ('hash' not in params or
                                                       params['hash'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `hash` when calling `get_a_tx_with_given_hash`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'hash' in params:
            path_params['hash'] = params['hash']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/thorchain/tx/{hash}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_tx_signers(self, hash, **kwargs):  # noqa: E501
        """Get tx signers  # noqa: E501

        Get tx signers that match the request hash  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_tx_signers(hash, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str hash: Tx hash of an inbound transaction or outbound transaction (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_tx_signers_with_http_info(hash, **kwargs)  # noqa: E501
        else:
            (data) = self.get_tx_signers_with_http_info(hash, **kwargs)  # noqa: E501
            return data

    def get_tx_signers_with_http_info(self, hash, **kwargs):  # noqa: E501
        """Get tx signers  # noqa: E501

        Get tx signers that match the request hash  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_tx_signers_with_http_info(hash, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str hash: Tx hash of an inbound transaction or outbound transaction (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['hash']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not define.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tx_signers" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'hash' is set
        if self.api_client.client_side_validation and ('hash' not in params or
                                                       params['hash'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `hash` when calling `get_tx_signers`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'hash' in params:
            path_params['hash'] = params['hash']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/thorchain/tx/{hash}/signers', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression import constants as c class RegressionHandler: """ This class encapsulates the logic for calculating and plotting a linear regression. """ @staticmethod def plot_regression(dataframe, sex=c.Sex.MALE): """ Plot a linear regression with the GDP per capita being the X-variable and the mean height the Y-variable. :param dataframe: dataframe containing the data to plot :param sex: either MALE (0) or FEMALE (1) - specifies the sex for which the given dataframe contains data :return: nothing """ # sort by GDP/capita so that plot can use logarithmic scale dataframe = dataframe.sort_values([c.GDP], 0) sex_label = 'males' if sex == c.Sex.MALE else 'females' X = dataframe.loc[:, c.GDP].values.reshape(-1, 1) # values converts it into a numpy array Y = dataframe.loc[:, c.AVG_HEIGHT].values.reshape(-1, 1) # -1 means that calculate the dimension of rows, but have 1 column linear_regressor = LinearRegression() # create object for the class linear_regressor.fit(X, Y) # perform linear regression Y_pred = linear_regressor.predict(X) # make predictions plt.scatter(X, Y) plt.plot(X, Y_pred, color='red') plt.xscale('log') plt.xlabel('GDP per capita [USD]') plt.ylabel('average height of {0} aged 19 [cm]'.format(sex_label)) plt.savefig('out/regression_{0}.png'.format(sex_label)) plt.show()
1,570
480
import os import sys import pickle from dotenv import load_dotenv _ = load_dotenv() def load_utils(): MODEL_PATH = os.getenv("MODEL_PATH") VECTORIZER_PATH = os.getenv("VECTORIZER_PATH") LABEL_ENC_PATH = os.getenv("LABEL_ENC_PATH") with open(MODEL_PATH, 'rb') as f: model = pickle.load(f) f.close() with open(VECTORIZER_PATH, 'rb') as f: vectorizer = pickle.load(f) f.close() with open(LABEL_ENC_PATH, 'rb') as f: le = pickle.load(f) f.close() return (model, vectorizer, le)
561
227
from .build import ARCHs_REGISTRY from .base_arch import BaseReIDModel from .reduction_arch import ReductionReIDModel
117
37
from .TestCase import TestCase
31
9
import telegram import tornado.ioloop import tornado.web from tornado.options import define, options from settings import WEBHOOK_URL, TELEGRAM_ACCESS_TOKEN from core import bot_handler define("port", default=5000, help="run on the given port", type=int) class IndexHandler(tornado.web.RequestHandler): def get(self): self.write('wink, wink') class MainHandler(tornado.web.RequestHandler): def get(self): self.write('wink, wink') def post(self): data = tornado.escape.json_decode(self.request.body) self.set_status(200) return bot_handler(data=data) class WebHookHandler(tornado.web.RequestHandler): def get(self): # one time only operation bot = telegram.Bot(token=TELEGRAM_ACCESS_TOKEN) response = bot.setWebhook(WEBHOOK_URL) if not response: return self.write('Setting up webhook has failed') return self.write('Webhook has been successfully set') def make_app(): return tornado.web.Application([ (r'/', IndexHandler), (r'/duh', MainHandler), (r'/setwebhook', WebHookHandler) ]) if __name__ == "__main__": tornado.options.parse_command_line() app = make_app() app.listen(options.port) tornado.ioloop.IOLoop.current().start()
1,297
429
#!/usr/bin/python #Original Author: Zane J Cersovsky #Original Date: Mar 6 2016 #Last Modified By: Zane J Cersovsky #Last Modified On: Mar 7 2016 import bitstring def reverse_word(word): r""" Takes in a int and reverse its bytes """ assert type(word) == int assert word >= 0 and word < 65536 msb = word >> 8 lsb = word & 255 return (lsb << 8) | msb def twocomplement(word): r""" Switches between 2's complement binary and python signed integers """ b = bitstring.Bits(uint=word, length=16) return b.int
516
209
"""Tests for certbot_azure.authenticator.""" import os import unittest import mock import json from certbot import errors from certbot.plugins import dns_test_common_lexicon from certbot.plugins.dns_test_common import DOMAIN from certbot.tests import util as test_util from requests import Response from msrestazure.azure_exceptions import CloudError RESOURCE_GROUP = 'test-test-1' class AuthenticatorTest(test_util.TempDirTestCase, dns_test_common_lexicon.BaseLexiconAuthenticatorTest): def setUp(self): from certbot_azure.dns_azure import Authenticator super(AuthenticatorTest, self).setUp() config_path = AzureClientConfigDummy.build_config(self.tempdir) self.config = mock.MagicMock(azure_credentials=config_path, azure_resource_group=RESOURCE_GROUP) self.auth = Authenticator(self.config, "azure") self.mock_client = mock.MagicMock() # pylint: disable=protected-access self.auth._get_azure_client = mock.MagicMock(return_value=self.mock_client) def test_perform(self): self.auth.perform([self.achall]) expected = [mock.call.add_txt_record('_acme-challenge.'+DOMAIN, mock.ANY, mock.ANY)] self.assertEqual(expected, self.mock_client.mock_calls) def test_cleanup(self): # _attempt_cleanup | pylint: disable=protected-access self.auth._attempt_cleanup = True self.auth.cleanup([self.achall]) expected = [mock.call.del_txt_record('_acme-challenge.'+DOMAIN)] self.assertEqual(expected, self.mock_client.mock_calls) class AzureClientTest(test_util.TempDirTestCase): zone = "foo.com" record_name = "bar" record_content = "baz" record_ttl = 42 def _getCloudError(self): response = Response() response.status_code = 500 return CloudError(response) def setUp(self): from certbot_azure.dns_azure import _AzureClient super(AzureClientTest, self).setUp() config_path = AzureClientConfigDummy.build_config(self.tempdir) self.azure_client = _AzureClient(RESOURCE_GROUP, config_path) self.dns_client = mock.MagicMock() self.azure_client.dns_client = self.dns_client # pylint: disable=protected-access 
self.azure_client._find_managed_zone = mock.MagicMock() def test_add_txt_record(self): # pylint: disable=protected-access self.azure_client._find_managed_zone.return_value = self.zone self.azure_client.add_txt_record(self.record_name + "." + self.zone, self.record_content, self.record_ttl) self.dns_client.record_sets.create_or_update.assert_called_with( self.azure_client.resource_group, self.zone, self.record_name, 'TXT', mock.ANY) record = self.dns_client.record_sets.create_or_update.call_args[0][4] self.assertEqual(self.record_ttl, record.ttl) self.assertEqual([self.record_content], record.txt_records[0].value) def test_add_txt_record_error(self): # pylint: disable=protected-access self.azure_client._find_managed_zone.return_value = self.zone self.dns_client.record_sets.create_or_update.side_effect = self._getCloudError() with self.assertRaises(errors.PluginError): self.azure_client.add_txt_record(self.record_name + "." + self.zone, self.record_content, self.record_ttl) def test_add_txt_record_zone_not_found(self): # pylint: disable=protected-access self.azure_client._find_managed_zone.return_value = None # pylint: disable=protected-access self.azure_client._find_managed_zone.side_effect = self._getCloudError() with self.assertRaises(errors.PluginError): self.azure_client.add_txt_record(self.record_name + "." + self.zone, self.record_content, self.record_ttl) def test_del_txt_record(self): # pylint: disable=protected-access self.azure_client._find_managed_zone.return_value = self.zone self.azure_client.del_txt_record(self.record_name + "." + self.zone) self.dns_client.record_sets.delete.assert_called_with(self.azure_client.resource_group, self.zone, self.record_name, 'TXT') def test_del_txt_record_no_zone(self): # pylint: disable=protected-access self.azure_client._find_managed_zone.return_value = None # pylint: disable=protected-access self.azure_client._find_managed_zone.side_effect = self._getCloudError() self.azure_client.del_txt_record(self.record_name + "." 
+ self.zone) self.dns_client.record_sets.delete.assert_not_called() class AzureClientConfigDummy(object): """Helper class to create dummy Azure configuration""" @classmethod def build_config(cls, tempdir): """Helper method to create dummy Azure configuration""" config_path = os.path.join(tempdir, 'azurecreds.json') with open(config_path, 'w') as outfile: json.dump({ "clientId": "uuid", "clientSecret": "uuid", "subscriptionId": "uuid", "tenantId": "uuid", "activeDirectoryEndpointUrl": "https://login.microsoftonline.com", "resourceManagerEndpointUrl": "https://management.azure.com/", "activeDirectoryGraphResourceId": "https://graph.windows.net/", "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/", "galleryEndpointUrl": "https://gallery.azure.com/", "managementEndpointUrl": "https://management.core.windows.net/" }, outfile) os.chmod(config_path, 0o600) return config_path if __name__ == "__main__": unittest.main() # pragma: no cover
6,454
1,874
#!/usr/bin/python3 """ Contains the Test User class """ from models.users import User from models.baseModel import BaseModel import unittest class TestUser(unittest.TestCase): """Test the User class""" def test_is_subclass(self): """Test that User is a subclass of BaseModel""" user = User() self.assertIsInstance(user, BaseModel) def test_init_(self): user = User() self.assertTrue(hasattr(user, "IdUser")) self.assertTrue(hasattr(user, "FirstName")) self.assertTrue(hasattr(user, "LastName")) self.assertTrue(hasattr(user, "Phone")) self.assertTrue(hasattr(user, "Mail")) self.assertTrue(hasattr(user, "Password")) self.assertTrue(hasattr(user, "City")) def test_to_dict_values(self): """test that values in dict returned from to_dict are correct""" eva = {'FirstName': 'Eva', 'LastName': 'DaughterOfGod', 'Mail': 'evacontacto@readIT', 'Password': 'Hola', 'Phone': '59899101010', 'City': 'Paradise'} u = User(**eva) new_d = u.to_dict() self.assertEqual(new_d["Class"], "User") self.assertEqual(type(new_d["IdUser"]), str) self.assertEqual(type(new_d["FirstName"]), str) self.assertEqual(type(new_d["LastName"]), str) self.assertEqual(type(new_d["Phone"]), str) self.assertEqual(type(new_d["Mail"]), str) self.assertEqual(type(new_d["Password"]), str) self.assertEqual(type(new_d["City"]), str) def test_str(self): """test that the str method has the correct output""" user = User() string = "[User] ({}) {}".format(user.IdUser, user.__dict__) self.assertEqual(string, str(user))
1,797
569
""" author : Ali Emre SAVAS Link : https://www.hackerrank.com/challenges/python-print/problem """ if __name__ == '__main__': n = int(input()) number = "" # n+1 -> Because in range function, n+1 is not in the range. for i in range(1, n+1): number += str(i) print(number)
306
111
# data.py import numpy as np from itertools import product, combinations from . func import * from . const import * # totally 31 hold options in two dimentional array all_hold_options = np.array([list(combinations([0,1,2,3,4], r=repeat)) for repeat in range(1, 6)]) # plus one empty option, makes it 32 options all_hold_options[0].append(()) # numbers from 1 to 6 numbers = range(1, 7) # all unique ordered hands hands = [] for hand in product(numbers, repeat=5): hand = list(hand) hand.sort(reverse=True) if hand not in hands: hands.append(hand) file = 'yatzy_probabilities.csv' # data table data = { CHANGE: {'name': 'change', 'func': isChange, 'target': 17, 'order': 9, 'score': change}, SMALLSTRAIGHT: {'name': 'smallstraight', 'func': isSmallStraight, 'target': SMALL_STRAIGHT_BONUS, 'order': 8, 'score': smallStraight}, LARGESTRAIGHT: {'name': 'largestraight', 'func': isLargeStraight, 'target': LARGE_STRAIGHT_BONUS, 'order': 7, 'score': largeStraight}, DOUBLE: {'name': 'double', 'func': isKindNumber2, 'target': 8, 'order': 6, 'score': kindNumber2}, TRIPLE: {'name': 'triple', 'func': isKindNumber3, 'target': 12, 'order': 5, 'score': kindNumber3}, PAIR: {'name': 'pair', 'func': isPair, 'target': 14, 'order': 4, 'score': pair}, QUADRUPLE: {'name': 'quadruple', 'func': isKindNumber4, 'target': 14, 'order': 1, 'score': kindNumber4}, FULLHOUSE: {'name': 'fullhouse', 'func': isFullHouse, 'target': (FULL_HOUSE_BONUS if FULL_HOUSE_BONUS else 20), 'order': 1, 'score': fullHouse}, ONE: {'name': 'one', 'func': isNumberKind1, 'target': 3, 'order': 1, 'score': numberKind1}, TWO: {'name': 'two', 'func': isNumberKind2, 'target': 6, 'order': 1, 'score': numberKind2}, THREE: {'name': 'three', 'func': isNumberKind3, 'target': 9, 'order': 1, 'score': numberKind3}, FOUR: {'name': 'four', 'func': isNumberKind4, 'target': 12, 'order': 1, 'score': numberKind4}, FIVE: {'name': 'five', 'func': isNumberKind5, 'target': 15, 'order': 1, 'score': numberKind5}, SIX: {'name': 'six', 'func': 
isNumberKind6, 'target': 18, 'order': 1, 'score': numberKind6}, YATZY: {'name': 'yatzy', 'func': isYatzy, 'target': YATZY_BONUS, 'order': 1, 'score': yatzy} } categories = {k:v['name'] for k,v in data.items()} functions = {k:v['func'] for k,v in data.items()} targets = {k:v['target'] for k,v in data.items()} scoring = {k:v['score'] for k,v in data.items()} #order = [v['order'] for k,v in data.items()] order = {k:v['order'] for k,v in data.items()}
2,532
990
from sqlalchemy.orm import joinedload from blue_yellow_app.data.album import Album from blue_yellow_app.data.dbsession import DbSessionFactory from blue_yellow_app.data.track import Track class AlbumsService: @staticmethod def get_albums(): session = DbSessionFactory.create_session() albums = session.query(Album) \ .options(joinedload('tracks')) \ .filter(Album.is_published) \ .order_by(Album.year.desc()) \ .all() return albums @staticmethod def old_get_albums(): return [ { 'title': 'Digital age boys and girls', 'year': 2001, 'has_preview': True, 'image': '/static/img/albums/digital_album.jpg', 'tracks': [ {'duration': '0:48', 'title': 'Welcome to the millennium'}, {'duration': '4:20', 'title': 'Renegade coders'}, {'duration': '5:01', 'title': 'Cyberpunks unite!'}, {'duration': '3:21', 'title': "We're all moving the Silicon Valley"}, {'duration': '2:22', 'title': "Tomorrow's people"}, {'duration': '4:24', 'title': 'I thought you were a robot'} ], 'url': 'digital-age-boys-and-girls' }, { 'title': 'Year of the snake', 'year': 1991, 'has_preview': False, 'image': '/static/img/albums/snake_album.jpg', 'tracks': [ {'duration': '3:02', 'title': "Code like it's 1999"}, {'duration': '2:40', 'title': "Dawn of the iterators"}, {'duration': '5:21', 'title': "Running with descriptors"}, {'duration': '2:01', 'title': "Rage against the compilers"}, {'duration': '4:41', 'title': "Another line in the program"} ], 'url': 'year-of-the-snake' } ] @classmethod def create_album(cls, title: str, year: int, album_image: str, price: float, url: str, track_titles: []): session = DbSessionFactory.create_session() album = Album(name=title, year=year, album_image=album_image, price=price, url=url, is_published=True) session.add(album) for idx, title in enumerate(track_titles): track = Track(name=title, length=60, display_order=idx + 1) album.tracks.append(track) session.commit() return album @classmethod def get_album_by_id(cls, album_id): session = DbSessionFactory.create_session() return 
session.query(Album). \ filter(Album.id == album_id) \ .first()
2,841
841
# Generated by Django 2.1.5 on 2019-01-30 18:53 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('calls', '0002_auto_20190130_1811'), ] operations = [ migrations.CreateModel( name='Type', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('International', 'International'), ('National', 'National'), ('Domestic', 'Domestic')], max_length=15)), ], ), migrations.RenameField( model_name='call', old_name='call_time', new_name='duration', ), migrations.RemoveField( model_name='call', name='call_type', ), migrations.RemoveField( model_name='call', name='id', ), migrations.AddField( model_name='call', name='type', field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='calls.Type'), ), ]
1,229
377
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Struct Class # this is a auto generated file generated by Cheetah # Namespace: com.sun.star.util # Libre Office Version: 7.3 from ooo.oenv.env_const import UNO_NONE import typing class Duration(object): """ Struct Class represents a duration. A duration is the difference of 2 DateTimes. Note that there are no constraints on the ranges of the members, except that every member must be non-negative: for example, a Duration of 400 Days is valid. **since** OOo 3.3 See Also: `API Duration <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1util_1_1Duration.html>`_ """ __ooo_ns__: str = 'com.sun.star.util' __ooo_full_ns__: str = 'com.sun.star.util.Duration' __ooo_type_name__: str = 'struct' typeName: str = 'com.sun.star.util.Duration' """Literal Constant ``com.sun.star.util.Duration``""" def __init__(self, Negative: typing.Optional[bool] = False, Years: typing.Optional[int] = 0, Months: typing.Optional[int] = 0, Days: typing.Optional[int] = 0, Hours: typing.Optional[int] = 0, Minutes: typing.Optional[int] = 0, Seconds: typing.Optional[int] = 0, NanoSeconds: typing.Optional[int] = 0) -> None: """ Constructor Arguments: Negative (bool, optional): Negative value. Years (int, optional): Years value. Months (int, optional): Months value. Days (int, optional): Days value. Hours (int, optional): Hours value. Minutes (int, optional): Minutes value. 
Seconds (int, optional): Seconds value. NanoSeconds (int, optional): NanoSeconds value. """ super().__init__() if isinstance(Negative, Duration): oth: Duration = Negative self.Negative = oth.Negative self.Years = oth.Years self.Months = oth.Months self.Days = oth.Days self.Hours = oth.Hours self.Minutes = oth.Minutes self.Seconds = oth.Seconds self.NanoSeconds = oth.NanoSeconds return kargs = { "Negative": Negative, "Years": Years, "Months": Months, "Days": Days, "Hours": Hours, "Minutes": Minutes, "Seconds": Seconds, "NanoSeconds": NanoSeconds, } self._init(**kargs) def _init(self, **kwargs) -> None: self._negative = kwargs["Negative"] self._years = kwargs["Years"] self._months = kwargs["Months"] self._days = kwargs["Days"] self._hours = kwargs["Hours"] self._minutes = kwargs["Minutes"] self._seconds = kwargs["Seconds"] self._nano_seconds = kwargs["NanoSeconds"] @property def Negative(self) -> bool: """ explicit sign bit. """ return self._negative @Negative.setter def Negative(self, value: bool) -> None: self._negative = value @property def Years(self) -> int: """ contains the years. """ return self._years @Years.setter def Years(self, value: int) -> None: self._years = value @property def Months(self) -> int: """ contains the months. """ return self._months @Months.setter def Months(self, value: int) -> None: self._months = value @property def Days(self) -> int: """ contains the days. """ return self._days @Days.setter def Days(self, value: int) -> None: self._days = value @property def Hours(self) -> int: """ contains the hours. """ return self._hours @Hours.setter def Hours(self, value: int) -> None: self._hours = value @property def Minutes(self) -> int: """ contains the minutes. """ return self._minutes @Minutes.setter def Minutes(self, value: int) -> None: self._minutes = value @property def Seconds(self) -> int: """ contains the seconds. 
""" return self._seconds @Seconds.setter def Seconds(self, value: int) -> None: self._seconds = value @property def NanoSeconds(self) -> int: """ contains the nanoseconds. """ return self._nano_seconds @NanoSeconds.setter def NanoSeconds(self, value: int) -> None: self._nano_seconds = value __all__ = ['Duration']
5,230
1,628
import torch def compute_pairwise_cosine_distances(minibatch_embeddings, full_matrix=False): # cosine_distance = 1 - cosine_similarity # cosine similarity (A,B)= cos(theta) = (A . B ) / (||A||*||B||) , # constrainining embeddings into a hypersphere (unit-sphere) so all norms are 1 reduces this to a matrix multiplication (A.B) D = 1 - torch.mm(minibatch_embeddings, torch.transpose(minibatch_embeddings, 0, 1)) if not full_matrix: tri_idx = torch.triu_indices(minibatch_embeddings.shape[0],minibatch_embeddings.shape[0],1) pairwise_dist_vector = D[tri_idx[0],tri_idx[1]] return pairwise_dist_vector else: return D def compute_pairwise_euclidean_distances(minibatch_embeddings, d, n, full_matrix=False ): # as per https://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf alg.1 X_view1 = minibatch_embeddings.reshape(d, n, 1) X_view2 = minibatch_embeddings.reshape(d,1,n) diff_mat = X_view1-X_view2 D = torch.sum(diff_mat**2,dim=0) if not full_matrix: tri_idx = torch.triu_indices(n,n,1) pairwise_dist_vector = D[tri_idx[0],tri_idx[1]] return torch.sqrt(pairwise_dist_vector) else : return torch.sqrt(D)
1,250
490
import numpy as np import os import sys import progressbar import gc import pickle import matplotlib.pyplot as plt from LSTM import LSTM from wrapper import Bidirectional from Regularization import regularization from attention_model import attention_model from data_preprocessing import song_preprocessing from functions import activations as act, helper_func as func from sklearn.preprocessing import normalize, minmax_scale class model: def __init__(self, X, Y, S, Tx, Ty, lr = 0.005, n_a = 64, n_s = 32, jump_step = 100, epoch = 100, sec = 5, optimizer = None): self.X = X self.Y = Y self.S = S self.Tx = Tx self.Ty = Ty self.lr = lr self.m = X.shape[0] self.n_x = X.shape[2] self.n_y = Y.shape[2] self.n_a = n_a self.n_s = n_s self.n_c = self.n_a * 2 # *2 when using bidirectional self.hidden_dimension = [10] self.jump_step = jump_step self.epoch = epoch self.sec = sec self.last_layer_hidden_state = None self.Att_As = [] self.Att_caches = [] self.Att_alphas = [] # Wy shape = (n_s,n_y) self.Wy = func.xavier((self.n_s, self.n_y)) self.by = np.zeros((1, self.n_y)) self.optimizer = optimizer self.s_weight = 0 self.s_bias = 0 self.v_weight = 0 self.v_bias = 0 self.TRAINING_THRESHOLD = 0 self.non_random_circle = 10 self._params = {"Wy": self.Wy, "by": self.by} self.pre_LSTM = LSTM("pre_LSTM", (self.Tx, self.n_x), (self.Tx, self.n_a), optimizer = optimizer) self.pre_bi_LSTM = Bidirectional("pre_bi_LSTM", self.pre_LSTM, is_dropout = True) self.attention = attention_model("attention", self.n_c, self.S, self.n_s, self.n_c, self.hidden_dimension, optimizer = optimizer) self.post_LSTM = LSTM("post_LSTM", (self.Ty, self.n_c), (self.Ty, self.n_s), is_attention = True, optimizer = optimizer) def forward_propagation_one_ex(self, i, e): """ description: forward propagation for one training example; data x label y ---parameter--- i: index """ # self.gradient_checking() # X = minmax_scale(self.X[i,:,:], feature_range = (0, 1), axis = 0) X = normalize(self.X[i,:,:], axis=1) A = 
self.pre_bi_LSTM.concatLSTM(X) # shape = (Tx, 2 * n_a) # A = self.pre_LSTM.forward_propagation(X) self.attention._A = A # attention and post_LSTM start = 0 end = self.S prev_s = np.zeros((1, self.n_s)) prev_a = np.zeros((1, self.n_s)) lstm_S = [] for t in range(self.Ty): alphas, c, _energies, _caches_t, current_A = self.attention.nn_forward_propagation(prev_s, start, end) start = start + self.jump_step end = end + self.jump_step # for backpropagation use ***** this step take 30% of RAM in total ******* self.Att_As.append(current_A) self.Att_caches.append(_caches_t) self.Att_alphas.append(alphas) st, at, cache = self.post_LSTM.cell_forward(prev_s, prev_a, c) lstm_S.append(st) prev_s = st prev_a = at # convert lstm_S(list) to lstm_S(np array) lstm_S = np.array(lstm_S).reshape((self.Ty, self.n_s)) self.last_layer_hidden_state = lstm_S del lstm_S # TODO: dropout lstm_S # lstm_S = act.dropout(lstm_S, level = 0.5) # initialize last layer Wy # st shape = (1,n_s) Y_hat = [] for t in range(self.Ty): # st shape = (1, n_s) Zy = np.matmul(np.atleast_2d(self.last_layer_hidden_state[t,:]), self._params["Wy"]) + self._params["by"] # shape = (1, n_y) yt_hat = act.softmax(Zy) Y_hat.append(yt_hat.reshape(-1)) # yt_hat after reshape = (n_y,) # Y_hat shape = (Ty, n_y) Y_true = np.array(self.Y[i,:,:]) # (Ty, n_y) Y_hat = np.array(Y_hat) total_lost = 0 for t in range(self.Ty): lost = func.t_lost(Y_true[t,:], Y_hat[t,:]) total_lost = total_lost + lost total_lost = (total_lost/self.Ty) return total_lost, Y_hat, Y_true def backward_propagation_one_ex(self, Y_hat, Y_true, i, e, lr): """ Description: backward propagation for one training example; data x label y ----parameter--- Y_hat: predicted value given training data X Y_true: True label value of training data X """ # dL = (1/self.Ty) # shape (Ty, n_y) dZ = (Y_hat - Y_true) assert(dZ.shape == (self.Ty, self.n_y)) # calculate dWy and dby dWy = np.matmul(np.transpose(self.last_layer_hidden_state.reshape(self.Ty, self.n_s)), dZ) dby = 
np.atleast_2d(np.sum(dZ, axis = 0)) self.update_weight(dWy, dby, e, lr, optimizer = self.optimizer) assert(dWy.shape == (self.n_s, self.n_y) and dby.shape == (1, self.n_y)) #shape = (Ty, n_s) dS = np.matmul(dZ, np.transpose(self.Wy)) d_AS_list = self.post_LSTM.backward_propagation(dS, self.Att_As, self.Att_caches, self.Att_alphas, self.attention) self.post_LSTM.update_weight(lr, e) self.attention.update_weight(lr, e) self.Att_As = [] self.Att_caches = [] self.Att_alphas = [] self.pre_bi_LSTM.cell_backpropagation(d_AS_list, self.jump_step, self.Ty, self.Tx) self.pre_bi_LSTM.update_weight(lr, e) def update_weight(self, dWy, dby, i ,lr=0.005, optimizer = None, beta1 = 0.9, beta2 = 0.999, eps = 1e-8): i = i + 1 lr = lr * np.sqrt(1-beta2**i)/(1-beta1**i) s_corrected_weight = None s_corrected_bias = None v_corrected_weight = None v_corrected_bias = None if optimizer == "Adam": self.s_weight = beta2 * self.s_weight + (1 - beta2) * (dWy ** 2) s_corrected_weight = self.s_weight / (1 - beta2**i) self.s_bias = beta2 * self.s_bias + (1 - beta2) * (dby ** 2) s_corrected_bias = self.s_bias / (1 - beta2**i) self.v_weight = beta1 * self.v_weight + (1 - beta1) * dWy v_corrected_weight = self.v_weight / (1 - beta1**i) self.v_bias = beta1 * self.v_bias + (1 - beta1) * dby v_corrected_bias = self.v_bias / (1 - beta1**i) self.Wy = self.Wy - lr*(v_corrected_weight/(np.sqrt(s_corrected_weight) + eps)) self.by = self.by - lr*(v_corrected_bias/(np.sqrt(s_corrected_bias) + eps)) else: self.Wy = self.Wy - lr*dWy self.by = self.by - lr*dby self._params["Wy"] = self.Wy self._params["by"] = self.by self.save_weights() def train(self, songs): lr = self.lr loss = [] print("Starting to train Detector..........") for e in range(self.epoch): print("Epoch {}/{}".format(e, self.epoch)) lost = 0 for i in progressbar.progressbar(range(self.m)): total_lost, Y_hat, Y_true = self.forward_propagation_one_ex(i, e) lost = lost + total_lost self.backward_propagation_one_ex(Y_hat, Y_true, i, e, lr) 
self.predict(self.X[i,:,:], songs, "weights") loss.append(lost/self.m) if e % 100 == 0: print(loss) print("Total Loss: ", lost/self.m) def save_weights(self): with open("weights/predict_layer.pickle", "wb") as f: pickle.dump(self._params, f, protocol = pickle.HIGHEST_PROTOCOL) def predict(self, data, songs, folder): Tx, n_x = data.shape assert(Tx >= self.S) pre_LSTM = LSTM("pre_LSTM", (Tx, n_x), (Tx, self.n_a), optimizer = self.optimizer) pre_bi_LSTM = Bidirectional("pre_bi_LSTM", pre_LSTM) attention = attention_model("attention", self.n_c, self.S, self.n_s, self.n_c, self.hidden_dimension, optimizer = self.optimizer) post_LSTM = LSTM("post_LSTM", (self.Ty, self.n_c), (self.Ty, self.n_s), is_attention = True, optimizer = self.optimizer) LSTM_forward_params = pickle.load(open(folder + "/biDirectional_pre_LSTM_forward.pickle", "rb")) LSTM_backward_params = pickle.load(open(folder +"/biDirectional_pre_LSTM_backward.pickle", "rb")) attention_params = pickle.load(open(folder + "/attention.pickle", "rb")) post_LSTM_params = pickle.load(open(folder + "/post_LSTM.pickle", "rb")) params = pickle.load(open(folder + "/predict_layer.pickle", "rb")) pre_bi_LSTM.forward._params = LSTM_forward_params pre_bi_LSTM.backward._params = LSTM_backward_params attention._params = attention_params post_LSTM._params = post_LSTM_params Ty = song_preprocessing.get_Ty(Tx, self.S, self.jump_step) # data = minmax_scale(data, feature_range=(0, 1), axis=0) data = normalize(data) A = pre_bi_LSTM.concatLSTM(data) attention._A = A start = 0 end = self.S prev_s = np.zeros((1, self.n_s)) prev_a = np.zeros((1, self.n_s)) lstm_S = [] for t in range(Ty): alphas, c, _energies, _caches_t, current_A = attention.nn_forward_propagation(prev_s, start, end) start = start + self.jump_step end = end + self.jump_step st, at, cache = post_LSTM.cell_forward(prev_s, prev_a, c) lstm_S.append(st) prev_s = st prev_a = at lstm_S = np.array(lstm_S) # TODO: dropout lstm_S # lstm_S = act.dropout(lstm_S, level = 0.5) # 
initialize last layer Wy # st shape = (1,n_s) y_predict = 0 for t in range(Ty): # st shape = (1, n_s) Zy = np.matmul(np.atleast_2d(lstm_S[t,:]), params["Wy"]) + params["by"] # shape = (1, n_y) yt_hat = act.softmax(Zy) y_predict = y_predict + yt_hat y_predict = y_predict / Ty print(y_predict) index = np.argmax(y_predict) print("Song predict: ", songs[index]) return songs[index] def gradient_checking(self, dby, dWy, i, eps = 1e-4): model_vec, model_keys_shape = func.dictionary_to_vector(self._params) LSTM_forward_vec, LSTM_forward_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.forward._params) LSTM_backward_vec, LSTM_backward_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.backward._params) attention_vec, attention_keys_shape = func.dictionary_to_vector(self.attention._params) post_LSTM_vec, post_LSTM_keys_shape = func.dictionary_to_vector(self.post_LSTM._params) params_vector = np.concatenate([model_vec, LSTM_forward_vec, LSTM_backward_vec, attention_vec, post_LSTM_vec]) remain_vector = None model_dict = {"dby": dby, "dWy": dWy} model_grads, model_grads_keys_shape = func.dictionary_to_vector(model_dict) LSTM_forward_grads, LSTM_forward_grads_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.forward.gradients) LSTM_backward_grads, LSTM_backward_grads_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.backward.gradients) attention_grads, attention_grads_keys_shape = func.dictionary_to_vector(self.attention.gradients_layer) post_LSTM_grads, post_LSTM_grads_keys_shape = func.dictionary_to_vector(self.post_LSTM.gradients) grads_vector = np.concatenate([model_grads, LSTM_forward_grads, LSTM_backward_grads, attention_grads, post_LSTM_grads]) num_parameters = params_vector.shape[0] J_plus = np.zeros((num_parameters, 1)) J_minus = np.zeros((num_parameters, 1)) gradapprox = np.zeros((num_parameters, 1)) for n in range(num_parameters): print("{}/{}".format(n,num_parameters)) thetaplus = np.copy(params_vector) thetaplus[n] = thetaplus[n] + eps 
remain_vector, model_params = func.vector_to_dictionary(thetaplus, model_keys_shape) remain_vector, LSTM_forward_params = func.vector_to_dictionary(remain_vector, LSTM_forward_keys_shape) remain_vector, LSTM_backward_params = func.vector_to_dictionary(remain_vector, LSTM_backward_keys_shape) remain_vector, attention_params = func.vector_to_dictionary(remain_vector, attention_keys_shape) remain_vector, post_LSTM_params = func.vector_to_dictionary(remain_vector, post_LSTM_keys_shape) self._params = model_params self.pre_bi_LSTM.forward._params = LSTM_forward_params self.pre_bi_LSTM.backward._params = LSTM_backward_params self.attention._params = attention_params self.post_LSTM._params = post_LSTM_params J_plus[n], _, _ = self.forward_propagation_one_ex(i) thetaminus = np.copy(params_vector) thetaminus[n] = thetaminus[n] + eps remain_vector, model_params = func.vector_to_dictionary(thetaminus, model_keys_shape) remain_vector, LSTM_forward_params = func.vector_to_dictionary(remain_vector, LSTM_forward_keys_shape) remain_vector, LSTM_backward_params = func.vector_to_dictionary(remain_vector, LSTM_backward_keys_shape) remain_vector, attention_params = func.vector_to_dictionary(remain_vector, attention_keys_shape) remain_vector, post_LSTM_params = func.vector_to_dictionary(remain_vector, post_LSTM_keys_shape) self._params = model_params self.pre_bi_LSTM.forward._params = LSTM_forward_params self.pre_bi_LSTM.backward._params = LSTM_backward_params self.attention._params = attention_params self.post_LSTM._params = post_LSTM_params J_minus[n], _, _ = self.forward_propagation_one_ex(i) gradapprox[n] = (J_plus[n] - J_minus[n]) / (2 * eps) numerator = np.linalg.norm(grads_vector - gradapprox) demoninator = np.linalg.norm(grads_vector) + np.linalg.norm(gradapprox) difference = numerator / demoninator if difference > 1e-7: print("Wrong") else: print("Right")
14,309
5,286
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from click.testing import CliRunner from demo import cli import os import pytest def test_import_toplevel(): try: import demo except ImportError: pytest.fail("Unable to import `demo`.") def test_fail_env(): token = os.getenv('IT_OAUTH_GIT_TOKEN') print(token) assert 'LkoLiyLqnhMCAa4or5qa' == token def test_command_line_interface(): runner = CliRunner() result = runner.invoke(cli.main) assert result.exit_code == 0 assert 'demo.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output
763
261
from hausse import Hausse
from hausse.plugins import (
    Assets,
    Collection,
    Collections,
    DiscoverPartials,
    Drop,
    Handlebars,
    Markdown,
    MetadataMarkdown,
    Relations,
)

# --- Collections -----------------------------------------------------------
# By default, all files in the "src/<name>" folder are grouped into the
# matching collection.
Links = Collection("links")
# `indexBy` enables indexation, which Relations needs to link files together.
Projects = Collection("projects", indexBy="title")
Skills = Collection("skills", indexBy="name")

h = Hausse("examples/portfolio")
h.clean()

# Plugins run in registration order. `use()` accepts a single plugin or a
# list, and may be called once or several times.
pipeline = [
    # Dump static files (stylesheets, icons, ...) straight into "dist".
    # Assets bypasses every other plugin and reads from "assets" directly,
    # not from "src/assets".
    Assets("assets"),
    # Parse every markdown file found in "src"; key/values present in the
    # headers are loaded as metadata.
    Markdown(),
    # Parse markdown strings stored inside file metadata.
    MetadataMarkdown("summary"),
    # Auto-create collections from each file's "collections" metadata.
    Collections(),
    # Fill the explicitly declared collections with their matching files.
    Links,
    Skills,
    Projects,
    # Build relations between files of different collections. The Collection
    # objects were created before the Hausse() call so they can be passed
    # here directly; CollectionSelector(collection_name) would also work.
    Relations(Projects, Skills),
    # Register partial templates for Handlebars layout processing.
    DiscoverPartials("templates"),
    # Apply Handlebars layouts to produce the html files.
    Handlebars("layouts", "layout.hbs", "index.md"),
    # Drop the consumed markdown sources before "dist" is written (files in
    # "src" are untouched). This site is a single page built from several
    # markdown files, so once the layout ran they are no longer wanted.
    Drop("*.md"),
]
h.use(pipeline)

# build(): load "src", run every registered plugin in order over the shared
# set of elements/metadata/settings, then write the result to "dist".
h.build()

# save(): persist the project configuration to `hausse.json`, usable later
# in CLI mode (`python -m hausse hausse.json`) once development is done and
# the project goes to production.
h.save()
3,151
790
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

import codegen


class CodegenTest(unittest.TestCase):
  """Checks that codegen.HashName matches the C++ metrics hash."""

  def testHash(self):
    # Must match those in //base/metrics/metrics_hashes_unittest.cc.
    expected = {
        'Back': 0x0557fa923dcee4d0,
        'Forward': 0x67d2f6740a8eaebf,
        'NewTab': 0x290eb683f96572f1,
    }
    for name, digest in expected.items():
      self.assertEqual(codegen.HashName(name), digest)


if __name__ == '__main__':
  unittest.main()
601
238
# content
13
7
# Demo of the yahoo_fin.stock_info helpers. Only the S&P 500 listing at the
# bottom actually runs; the rest is left as commented-out examples.
from yahoo_fin.stock_info import (
    get_data,
    tickers_sp500,
    tickers_nasdaq,
    tickers_other,
    get_quote_table,
)

""" pull historical data for Netflix (NFLX) """
# nflx = get_data("NFLX")

""" pull data for Apple (AAPL) """
"""case sensitivity does not matter"""
# aapl = get_data("aapl")

""" get list of all stocks currently traded on NASDAQ exchange """
# nasdaq_ticker_list = tickers_nasdaq()

""" get list of all stocks currently in the S&P 500 """
# NOTE(review): tickers_sp500() presumably fetches the ticker list over the
# network, so this line needs connectivity — confirm before running offline.
sp500_ticker_list = tickers_sp500()
print(sp500_ticker_list)

""" get other tickers not in NASDAQ (based off nasdaq.com)"""
# other_tickers = tickers_other()

""" get information on stock from quote page """
# info = get_quote_table("amzn")
716
277
#!/usr/bin/env python
"""Report CPU and network usage deltas for a container benchmark run.

Reads ``stats-before.json`` and ``stats-after.json`` (docker stats dumps)
from the directory named by the first command-line argument and prints the
difference in CPU seconds and MB transferred.
"""
import json
import os
import sys

# Unit-conversion constants (the originals were inlined float() literals).
_NS_PER_SECOND = float(1000000000)   # cpu_stats totals are in nanoseconds
_BYTES_PER_MB = float(1024) * float(1024)


def getTxBytes(stats):
    """Return cumulative bytes transmitted on eth0 from a stats dict."""
    # Parameter renamed from ``json`` — the old name shadowed the json module.
    return stats["networks"]["eth0"]["tx_bytes"]


def getRxBytes(stats):
    """Return cumulative bytes received on eth0 from a stats dict."""
    return stats["networks"]["eth0"]["rx_bytes"]


def getCPUUsage(stats):
    """Return total CPU usage in nanoseconds from a stats dict."""
    return stats["cpu_stats"]["cpu_usage"]["total_usage"]


def _load_json(path):
    """Load and return the JSON document at *path* (file closed via with)."""
    with open(path) as fh:
        return json.load(fh)


def main():
    """Compute and print the before/after resource deltas."""
    directory = os.path.join(os.getcwd(), sys.argv[1])
    print("Stats for " + directory)  # fixed: original printed "Stats for/path"

    afterJson = _load_json(os.path.join(directory, "stats-after.json"))
    beforeJson = _load_json(os.path.join(directory, "stats-before.json"))

    cpu_seconds = (getCPUUsage(afterJson) - getCPUUsage(beforeJson)) / _NS_PER_SECOND
    rx_mb = (getRxBytes(afterJson) - getRxBytes(beforeJson)) / _BYTES_PER_MB
    tx_mb = (getTxBytes(afterJson) - getTxBytes(beforeJson)) / _BYTES_PER_MB

    # Bug fix: the original mixed Python-2 ``print "..."`` statements with
    # Python-3 ``print(...)`` calls, which is a SyntaxError on python3.
    print("Total CPU Usage in seconds: " + "{:.2f}".format(cpu_seconds))
    print("Total RX in MB: " + "{:.2f}".format(rx_mb))
    print("Total TX in MB: " + "{:.2f}".format(tx_mb))
    print("Total RX+TX in MB: " + "{:.2f}".format(tx_mb + rx_mb))


if __name__ == "__main__":
    main()
1,158
460
""" __init__.py: Definitions for the video call client. Created by Perry Naseck on 7/1/21. Copyright (c) 2021, The CONIX Research Center All rights reserved. This source code is licensed under the BSD-3-Clause license found in the LICENSE file in the root directory of this source tree. """ # Some code originally from: # https://github.com/peppelinux/videodrone/blob/97f867bd39d9dfa4c4335487074e77a855858cd1/src/videodrone/drones/__init__.py#L20 import subprocess # nosec B404 from functools import partial from os import setpgrp from arena import Scene from selenium.webdriver import common from selenium.webdriver.remote.webdriver import WebDriver _selenium_orig_start = common.service.Service.start class VideoCall(): """Video Call class for the ARENA.""" _selenium_start_orig = staticmethod(common.service.Service.start) _subprocess_popen_orig = staticmethod(subprocess.Popen) def __init__(self, scene: Scene, browser: WebDriver, options=None): """Initialize the video call class.""" self.scene = scene self.browser = browser self.options = options self.instance = None def __exit__(self, exception_type, exception_value, traceback): """Safely exit 'with' statements.""" self.close() def __del__(self): """Safely exit if class deleted.""" self.close() def is_open(self) -> bool: """Return if the browser is running.""" return self.instance is not None @staticmethod def _selenium_start(*args, **kwargs): """ Start Selenium but ignore handlers like SIGINT. Modified from https://stackoverflow.com/a/62430234 This allows for manually closing Selenium on CTRL+C so that the Jitsi call can be properly hung up. 
""" try: subprocess.Popen = partial(subprocess.Popen, preexec_fn=setpgrp) VideoCall._selenium_start_orig(*args, **kwargs) finally: subprocess.Popen = VideoCall._subprocess_popen_orig def open(self): """Start the browser.""" token = self.scene.remote_auth_token['token'] url = 'https://jitsi0.andrew.cmu.edu:8443/' url += f'{self.scene.namespace}_{self.scene.scene}' print(f"arena-robot VideoCall: opening {url}") url += f'?jwt={token}' url += '#config.channelLastN=0&config.resolution=1080' # Temporarily override the start function to not pass SIGINT try: common.service.Service.start = self._selenium_start self.instance = self.browser(options=self.options) finally: common.service.Service.start = self._selenium_start_orig self.instance.get(url) def set_name(self, name: str): """Set the Jitsi display name.""" script = f"APP.conference.changeLocalDisplayName('{name}');" self.instance.execute_script(script) def video_mute(self, mute: bool = True): """Set the Jitsi video mute state.""" script = f"APP.conference.muteVideo({str(mute).lower()});" self.instance.execute_script(script) def audio_mute(self, mute: bool = True): """Set the Jitsi audio mute state.""" script = f"APP.conference.muteAudio({str(mute).lower()});" self.instance.execute_script(script) def close(self): """Close and quit the browser.""" print("arena-robot VideoCall: closing") if self.instance is not None: self.instance.execute_script('APP.conference.hangup();') self.instance.quit() self.instance = None
3,657
1,121
import typing

import pandas as pd


class BaseProcessor:
    """Abstract base for data processors.

    Subclasses override :meth:`run` to transform ``input_data`` (using the
    shared ``context`` mapping) into a :class:`pandas.DataFrame`.
    """

    @staticmethod
    def run(
        input_data: typing.Any,
        context: typing.Dict[str, typing.Any]
    ) -> pd.DataFrame:
        """Transform *input_data* into a DataFrame.

        :param input_data: arbitrary input payload for the processor.
        :param context: shared keyword context for the pipeline run.
        :raises NotImplementedError: always — subclasses must override.
        """
        # Bug fix: the original ``raise NotImplemented()`` raised a
        # TypeError, because NotImplemented is a non-callable sentinel for
        # binary dunder methods; NotImplementedError is the exception meant
        # for unimplemented abstract operations.
        raise NotImplementedError()
212
64
# -*- coding: utf-8 -*- """ Created on Sat Feb 23 00:14:38 2019 @author: RV Purpose: use Ridge, Lasso and Elastic Net models on Titanic data """ #%% Lasso from sklearn.linear_model import Lasso useAlpha = 0.00001 lasso_1 = Lasso(alpha = useAlpha).fit(X, y) print(lasso_1.coef_) # OK, got the coefficients, but what do they represent??? X.columns # OK - can we align the two though? #??????? yes we can... what is zip function? [i for i in zip(X.columns, lasso_1.coef_)] # predictions lasso_1_is_pred = lasso_1.predict(X) # attempt to identify a good cutoff cutoffgrid = np.linspace(min(lasso_1_is_pred), max(lasso_1_is_pred), 100) tcm1 = [confusionMatrixInfo(lasso_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid] plt.figure() plt.plot(tcm1) plt.show() #%% # Ridge classifier from sklearn.linear_model import RidgeClassifierCV as RCCV RCCV_1 = RCCV(alphas=[np.exp(i) for i in np.linspace(-10,0,50)]).fit(X,y) RCCV_1.score(X,y) # not that great ? RCCV_1_is_pred = RCCV_1.predict(X) confusionMatrixInfo(RCCV_1_is_pred,y) # attempt to identify a good cutoff cutoffgrid = np.linspace(min(RCCV_1_is_pred), max(RCCV_1_is_pred), 100) tcm1 = [confusionMatrixInfo(RCCV_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid] plt.figure() plt.plot(tcm1) plt.show() from sklearn.datasets import load_breast_cancer from sklearn.linear_model import RidgeClassifierCV X, y = load_breast_cancer(return_X_y=True) clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) clf.score(X, y) #%% from sklearn.linear_model import ElasticNet as ENet a = 0.0001 b = 0.0001 alpha = a+b l1_ratio = a/(a+b) ENet_1 = ENet(alpha = alpha, l1_ratio= l1_ratio).fit(X,y) ENet_1_is_pred = ENet_1.predict(X) cutoffgrid = np.linspace(min(ENet_1_is_pred), max(ENet_1_is_pred), 100) tcm1 = [confusionMatrixInfo(ENet_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid] plt.figure() plt.plot(tcm1) plt.show()
2,006
938
# A utility library for getting information about a Python executable.
#
# This may be used as a script.

import importlib.util
import json
import os
import os.path
import sys
import sysconfig


# Maps each raw-data key to the dotted attribute path under which
# _build_info() exposes it on the returned namespace object (e.g.
# 'sys.implementation.name' becomes info.sys.implementation.name).
INFO = {
    # sys
    'executable (sys)': 'sys.executable',
    'executable (sys;realpath)': 'executable_realpath',
    'prefix (sys)': 'sys.prefix',
    'exec_prefix (sys)': 'sys.exec_prefix',
    'stdlib_dir (sys)': 'sys._stdlib_dir',
    'base_executable (sys)': 'sys._base_executable',
    'base_prefix (sys)': 'sys.base_prefix',
    'base_exec_prefix (sys)': 'sys.base_exec_prefix',
    'version_str (sys)': 'sys.version',
    'version_info (sys)': 'sys.version_info',
    'hexversion (sys)': 'sys.hexversion',
    'api_version (sys)': 'sys.api_version',
    'implementation_name (sys)': 'sys.implementation.name',
    'implementation_version (sys)': 'sys.implementation.version',
    'platform (sys)': 'sys.platform',
    # sysconfig
    'stdlib_dir (sysconfig)': 'sysconfig.paths.stdlib',
    'is_dev (sysconfig)': 'sysconfig.is_python_build',
    # other
    'base_executable': 'base_executable',
    'stdlib_dir': 'stdlib_dir',
    'pyc_magic_number': 'pyc_magic_number',
    'is_venv': 'is_venv',
}


def get_info(python=sys.executable):
    """Return an object with details about the given Python executable.

    Most of the details are grouped by their source.

    By default the current Python is used.
    """
    if python and python != sys.executable:
        # Run _pythoninfo.py to get the raw info.
        # A different interpreter was requested: run this very file under
        # it (the __main__ block prints JSON) and parse the output.
        import subprocess
        argv = [python, __file__]
        try:
            text = subprocess.check_output(argv, encoding='utf-8')
        except subprocess.CalledProcessError:
            raise Exception(f'could not get info for {python or sys.executable}')
        data = _unjsonify_info(text)
    else:
        data = _get_current_info()
    return _build_info(data)


def _build_info(data):
    """Map raw key/value data onto a nested SimpleNamespace via INFO.

    Raises NotImplementedError for keys missing from INFO so new raw keys
    cannot silently disappear.
    """
    # Map the data into a new types.SimpleNamespace object.
    # type(sys.implementation) is types.SimpleNamespace without importing
    # the types module.
    info = type(sys.implementation)()
    for key, value in data.items():
        try:
            field = INFO[key]
        except KeyError:
            raise NotImplementedError(repr(key))
        # Walk (creating as needed) intermediate namespaces for each dotted
        # component, then set the final attribute.
        parent = info
        while '.' in field:
            pname, _, field = field.partition('.')
            try:
                parent = getattr(parent, pname)
            except AttributeError:
                setattr(parent, pname, type(sys.implementation)())
                parent = getattr(parent, pname)
        setattr(parent, field, value)
    return info


def _get_current_info():
    """Collect the raw info dict for the currently running interpreter."""
    is_venv = (sys.prefix != sys.base_prefix)
    base_executable = getattr(sys, '_base_executable', None)
    if is_venv:
        # XXX There is probably a bug related to venv, since
        # sys._base_executable should be different.
        if base_executable == sys.executable:
            # Indicate that we don't know.
            base_executable = None
    elif not base_executable:
        base_executable = sys.executable
    info = {
        # locations
        'executable (sys)': sys.executable,
        'executable (sys;realpath)': os.path.realpath(sys.executable),
        'prefix (sys)': sys.prefix,
        'exec_prefix (sys)': sys.exec_prefix,
        'stdlib_dir': os.path.dirname(os.__file__),
        'stdlib_dir (sys)': getattr(sys, '_stdlib_dir', None),
        'stdlib_dir (sysconfig)': (sysconfig.get_path('stdlib')
                                   if 'stdlib' in sysconfig.get_path_names()
                                   else None),
        # base locations
        'base_executable': base_executable,
        'base_executable (sys)': getattr(sys, '_base_executable', None),
        'base_prefix (sys)': sys.base_prefix,
        'base_exec_prefix (sys)': sys.base_exec_prefix,
        # version
        'version_str (sys)': sys.version,
        'version_info (sys)': sys.version_info,
        'hexversion (sys)': sys.hexversion,
        'api_version (sys)': sys.api_version,
        # implementation
        'implementation_name (sys)': sys.implementation.name,
        'implementation_version (sys)': sys.implementation.version,
        # build
        'is_dev (sysconfig)': sysconfig.is_python_build(),
        # host
        'platform (sys)': sys.platform,
        # virtual envs
        'is_venv': is_venv,
        # import system
        # importlib.util.MAGIC_NUMBER has been around since 3.5.
        'pyc_magic_number': importlib.util.MAGIC_NUMBER,
    }
    return info


def _jsonify_info(info):
    """Return a JSON-serializable copy of the raw info dict.

    The pyc magic number is bytes, so it is hex-encoded for transport.
    """
    data = dict(info)
    if isinstance(data['pyc_magic_number'], bytes):
        data['pyc_magic_number'] = data['pyc_magic_number'].hex()
    return data


def _unjsonify_info(data):
    """Inverse of _jsonify_info(): rebuild Python values from JSON data."""
    if isinstance(data, str):
        data = json.loads(data)
    info = dict(data)
    # JSON turns the version tuples into lists; restore tuples.
    for key in ('version_info (sys)', 'implementation_version (sys)'):
        if isinstance(info[key], list):
            # We would use type(sys.version_info) if it allowed it.
            info[key] = tuple(info[key])
    # Restore the hex-encoded magic number to bytes.
    for key in ('pyc_magic_number',):
        if isinstance(info[key], str):
            info[key] = bytes.fromhex(data[key])
    return info


#######################################
# use as a script

if __name__ == '__main__':
    # Print the current interpreter's info as JSON; consumed by get_info()
    # when it is pointed at a different Python executable.
    info = _get_current_info()
    data = _jsonify_info(info)
    json.dump(data, sys.stdout, indent=4)
    print()
5,340
1,679
class DriverException(Exception):
    """Raised for driver-level failures.

    Common base/catch-all exception type for driver code; adds no behavior
    beyond the standard :class:`Exception` protocol.
    """
43
11
"""tokens table Revision ID: 6d2dd11ac2fb Revises: 908de7ed5813 Create Date: 2019-03-07 20:42:36.328479 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '6d2dd11ac2fb' down_revision = '908de7ed5813' branch_labels = None depends_on = None def upgrade(): op.create_table( 'jwt_revoked_tokens', sa.Column('id', sa.Integer(), nullable=False), sa.Column('jti', sa.String(120), nullable=False, index=True), sa.PrimaryKeyConstraint('id') ) def downgrade(): op.drop_table('jwt_revoked_tokens')
592
255
f = open("input.txt") total = 0 for number in f.readlines(): total += int(number[:-1]) f.close() print("Answer:", total)
127
50
from abc import ABCMeta, abstractmethod

# Fix: ndarray was imported from numpy.core.records, a non-public module
# (numpy.core is a deprecated alias of numpy._core in NumPy 2.x); the
# supported location is the top-level numpy namespace.
from numpy import ndarray

from graphtiny.domain import Chart, DataStreamWindow


class IChart(metaclass=ABCMeta):
    """Interface for chart components that can receive live data streams."""

    @abstractmethod
    def set_data_stream(self, chart: Chart, x: ndarray, y: ndarray) -> None:
        """
        Introduces data stream in the chart

        :param chart: A Chart object
        :param x: A numpy array representing data stream of the x-axis
        :param y: A numpy array representing data stream of the y-axis
        """


class IDataStreamWindow(metaclass=ABCMeta):
    """Interface for windows that display one or more data-stream charts."""

    @abstractmethod
    def launch_window(self, window: DataStreamWindow) -> None:
        """
        Displays the window with its graphic content

        :param window: A DataStreamWindow object
        """
768
213
#~ version one of the mailMorth api which would hold all the functionalities of our version one #(@: Name): "mailMorth" #(@:Description): "email Management, and automation api code" #(@:Author): "inteliJence development team" #under the license of Apache License 2.0 and intelijence Protective Rights please edit and use it with all the care you can give #import blueprint class from flask import Blueprint,request from flask_restplus import Api, reqparse from views import connect,connectMail #end all import #---------------------------------------------------------------------------------- #Create BluePrint and API #---------------------------------------------------------------------------------- api_v1 = Blueprint('api_v1', __name__)#create a blueprint structure of the flask class mailApi=Api(api_v1)#initialize the api class by passing the flask object to it # cursor=mysql.connect() #---------------------------------------------------------------------------------- #End all BluePrint #---------------------------------------------------------------------------------- #start routin pages here mailApi.add_resource(connect,"/start/<string:app>/<string:Key>") mailApi.add_resource(connectMail,"/startService/<string:app>/<string:clientKey>")
1,307
345
# -*- coding: utf-8 -*-
from PyQt5 import QtCore

from pineboolib.flcontrols import ProjectClass
from pineboolib import decorators


class FLSettings(ProjectClass):
    """Read/write access to persistent application settings.

    Thin wrapper over a shared QSettings store (NativeFormat, UserScope,
    organization "Eneboo", application "Pineboo").
    """

    # Class-level QSettings instance shared by all FLSettings objects.
    s = QtCore.QSettings(QtCore.QSettings.NativeFormat,
                         QtCore.QSettings.UserScope,
                         "Eneboo", "Pineboo")

    @decorators.BetaImplementation
    def readListEntry(self, key, retOk=False):
        """Return the list stored under *key*, or [] when absent."""
        ret = []
        # Bug fix: QSettings does not implement __contains__, so the
        # original ``if key in self.s`` raised TypeError; contains() is
        # the supported membership test.
        if self.s.contains(key):
            ret = self.s.value(key)
        return ret

    def readEntry(self, _key, _def=None, retOk=False):
        """Return the raw value stored under *_key*, or *_def* when unset/empty."""
        ret = self.s.value(_key, None)  # returns a QVariant
        if "geo" in _key:
            # Geometry entries: fall back to the default only when the
            # stored value is falsy (no string-emptiness check).
            # print("Geo vale", str(ret))
            # ret = ret.toSize()
            if not ret:
                ret = _def
        else:
            if str(ret) == "":
                ret = _def
        return ret

    @decorators.BetaImplementation
    def readNumEntry(self, key, _def=0, retOk=False):
        """Return the value under *key* as int, or *_def* when absent."""
        # Bug fix: the original ignored _def and called int(None) when the
        # key was missing, raising TypeError; use QSettings' defaultValue.
        ret = self.s.value(key, _def)
        return int(ret)

    @decorators.BetaImplementation
    def readDoubleEntry(self, key, _def=0, retOk=False):
        """Return the value under *key* as float, or *_def* when absent."""
        # Same fix as readNumEntry: honor _def instead of float(None).
        ret = self.s.value(key, _def)
        return float(ret)

    def readBoolEntry(self, key, _def=False, retOk=False):
        """Return the value under *key* as bool, or *_def* when absent."""
        ret = self.s.value(key)
        # QSettings may hand back the bool as the string "true"/"false".
        if isinstance(ret, str):
            ret = ret == "true"
        if ret is None:
            ret = _def
        return ret

    def writeEntry(self, key, value):
        """Store *value* under *key*."""
        self.s.setValue(key, value)

    @decorators.BetaImplementation
    def writeEntryList(self, key, value):
        """Store the list *value* under *key*."""
        self.s.setValue(key, value)
546
# -*- coding: utf-8 -*- # Copyright 2021 Tianmian Tech. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import json import os import numpy as np from google.protobuf.json_format import MessageToJson from common.python import RuntimeInstance from common.python import session from common.python.calculation.fc.fc_source import FCSource from common.python.calculation.fc.fc_storage import FCStorage from common.python.common.consts import NAMESPACE, TaskResultDataType, \ ProjectStatus, ModelType, TaskStatus from common.python.common.enums import FlowQueueActionType from common.python.db.data_set_dao import DataSetDao from common.python.db.data_set_column_dao import DataSetColumnDao from common.python.db.db_models import * from common.python.db.job_member_dao import JobMemberDao from common.python.db.project_dao import ProjectDao from common.python.db.project_data_set_dao import ProjectDataSetDao from common.python.db.task_dao import TaskDao from common.python.db.task_progress_dao import TaskProgressDao from common.python.db.flow_action_queue_dao import FlowActionQueueDao from common.python.db.task_result_dao import TaskResultDao from common.python.db.current_best_model_dao import CurrentBestModelDao from common.python.db.provider_model_param_dao import ProviderModelParamsDao from common.python.db.job_dao import JobDao from common.python.protobuf.pyproto import default_empty_fill_pb2 from common.python.utils import file_utils from 
common.python.utils.core_utils import current_datetime, timestamp_to_date, get_commit_id, md5, get_delta_seconds from kernel.tracker import model_manager from kernel.tracker import model_utils from kernel.utils.decorator_utils import update_task_status_env LOGGER = log_utils.get_logger() def generate_unit_id(task_id): str_list = task_id.split("_") for item in str_list: if item in ["arbiter", "promoter", "provider"]: str_list.remove(item) return "_".join(str_list) class Tracking(object): METRIC_DATA_PARTITION = 48 METRIC_LIST_PARTITION = 48 JOB_VIEW_PARTITION = 8 def __init__(self, project_id: str, job_id: str, role: str, member_id: int, model_id: str = None, model_version: str = None, component_name: str = None, module_name: str = None, task_id: str = None, oot: bool = False): self.is_serving_model = False self.show_name = "" self.source_type = "" self.project_id = project_id self.job_id = job_id self.role = role self.member_id = member_id self.component_name = component_name if component_name else 'pipeline' self.module_name = module_name if module_name else 'Pipeline' self.task_id = task_id if task_id else Tracking.generate_task_id(job_id=self.job_id, role=self.role, component_name=self.component_name) self.table_namespace = '_'.join( ['wefe', 'tracking', 'data', self.job_id, self.role, str(self.member_id), self.component_name]) self.job_table_namespace = '_'.join( ['wefe', 'tracking', 'data', self.job_id, self.role, str(self.member_id)]) self.model_id = model_id self.member_model_id = model_utils.gen_member_model_id(model_id=model_id, role=role, member_id=member_id) self.model_version = model_version self.oot = oot def set_is_serving_model(self, flag): self.is_serving_model = flag def set_show_name(self, name): self.show_name = name def set_source_type(self, source_type): self.source_type = source_type def _get_task_result_type(self, data_type, data_name=None): """ Get type for task result Parameters ---------- data_type:TaskResultDataType data_name:str 
train、eval Returns ------- """ if data_name: # In oot mode, in order to avoid primary key conflicts, # only the type field can be used to de-duplicate if self.oot: return '_'.join([data_type, data_name, self.component_name, 'oot']) return '_'.join([data_type, data_name]) return data_type + '_' + self.component_name + "_oot" if self.oot else data_type def save_output_data_table(self, data_table, data_name: str = 'component', save_dataset=False): if data_table: save_name = '{}_persistent'.format(data_table._name) save_namespace = NAMESPACE.DATA save_partitions = data_table.get_partitions() async_save = False fcs_info = None if RuntimeInstance.BACKEND.is_fc() and isinstance(data_table, FCSource) and data_table.get_exist_fcs(): async_save = True fcs_info = data_table.get_exist_fcs().to_dict() params = { "fcs_info": fcs_info, "name": save_name, "namespace": save_namespace, "partitions": save_partitions } # save data asynchronously flow_action_queue = FlowActionQueue() flow_action_queue.id = get_commit_id() flow_action_queue.producer = 'kernel' flow_action_queue.action = FlowQueueActionType.SAVE_OUTPUT_DATA flow_action_queue.params = json.dumps(params) flow_action_queue.channel = '' FlowActionQueueDao.save(flow_action_queue,force_insert=True) if not async_save: # save data synchronously data_table.save_as(namespace=save_namespace, name=save_name) # save meta header_list = data_table.schema.get('header', []) session.save_data_table_meta( {'schema': data_table.schema, 'header': header_list, 'sid': data_table.schema.get('sid_name', '')}, data_table_namespace=save_namespace, data_table_name=save_name) data_input = {'table_name': save_name, 'table_namespace': save_namespace, 'partition': save_partitions, 'table_create_count': data_table.count() if data_table else 0, 'fcs_info': fcs_info} # self.save_data_info(data_input=data_input, mark=True, data_name=data_name) self.save_task_result(data_input, self._get_task_result_type(TaskResultDataType.DATA, data_name)) if save_dataset: 
self.save_dataset(data_input, data_table.schema, data_table) def get_output_data_table(self, data_name: str = 'component'): """ Get output data Parameters ---------- data_name Returns ------- table of dsource """ task_result = self.get_task_result(self._get_task_result_type(TaskResultDataType.DATA, data_name)) if task_result and task_result.result: data_table_info = json.loads(task_result.result) if data_table_info and data_table_info.get("table_name") and data_table_info.get("table_namespace"): data_table = session.table(name=data_table_info.get('table_name', ''), namespace=data_table_info.get('table_namespace', '')) data_table_meta = data_table.get_metas() if data_table_meta.get('schema', None): data_table.schema = data_table_meta['schema'] # If fcs exists, load fcs directly if 'fcs_info' in data_table_info and isinstance(data_table, FCSource): fcs_info = data_table_info.get('fcs_info') fcs = FCStorage.from_fcs_info(fcs_info) if fcs: fc_source = FCSource.from_fcs(fcs, session.get_session_id(), data_table.get_namespace(), data_table.get_name()) fc_source.schema = data_table.schema data_table = fc_source return data_table else: return None def save_output_model(self, model_buffers: dict, model_name: str, data_name, save_to_storage=False): if model_buffers: if save_to_storage: name_space = 'wefe_data' name = self.task_id + '_' + self.job_id model_manager.save_component_model(component_model_key='{}.{}'.format(self.component_name, model_name), model_buffers=model_buffers, member_model_id=name_space, model_version=name) # save to task result model_json_obj = self._model_buffers_to_json_obj(model_buffers, self.member_model_id, self.model_version, component_model_key='{}.{}'.format(self.component_name, model_name)) self.save_task_result(model_json_obj, self._get_task_result_type(TaskResultDataType.MODEL, model_name)) def _model_buffers_to_json_obj(self, model_buffers: dict, member_model_id, model_version, component_model_key): """ Model buffers to json obj Parameters 
---------- model_buffers member_model_id model_version component_model_key Returns ------- """ model = {'member_model_id': member_model_id, 'model_version': model_version, 'component_model_key': component_model_key} for buffer_name, buffer_object in model_buffers.items(): json_obj = MessageToJson(buffer_object, including_default_value_fields=True) if not json_obj: fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage() fill_message.flag = 'set' json_obj = MessageToJson(fill_message, including_default_value_fields=True) if 'meta' in buffer_name.lower(): model['model_meta'] = json.loads(json_obj) if 'param' in buffer_name.lower(): model['model_param'] = json.loads(json_obj) return model def save_task_result(self, task_result: dict, result_type, component_name=None): """ Save task result Parameters ---------- task_result result_type component_name:str Component name, special case can be specified separately Returns ------- """ model = TaskResultDao.get( TaskResult.job_id == self.job_id, TaskResult.task_id == self.task_id, TaskResult.role == self.role, TaskResult.type == result_type ) task = TaskDao.get( Task.job_id == self.job_id, Task.task_id == self.task_id ) # Compatible with local test without task information if not task: task = Task() task.flow_id = "local_test_flow_id" task.flow_node_id = "local_test_flow_node_id" is_insert = True if model: is_insert = False else: model = TaskResult() model.id = get_commit_id() model.created_time = datetime.datetime.now() model.job_id = self.job_id model.name = component_name or self.component_name model.task_id = self.task_id model.role = self.role model.type = result_type model.updated_time = datetime.datetime.now() model.result = json.dumps(task_result) model.component_type = self.component_name.rsplit('_')[0] model.flow_id = task.flow_id model.flow_node_id = task.flow_node_id model.project_id = task.project_id if self.is_serving_model and model.type.split("_")[0] == "model": model.serving_model = 1 
TaskResultDao.save(model, force_insert=is_insert) return model def get_task_result(self, result_type, task_id=None): """ Get task result Parameters ---------- result_type task_id Returns ------- """ where_condition = [TaskResult.job_id == self.job_id, TaskResult.name == self.component_name, TaskResult.role == self.role, TaskResult.type == result_type] if task_id: where_condition.append(TaskResult.task_id == task_id) return TaskResultDao.get(*tuple(where_condition)) def save_training_best_model(self, model_buffers): # save to task_result model_json_obj = self._model_buffers_to_json_obj(model_buffers, self.member_model_id, self.model_version, component_model_key='{}.{}'.format(self.component_name, "default")) self.save_task_result(model_json_obj, self._get_task_result_type(TaskResultDataType.TRAINING_MODEL, "default")) def save_cur_best_model(self, model_buffers, iteration): model = CurrentBestModelDao.get( CurrentBestModel.job_id == self.job_id, CurrentBestModel.component_name == self.component_name, CurrentBestModel.role == self.role, CurrentBestModel.member_id == self.member_id ) is_insert = True if model: is_insert = False else: model = CurrentBestModel() model.id = get_commit_id() model.created_time = current_datetime() model.job_id = self.job_id model.component_name = self.component_name model.task_id = self.task_id model.role = self.role model.member_id = self.member_id model.updated_time = current_datetime() model.iteration = iteration for buffer_name, buffer_object in model_buffers.items(): json_obj = MessageToJson(buffer_object, including_default_value_fields=True) if not json_obj: fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage() fill_message.flag = 'set' json_obj = MessageToJson(fill_message, including_default_value_fields=True) if 'meta' in buffer_name.lower(): model.model_meta = json_obj if 'param' in buffer_name.lower(): model.model_param = json_obj CurrentBestModelDao.save(model, force_insert=is_insert) return model def 
save_provider_model_params(self, model_buffers, provider_member_id):
        """Insert or update the ProviderModelParams row for this job/component/role/member.

        :param model_buffers: protobuf message holding the provider model parameters.
        :param provider_member_id: id of the member that provided the model.
        :return: the saved ProviderModelParams model instance.
        """
        model = ProviderModelParamsDao.get(
            ProviderModelParams.job_id == self.job_id,
            ProviderModelParams.component_name == self.component_name,
            ProviderModelParams.role == self.role,
            ProviderModelParams.member_id == self.member_id
        )
        is_insert = True
        if model:
            is_insert = False
        else:
            # New row: only id and created_time are set on the insert path.
            model = ProviderModelParams()
            model.id = get_commit_id()
            model.created_time = datetime.datetime.now()
        # NOTE(review): fields below are assumed common to both insert and update
        # paths — the collapsed source does not show the dedent; confirm.
        model.job_id = self.job_id
        model.component_name = self.component_name
        model.task_id = self.task_id
        model.role = self.role
        model.member_id = self.member_id
        model.updated_time = datetime.datetime.now()
        model.provider_member_id = provider_member_id
        # model.updated_by = ""
        # model.created_by = ""
        json_obj = MessageToJson(model_buffers, including_default_value_fields=True)
        if not json_obj:
            # Empty serialization: store a sentinel "empty fill" message instead,
            # so the column is never blank.
            fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage()
            fill_message.flag = 'set'
            json_obj = MessageToJson(fill_message, including_default_value_fields=True)
        model.provider_model_param = json_obj
        ProviderModelParamsDao.save(model, force_insert=is_insert)
        return model

    def get_output_model(self, model_name=ModelType.BINNING_MODEL):
        """Load the stored output model for this task.

        :param model_name: model type used to build the task-result type key.
        :return: dict with "Model_Meta" and "Model_Param", or None if absent.
        """
        model = TaskResultDao.get(
            TaskResult.task_id == self.task_id,
            TaskResult.role == self.role,
            TaskResult.type == self._get_task_result_type(TaskResultDataType.MODEL, model_name)
        )
        if model:
            model = json.loads(model.result)
            return {"Model_Meta": model["model_meta"], "Model_Param": model["model_param"]}
        else:
            return None

    def get_training_best_model(self):
        """Load the best training model ("default" namespace) for this task.

        :return: dict with "Model_Meta" and "Model_Param", or None if absent.
        """
        model = TaskResultDao.get(
            TaskResult.task_id == self.task_id,
            TaskResult.role == self.role,
            TaskResult.type == self._get_task_result_type(TaskResultDataType.TRAINING_MODEL, "default")
        )
        if model:
            model = json.loads(model.result)
            return {"Model_Meta": model["model_meta"], "Model_Param": model["model_param"]}
        else:
            return None

    def get_statics_result(self, type='data_feature_statistic'):
        """Collect per-feature statistics from the last statistics task result.

        :param type: task result type to look up (note: shadows builtin `type`).
        :return: dict of per-feature statistics, or None when nothing is stored.
        """
        model = TaskResultDao.get_last_statics_result(self.job_id, self.role, type)
        if model:
            # Local names intentionally mirror the statistic keys
            # (they shadow builtins max/min — kept as-is).
            max = {}
            min = {}
            mean = {}
            median = {}
            missing_count = {}
            std_variance = {}
            count = 0
            mode = {}
            result = json.loads(model.result)
            LOGGER.info("mysql result:{}".format(result))
            members = result['members']
            feature_statistic = None
            # Pick the statistics block belonging to this member's role.
            for member in members:
                if member['role'] == self.role:
                    feature_statistic = member['feature_statistic']
            if feature_statistic:
                for feature, value in feature_statistic.items():
                    max[feature] = value['max']
                    min[feature] = value['min']
                    mean[feature] = value['mean']
                    # Median is the 50th percentile when present.
                    if '50' in value['percentile']:
                        median[feature] = value['percentile']['50']
                    missing_count[feature] = value['missing_count']
                    std_variance[feature] = value['std_variance']
                    count = value['count']
                    mode[feature] = value.get('mode')
            # "std" is published as an alias of std_variance.
            statics = {"max": max, "min": min, "mean": mean, "median": median,
                       "missing_count": missing_count, "std_variance": std_variance,
                       "std": std_variance, 'count': count, "mode": mode}
            return statics
        return None

    def get_binning_result(self):
        """Extract binning (woe / split points) info from the last 'model_train' result.

        :return: (model_param, binning_results) or (None, None) when absent.
        """
        model = TaskResultDao.get_last_task_result(self.job_id, self.role, 'model_train')
        if model:
            result = json.loads(model.result)
            LOGGER.debug("mysql result:{}".format(result))
            binning_result = result.get('model_param').get('binningResult').get('binningResult')
            binning_results = {}
            for feature, value in binning_result.items():
                binning_results[feature] = {'woe': value.get('woeArray'),
                                            'split_points': value.get('splitPoints')}
            model_meta = result.get('model_meta')
            model_param = {'header': model_meta.get('cols')}
            transform_cols = model_meta.get('transformParam').get('transformCols')
            model_param['transform_bin_indexes'] = [int(x) for x in transform_cols]
            return model_param, binning_results
        return None, None

    def saveSingleMetricData(self, metric_name: str, metric_namespace: str, metric_meta, kv, job_level=False):
        """Persist one metric entry (thin wrapper over save_metric_data_to_task_result)."""
        self.save_metric_data_to_task_result(metric_name, metric_namespace, metric_meta, kv, job_level)

    def saveMetricData(self, metric_name: str, metric_namespace: str, metric_meta, kv, job_level=False):
        """Persist metric data (same behavior as saveSingleMetricData)."""
        self.save_metric_data_to_task_result(metric_name, metric_namespace, metric_meta, kv, job_level)

    def _get_item_metric(self, metric_name: str, metric_namespace: str, metric_meta: {}, data: {}):
        """Build the dict skeleton for one metric item.

        Parameters
        ----------
        metric_name
        metric_namespace
        metric_meta
        data

        Returns
        -------
        dict with metric identification fields and a "data" payload.
        """
        return {"metric_name": metric_name, "metric_namespace": metric_namespace,
                "metric_meta": metric_meta, "data": data}

    def _get_metric_data_value(self, v):
        """Normalize a metric value for JSON storage (floats stringified, inf flagged)."""
        # return {'value': v, 'create_time': timestamp_to_date()}
        if isinstance(v, dict):
            return {'value': v}
        # Infinity is not valid JSON — store a marker string instead.
        if np.isinf(v):
            return {'value': 'Infinity'}
        if type(v) == float:
            return {'value': str(v)}
        return {'value': v}

    def save_metric_data_to_task_result(self, metric_name: str, metric_namespace: str, metric_meta, kv,
                                        job_level=False, need_value=True):
        """Merge metric data into the task-result JSON blob and save it.

        Parameters
        ----------
        metric_name
        metric_namespace
        metric_meta
        kv : list of (k, v) pairs, or a single (k, v) pair, or raw payload
             when ``need_value`` is False.
        job_level : when True the data is attributed to component 'dag'.
        need_value : when False ``kv`` replaces the data payload wholesale.

        Returns
        -------
        None
        """
        result_type = self._get_task_result_type(TaskResultDataType.METRIC, metric_namespace)
        metric_task_result = self.get_task_result(result_type, self.task_id)
        result = {}
        if metric_task_result and metric_task_result.result:
            result = json.loads(metric_task_result.result)
        metric_key = '_'.join([metric_namespace, metric_name])
        component_name = self.component_name if not job_level else 'dag'
        if metric_key in result.keys():
            item_metric = result.get(metric_key)
        else:
            item_metric = self._get_item_metric(metric_name, metric_namespace, metric_meta, {})
        if not need_value:
            item_metric['data'] = kv
        elif isinstance(kv, list):
            for k, v in kv:
                item_metric['data'].update({k: self._get_metric_data_value(v)})
        else:
            # Single (key, value) pair.
            item_metric['data'].update({kv[0]: self._get_metric_data_value(kv[1])})
        result[metric_key] = item_metric
        self.save_task_result(result, result_type, component_name)

    def save_dataset(self, data_input, schema, data_table):
        """Create and persist a derived DataSet record for this task's output table.

        :param data_input: dict with 'table_namespace', 'table_name',
            'table_create_count' describing the created table.
        :param schema: dict with at least a 'header' feature-name list.
        :param data_table: table handle supporting .filter(...).count()
            (used to count positive labels).
        :return: the saved DataSet, or None when any prerequisite row is missing.
        """
        header_list = schema.get("header")
        # Determine whether the task exists
        task = TaskDao.find_one_by_task_id(self.task_id)
        if not task:
            return
        # Determine whether the job exists
        job = JobDao.find_one_by_job_id(self.job_id, self.role)
        if not job:
            return
        # Determine whether the project exists
        project = ProjectDao.get(self.project_id == Project.project_id, Project.my_role == self.role)
        if not project:
            return
        job_member = JobMemberDao.get(
            JobMember.job_id == self.job_id,
            JobMember.member_id == self.member_id,
            JobMember.job_role == self.role
        )
        if not job_member:
            return
        data_set_old = DataSetDao.get(
            DataSet.id == job_member.data_set_id
        )
        if not data_set_old:
            return
        data_set = DataSet()
        # data_set_id = get_commit_id()
        unit_id = generate_unit_id(self.task_id)
        data_set.id = md5(unit_id)
        data_set.created_time = current_datetime()
        data_set.updated_time = current_datetime()
        data_set.name = job.name + self.show_name
        data_set.source_type = self.module_name
        data_set.source_job_id = job.job_id
        data_set.name = data_set.name + '_' + timestamp_to_date(format_string='%Y%m%d%H%M%S')
        data_set.storage_type = data_set_old.storage_type
        data_set.public_member_list = data_set_old.public_member_list
        data_set.tags = data_set_old.tags
        data_set.description = data_set_old.description
        data_set.source_flow_id = data_set_old.source_flow_id
        data_set.source_task_id = self.task_id
        # NOTE(review): self-assignment below looks unintended (probably meant
        # data_set_old.y_name_list); it is overwritten a few lines further down.
        data_set.y_name_list = data_set.y_name_list
        data_set.usage_count_in_job = 0
        data_set.usage_count_in_flow = 0
        data_set.usage_count_in_project = 0
        data_set.namespace = data_input['table_namespace']
        data_set.table_name = data_input['table_name']
        data_set.row_count = data_input['table_create_count']
        data_set.feature_name_list = ",".join(header_list)
        data_set.y_name_list = data_set_old.y_name_list
        data_set.primary_key_column = data_set_old.primary_key_column
        # column = primary_key + y + feature
        if data_set.y_name_list is None:
            data_set.column_name_list = data_set.primary_key_column + "," + ",".join(header_list)
        else:
            data_set.column_name_list = f"{data_set.primary_key_column},{data_set.y_name_list},{','.join(header_list)}"
        # y positive count
        y_positive_count = data_table.filter(lambda k, v: int(v.label) > 0).count()
        y_positive_ratio = round(y_positive_count / data_input['table_create_count'], 4)
        data_set.y_positive_example_count = y_positive_count
        data_set.y_positive_example_ratio = y_positive_ratio
        if len(header_list) == 0:
            # NOTE(review): with an empty header the list ends in a stray comma;
            # [1:] drops the FIRST character, though — looks like [:-1] was
            # intended to strip the trailing separator. Confirm before changing.
            data_set.column_name_list = data_set.column_name_list[1:]
        data_set.contains_y = data_set_old.contains_y
        data_set.column_count = len(data_set.column_name_list.split(","))
        data_set.feature_count = len(data_set.feature_name_list.split(","))
        DataSetDao.save(data_set, force_insert=True)
        self.save_project_data_set(data_set.id, self.job_id, self.task_id, self.component_name)
        self.save_data_set_column(data_set, schema, data_set_old.id)
        return data_set

    @staticmethod
    def generate_task_id(job_id, role, component_name):
        """Build the canonical task id: '<job_id>_<role>_<component_name>'."""
        return '{}_{}_{}'.format(job_id, role, component_name)

    # NOTE(review): the two helpers below take no `self`/`cls` yet are not
    # decorated as @staticmethod — presumably only ever called unbound; confirm.
    def get_job_log_directory(job_id):
        """Return the log directory path for ``job_id``."""
        return os.path.join(log_utils.get_log_root_path(), job_id)

    def get_job_directory(job_id):
        """Return the working directory path for ``job_id``."""
        return os.path.join(file_utils.get_project_base_directory(), 'jobs', job_id)

    def save_project_data_set(self, data_set_id, job_id, task_id, component_name):
        """Insert a ProjectDataSet row linking ``data_set_id`` to this project.

        :return: the saved ProjectDataSet instance.
        """
        project_data_set = ProjectDataSet()
        project_data_set.id = get_commit_id()
        project_data_set.member_role = self.role
        project_data_set.created_by = self.member_id
        project_data_set.created_time = current_datetime()
        project_data_set.updated_by = self.member_id
        project_data_set.updated_time = current_datetime()
        project_data_set.project_id = self.project_id
        project_data_set.member_id = self.member_id
        project_data_set.data_set_id = data_set_id
        # Derived data sets are auto-approved.
        project_data_set.audit_status = ProjectStatus.AGREE
        project_data_set.status_updated_time = current_datetime()
        project_data_set.source_task_id = task_id
        # Component name is '<type>_<suffix>'; keep only the type part.
        project_data_set.source_type = component_name.split("_")[0]
        project_data_set.source_job_id = job_id
        ProjectDataSetDao.save(project_data_set, force_insert=True)
        return project_data_set

    @staticmethod
    def get_data_set_column_type(data_set_id):
        """Return the list of column data types stored for ``data_set_id``."""
        data_set_columns = DataSetColumnDao.list_by_data_set_id(data_set_id)
        column_types = []
        for item_column in data_set_columns:
            column_types.append(item_column.data_type)
        return column_types

    @staticmethod
    def save_data_set_column(data_set, schema, old_data_set_id):
        """Persist column metadata (id, label, feature columns) for ``data_set``.

        :param data_set: the newly saved DataSet.
        :param schema: dict with 'column_types' and 'header'.
        :param old_data_set_id: source data set id, used to copy the id
            column's type.
        """
        column_types = schema.get("column_types")
        header = schema.get("header")
        if column_types:
            def get_new_column_json(data_set_id, index, name, data_type):
                # One DataSetColumn row as a plain dict for batch insert.
                return {
                    "data_set_id": data_set_id,
                    "id": get_commit_id(),
                    "created_time": current_datetime(),
                    "index": index,
                    "name": name,
                    "data_type": data_type
                }

            index = 0
            data_set_id = data_set.id
            # get old data set id column type
            id_column = DataSetColumnDao.get(DataSetColumn.data_set_id == old_data_set_id,
                                             DataSetColumn.name == data_set.primary_key_column)
            # id column — fall back to "String" when the source column is missing.
            id_column_type = id_column.data_type if id_column else "String"
            column_list = [get_new_column_json(data_set_id, index, data_set.primary_key_column, id_column_type)]
            index += 1
            # label column
            if data_set.contains_y == 1:
                for item_y in data_set.y_name_list.split(','):
                    column_list.append(get_new_column_json(data_set_id, index, item_y, "Integer"))
                    index += 1
            # feature column
            for i in range(len(header)):
                column_list.append(get_new_column_json(data_set_id, index, header[i], column_types[i]))
                index += 1
            DataSetColumnDao.batch_insert(column_list)

    def _calc_progress(self, model):
        """
        Calculation progress

        According to the total engineering quantity, the current completion
        engineering quantity calculation progress.

        If there is actual engineering quantity, calculate the percentage based
        on actual engineering quantity, that is, it is finished. Otherwise,
        calculate the progress percentage according to the estimated
        engineering quantity.

        Parameters
        ----------
        model : TaskProgress row to update in place.

        Returns
        -------
        The same model, with progress_rate/spend/expect_end_time refreshed.
        """
        if model.progress is None:
            model.progress = 0
        # Clamp progress to the expected total.
        if model.progress > model.expect_work_amount:
            model.progress = model.expect_work_amount
        work_amount = model.really_work_amount or model.expect_work_amount
        model.progress_rate = round(model.progress / work_amount * 100, 2)
        if model.progress_rate > 100:
            model.progress_rate = 100
        if model.updated_time is not None and model.progress_rate > 0:
            # Elapsed milliseconds so far, then linear extrapolation to 100%.
            model.spend = int((model.updated_time - model.created_time).total_seconds() * 1000)
            need_time = int(model.spend * 100 / model.progress_rate - model.spend)
            model.expect_end_time = model.updated_time + datetime.timedelta(milliseconds=need_time)
        return model

    def init_task_progress(self, work_amount: int):
        """
        Initialize the total engineering quantity of the task schedule

        eg. Logistic regression algorithm parameters need to run 300
        iterations, then work_amount can be set to 300; after each iteration
        completes, the current work amount needs to be +1.

        Parameters
        ----------
        work_amount : int
            Total engineering

        Returns
        -------
        None
        """
        if self.oot:
            return
        is_insert = True
        model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
        if model:
            is_insert = False
            # reset
            model.progress = 0
            model.really_work_amount = None
            model.created_time = datetime.datetime.now()
            model.updated_time = None
            model.expect_end_time = None
            model.spend = None
        else:
            model = TaskProgress()
            model.id = get_commit_id()
            model.progress = 0
            model.created_time = datetime.datetime.now()
        # get task info
        # NOTE(review): the lookup below is assumed common to both branches —
        # the collapsed source does not show the dedent; confirm.
        task_info = TaskDao.get(
            Task.task_id == self.task_id,
            Task.role == self.role
        )
        if task_info:
            model.flow_id = task_info.flow_id
            model.flow_node_id = task_info.flow_node_id
        else:
            model.flow_id = 0
            model.flow_node_id = 0
        model.project_id = self.project_id
        model.job_id = self.job_id
        model.role = self.role
        model.task_id = self.task_id
        model.task_type = self.component_name.split('_')[0]
        model.expect_work_amount = work_amount
        self._calc_progress(model)
        TaskProgressDao.save(model, force_insert=is_insert)

    def set_task_progress(self, work_amount: int):
        """
        Update the progress according to the specified work amount

        Parameters
        ----------
        work_amount : int
            The amount of work currently completed

        Returns
        -------
        None
        """
        if self.oot:
            return
        if work_amount >= 0:
            model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
            if model:
                model.progress = work_amount
                model.updated_time = datetime.datetime.now()
                self._calc_progress(model)
                TaskProgressDao.save(model)

    def add_task_progress(self, step: int = 1):
        """
        Increase progress according to step

        Parameters
        ----------
        step : int

        Returns
        -------
        None
        """
        if self.oot:
            return
        model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
        if model.progress is not None:
            work_amount = model.progress + step
        else:
            work_amount = step
        # Reserve one amount for use when the finish call
        if work_amount > model.expect_work_amount - 1:
            work_amount = model.expect_work_amount - 1
        self.set_task_progress(work_amount)

    def finish_task_progress(self):
        """
        Finish task progress: consume the reserved unit, pin the real work
        amount, and mark the process as successfully finished.

        Returns
        -------
        None
        """
        model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
        if model:
            model.progress = model.progress + 1
            model.really_work_amount = model.progress
            if model.really_work_amount > model.expect_work_amount:
                model.really_work_amount = model.expect_work_amount
            model.updated_time = datetime.datetime.now()
            self._calc_progress(model)
            model.pid_success = 1
            TaskProgressDao.save(model)

    @update_task_status_env()
    def set_task_success(self):
        """
        Set task success: mark the task row SUCCESS and record timing.

        Returns
        -------
        None
        """
        running_task = TaskDao.find_one_by_task_id(self.task_id)
        if running_task:
            running_task.status = TaskStatus.SUCCESS
            running_task.message = "任务运行完成"
            running_task.updated_time = datetime.datetime.now()
            running_task.finish_time = datetime.datetime.now()
            running_task.spend = get_delta_seconds(
                running_task.finish_time, running_task.start_time)
            TaskDao.save(running_task)


# Ad-hoc manual probe left by the author: inspects one task's timing fields.
if __name__ == '__main__':
    task = TaskDao.find_one_by_task_id('69ccd7ca9ff444f3a93a7e950fbf432d_promoter_Intersection_16238974992057754')
    a = task.start_time
    b = task.finish_time
    print(type(a))
    c = b - a
    print(type(c))
    print(c.seconds)
35,874
10,496
class ${KF}
12
8
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('person', '0007_auto_20160214_2019'), ] operations = [ migrations.AddField( model_name='patient', name='date_created', field=models.DateField(default=datetime.datetime(2016, 2, 15, 1, 6, 14, 723509, tzinfo=utc), auto_now_add=True), preserve_default=False, ), ]
570
204
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/6/25 18:59 # @Author : Kay # @Site : # @File : test_01_openapp.py # @Software: PyCharm Community Edition import uiautomator2 as u2 import unittest from Public.Decorator import * from Public.BasePage import BasePage from Public.ReadConfig import ReadConfig from Public.JugementSensorData import JugementSensorData from TestSuit_SenSorData.ExpectResult.OpenApp import OpenApp_Expection event_name = ReadConfig().get_testEvent("打开App") apkpage = ReadConfig().get_pkg_name() apkActivity = ReadConfig().get_pkg_activity() class OpenApp(unittest.TestCase,BasePage): @classmethod @setupclass def setUpClass(cls): cls.set_fastinput_ime() cls.unlock_device() cls.d.app_stop_all() @classmethod @setupclass def tearDownClass(cls): cls.d.app_stop(apkpage) @testcase def test_01_coldapp(self): self.d.app_start(apkpage,apkActivity) server = OpenApp_Expection() JugementSensorData.JugementData("test_01_coldapp",server) @testcase def test_01_hotapp(self): self.d.app_start(apkpage, apkActivity) time.sleep(5) self.d.app_stop(apkpage) self.d.app_start(apkpage, apkActivity) server = OpenApp_Expection() JugementSensorData.JugementData("test_01_hotapp",server)
1,358
500
import random


class Plane():
    """A randomly generated aircraft.

    Attributes are populated by ``__init__`` via the ``create*``/``set*``
    helpers; ``captain``, ``origin``, ``destination`` and ``img`` are
    currently unimplemented placeholders (``None``).
    """

    name = None
    bearing = None
    captain = None
    origin = None
    destination = None
    img = None
    squawk = None

    def __init__(self):
        self.name = self.createName()
        self.bearing = self.setBearing()
        self.captain = self.createCaptain()
        self.origin = self.setOrigin()
        self.destination = self.setDestination()
        self.squawk = self.setSquawk()

    def createName(self):
        """Return a random flight identifier such as ``'TAA-512'``.

        :returns: ``'<airline code>-<number>'`` with the number in 101..1024.
        """
        airlines = {'TAA': 'Trans American Airlines', 'AIA': 'Atlantic International Airlines',
                    'WA': 'Windsor Airlines', 'AA': 'Ajira Airways', 'OA': 'Oceanic Airlines',
                    'JA': 'JetAir', 'GA': 'Gamma Air', 'CA': 'Conglomerated Airlines',
                    'OLA': 'Olive Airways'}
        # BUG FIX: in Python 3, dict.keys() is a view and random.choice()
        # raises TypeError on it; materialize it as a list first
        # (also works on Python 2).
        airline = random.choice(list(airlines.keys()))
        flight_number = random.randint(101, 1024)  # avoid shadowing builtin `id`
        return '%s-%s' % (airline, str(flight_number))

    def setBearing(self):
        """Return a random compass bearing in degrees (10..270 inclusive)."""
        return random.randint(10, 270)

    def createCaptain(self):
        """Placeholder — not yet implemented; returns None."""
        pass

    def setOrigin(self):
        """Placeholder — not yet implemented; returns None."""
        pass

    def setDestination(self):
        """Placeholder — not yet implemented; returns None."""
        pass

    def setSquawk(self):
        """Return a random line from ``transcript.txt``.

        The file is expected to use CRLF line endings; raises FileNotFoundError
        if it is absent (behavior unchanged from the original).
        """
        with open('transcript.txt', 'r') as f:
            contents = f.read()
        lines = contents.split("\r\n")
        return random.choice(lines).strip()
1,077
420
from species.analysis.fit_model import FitModel from species.analysis.fit_spectrum import FitSpectrum from species.analysis.photometry import SyntheticPhotometry # from species.analysis.retrieval import AtmosphericRetrieval from species.read.read_calibration import ReadCalibration from species.read.read_filter import ReadFilter from species.read.read_isochrone import ReadIsochrone from species.read.read_model import ReadModel from species.read.read_planck import ReadPlanck # from species.read.read_radtrans import ReadRadtrans from species.read.read_spectrum import ReadSpectrum from species.read.read_color import ReadColorMagnitude, \ ReadColorColor from species.read.read_object import ReadObject from species.core.box import create_box from species.core.constants import * from species.core.setup import SpeciesInit from species.data.companions import get_data from species.data.database import Database from species.plot.plot_color import plot_color_magnitude, \ plot_color_color from species.plot.plot_mcmc import plot_posterior, \ plot_walkers, \ plot_mag_posterior, \ plot_size_distributions, \ plot_extinction # from species.plot.plot_retrieval import plot_pt_profile from species.plot.plot_spectrum import plot_spectrum from species.util.phot_util import apparent_to_absolute, \ absolute_to_apparent, \ multi_photometry, \ get_residuals from species.util.query_util import get_parallax from species.util.read_util import add_luminosity, \ get_mass, \ powerlaw_spectrum, \ update_spectra __author__ = 'Tomas Stolker' __license__ = 'MIT' __version__ = '0.3.1' __maintainer__ = 'Tomas Stolker' __email__ = 'tomas.stolker@phys.ethz.ch' __status__ = 'Development'
2,157
593