id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6457185 | """Implementation of :class:`MPmathComplexDomain` class. """
from sympy.polys.domains.realdomain import RealDomain
class MPmathComplexDomain(RealDomain): # XXX: tmp solution
    """Complex domain backed by mpmath.

    NOTE(review): temporarily subclasses RealDomain rather than a complex
    base class (see the XXX above) — confirm before extending.
    """
    # Domain alias used by the polys domain registry.
    alias = 'CC_mpmath'
    def __init__(self):
        # No state to initialise; the class is a stub.
        pass
| StarcoderdataPython |
1951730 | <filename>Connections/admin.py
from django.contrib import admin
from .models import GroupUserConnection, GroupTaskConnection
@admin.register(GroupTaskConnection)
class GroupTaskConnectionAdminConfig(admin.ModelAdmin):
    """Django admin configuration for GroupTaskConnection rows."""
    # Columns shown in the admin change-list.
    list_display = ('group','task')
@admin.register(GroupUserConnection)
class GroupUserConnectionAdminConfig(admin.ModelAdmin):
    """Django admin configuration for GroupUserConnection rows.

    BUGFIX: this admin class was previously named ``GroupUserConnection``,
    shadowing the imported model of the same name for the rest of the module.
    Renamed to match the sibling ``GroupTaskConnectionAdminConfig``.
    """
    # Columns shown in the admin change-list.
    list_display = ('group','user')
11379423 | from pystatic.error.errorcode import *
| StarcoderdataPython |
8102380 | import io
import logging
import struct
from . import headers, errors, evlrs
from .compression import laszip_decompress
from .lasdatas import las14, las12
from .point import dims, record
from .vlrs import rawvlr
from .vlrs.vlrlist import VLRList
logger = logging.getLogger(__name__)
def _raise_if_wrong_file_signature(stream):
    """Consume the leading signature bytes of *stream* and raise on mismatch.

    Reads exactly ``len(headers.LAS_FILE_SIGNATURE)`` bytes (4, i.e. LASF)
    and raises :class:`errors.PylasError` if they differ.
    """
    expected = headers.LAS_FILE_SIGNATURE
    actual = stream.read(len(expected))
    if actual != expected:
        raise errors.PylasError(
            "File Signature ({}) is not {}".format(actual, expected)
        )
class LasReader:
    """ This class handles the reading of the different parts of a las file.

    As the Header is necessary to be able to understand how the data is structured,
    it will be read during initialisation of the instance
    """

    def __init__(self, stream, closefd=True):
        # Remember where the data begins so every seek can be made relative
        # to it (the stream is not required to start at offset 0).
        self.start_pos = stream.tell()
        _raise_if_wrong_file_signature(stream)
        self.stream = stream
        # Whether close() should actually close the underlying stream.
        self.closefd = closefd
        self.header = self.read_header()

    def read_header(self):
        """ Reads the head of the las file and returns it
        """
        self.stream.seek(self.start_pos)
        return headers.HeaderFactory().read_from_stream(self.stream)

    def read_vlrs(self):
        """ Reads and return the vlrs of the file
        """
        # VLRs start immediately after the header block.
        self.stream.seek(self.start_pos + self.header.size)
        return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr)

    def read(self):
        """ Reads the whole las data (header, vlrs ,points, etc) and returns a LasData
        object
        """
        vlrs = self.read_vlrs()
        self._warn_if_not_at_expected_pos(
            self.header.offset_to_point_data, "end of vlrs", "start of points"
        )
        self.stream.seek(self.start_pos + self.header.offset_to_point_data)
        try:
            points = self._read_points(vlrs)
        except (RuntimeError, errors.LazPerfNotFound) as e:
            # Lazperf decompression failed: fall back to laszip by
            # decompressing the whole file into memory and re-initialising
            # this reader on the decompressed bytes, then retrying.
            # NOTE(review): the previous stream is replaced without being
            # closed here — confirm intended ownership semantics.
            logger.error("LazPerf failed to decompress ({}), trying laszip.".format(e))
            self.stream.seek(self.start_pos)
            self.__init__(io.BytesIO(laszip_decompress(self.stream)))
            return self.read()
        if dims.format_has_waveform_packet(self.header.point_format_id):
            self.stream.seek(
                self.start_pos + self.header.start_of_waveform_data_packet_record
            )
            # Exactly one of the internal/external waveform flags should be
            # set; equal flags (both set or both unset) are contradictory.
            if self.header.global_encoding.are_waveform_flag_equal():
                raise errors.PylasError(
                    "Incoherent values for internal and external waveform flags, both are {})".format(
                        "set"
                        if self.header.global_encoding.waveform_internal
                        else "unset"
                    )
                )
            if self.header.global_encoding.waveform_internal:
                # TODO: Find out what to do with these
                _, _ = self._read_internal_waveform_packet()
            elif self.header.global_encoding.waveform_external:
                logger.info(
                    "Waveform data is in an external file, you'll have to load it yourself"
                )
        # NOTE: lexicographic string comparison; adequate for "1.x" versions.
        if self.header.version >= "1.4":
            evlrs = self.read_evlrs()
            return las14.LasData(
                header=self.header, vlrs=vlrs, points=points, evlrs=evlrs
            )
        return las12.LasData(header=self.header, vlrs=vlrs, points=points)

    def _read_points(self, vlrs):
        """ private function to handle reading of the points record parts
        of the las file.

        the header is needed for the point format and number of points
        the vlrs are need to get the potential laszip vlr as well as the extra bytes vlr
        """
        try:
            extra_dims = vlrs.get("ExtraBytesVlr")[0].type_of_extra_dims()
        except IndexError:
            # No ExtraBytesVlr present -> points carry no extra dimensions.
            extra_dims = None
        if self.header.are_points_compressed:
            # The LasZipVlr is consumed (popped) so it does not end up in
            # the returned LasData's vlr list.
            laszip_vlr = vlrs.pop(vlrs.index("LasZipVlr"))
            points = self._read_compressed_points_data(laszip_vlr, extra_dims)
        else:
            points = record.PackedPointRecord.from_stream(
                self.stream,
                self.header.point_format_id,
                self.header.point_count,
                extra_dims,
            )
        return points

    def _read_compressed_points_data(self, laszip_vlr, extra_dims):
        """ reads the compressed point record
        """
        # First 8 bytes of a LAZ point record: little-endian int64 offset
        # to the chunk table; the compressed points end where it begins.
        offset_to_chunk_table = struct.unpack("<q", self.stream.read(8))[0]
        size_of_point_data = offset_to_chunk_table - self.stream.tell()
        if offset_to_chunk_table <= 0:
            logger.warning(
                "Strange offset to chunk table: {}, ignoring it..".format(
                    offset_to_chunk_table
                )
            )
            size_of_point_data = -1  # Read everything
        points = record.PackedPointRecord.from_compressed_buffer(
            self.stream.read(size_of_point_data),
            self.header.point_format_id,
            self.header.point_count,
            laszip_vlr,
            extra_dims=extra_dims
        )
        return points

    def _read_internal_waveform_packet(self):
        """ reads and returns the waveform vlr header, waveform record
        """
        # This is strange, the spec says, waveform data packet is in a EVLR
        # but in the 2 samples I have its a VLR
        # but also the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)
        b = bytearray(self.stream.read(rawvlr.VLR_HEADER_SIZE))
        waveform_header = rawvlr.RawVLRHeader.from_buffer(b)
        # Everything after the header, to end of stream, is the raw record.
        waveform_record = self.stream.read()
        logger.debug(
            "Read: {} MBytes of waveform_record".format(len(waveform_record) / 10 ** 6)
        )
        return waveform_header, waveform_record

    def read_evlrs(self):
        """ Reads the EVLRs of the file, will fail if the file version
        does not support evlrs
        """
        self.stream.seek(self.start_pos + self.header.start_of_first_evlr)
        return evlrs.EVLRList.read_from(self.stream, self.header.number_of_evlr)

    def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
        """ Helper function to warn about unknown bytes found in the file"""
        diff = expected_pos - self.stream.tell()
        if diff != 0:
            logger.warning(
                "There are {} bytes between {} and {}".format(diff, end_of, start_of)
            )

    def close(self):
        """ closes the file object used by the reader
        """
        # NOTE(review): closes unconditionally; the closefd flag is only
        # honoured by __exit__, not by an explicit close() call.
        self.stream.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.closefd:
            self.close()
| StarcoderdataPython |
6658854 | <filename>xsoccer/venues/management/commands/build_venue_table_ALL_FILES.py
### Read from F9 files and construct Teams models
import utils.xmls as xml_utils
import utils.unicode as unicode_utils
import os
from venues.models import Venue
from django.core.management.base import BaseCommand
def is_venue(xml_obj):
    """Return True if the XML object is a Venue tag.

    BUGFIX (docs): the previous docstring claimed this checked for a Team
    tag, but the code compares against "Venue".
    """
    return xml_obj.tag == "Venue"
class Command(BaseCommand):
"""
Sample usage:
python manage.py build_venue_table_ALL_FILES \
--dry_run \
--data_filepath=data/f9/
"""
help = "Populate venue table"
def add_arguments(self, parser):
"""Add custom CLI arguments"""
parser.add_argument(
"--dry_run",
action="store_true",
dest="dry_run",
default=False,
help="Don't save and just print venues",
)
parser.add_argument(
"--data_filepath",
dest="data_filepath",
type=str,
required=True,
help="Filename with data",
)
def handle(self, *args, **options):
data_filepath = options["data_filepath"]
is_dry_run = options["dry_run"]
print "Importing venues from %s" % data_filepath
if is_dry_run:
print "This is a dry run and will not save any data"
for root_dir, sub_dirs, filenames in os.walk(data_filepath):
for f in filenames:
xml_file = os.path.join(data_filepath, f)
new_venues = []
xml_data_root = xml_utils.get_root_from_file(xml_file)
for child in xml_utils.get_children(xml_data_root):
# Iterate over each item
for item in xml_utils.get_children(child):
if is_venue(item) == False:
continue
name = xml_utils.get_tag(item, "Name").text
name = unicode_utils.remove_accents(name)
uuid = xml_utils.get_attrib(item, "uID")
country = xml_utils.get_tag(item, "Country").text
venue = Venue(name=name, uuid=uuid, country=country)
new_venues.append(venue)
# log out for audit and save if not dry run and it is a new venue
for venue in new_venues:
# get all existing uuids
existing_venue_uuids = Venue.objects.all().values_list("uuid")
if is_dry_run == False and venue.uuid not in [u[0] for u in existing_venue_uuids]:
venue.save()
print venue.__dict__
| StarcoderdataPython |
5180102 | <reponame>zceekja/colour_tree_comp2823
"""
Test Tree
=========
Checks that your tree performs basic functionality.
"""
import unittest
from colours import Colours
from node import Node
from tree import Tree
class TestTree(unittest.TestCase):
    """
    Checks super basic tree functionality

    Covers: put, colour propagation (on put / update / rm), rm, swap,
    is_coloured_to_depth_k and is_colour_until_condition.
    """

    def test_put(self):
        """
        Can we insert into tree?
        """
        root = Node(Colours.CYAN)
        t = Tree(root)
        a = Node(Colours.CYAN)
        t.put(root, a)
        assert len(root.children) == 1, \
            "[tree.put] should add child to node."
        assert root.children[0] == a, \
            "[tree.put] should add the correct node, yours did not."
        t.put(a, Node(Colours.YELLOW))
        # Inserting under `a` must leave root's direct children untouched.
        assert len(root.children) == 1, \
            "[tree.put] should add child to node."
        assert root.children[0] == a, \
            "[tree.put] should add the correct node, yours did not."
        assert len(a.children) == 1, \
            "[tree.put] should add child to node."

    def test_put_propagate(self):
        """
        Does the colour propagate?
        """
        root = Node(Colours.CYAN)
        t = Tree(root)
        a = Node(Colours.BLUE)
        # Nothing should propagate yet
        assert Colours.CYAN.cmp(root.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        t.put(root, a)
        # It should now be blue!
        assert Colours.BLUE.cmp(root.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        t.put(a, Node(Colours.RED))
        # It should now be red!
        assert Colours.RED.cmp(root.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        assert Colours.RED.cmp(a.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."

    def test_update_colour_propagates(self):
        """
        Does the colour propagate when changed?
        """
        root = Node(Colours.CYAN)
        t = Tree(root)
        a = Node(Colours.BLUE)
        t.put(root, a)
        t.put(a, Node(Colours.RED))
        # It should now be red!
        assert Colours.RED.cmp(root.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        assert Colours.RED.cmp(a.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        # Recolouring the deepest node should ripple up to every ancestor.
        t.update_node_colour(a.children[0], Colours.NYAN)
        assert Colours.NYAN.cmp(root.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."
        assert Colours.NYAN.cmp(a.propagated_colour) == 0, \
            "[propagate] Your colour didn't propagate correctly."

    def test_can_rm(self):
        """
        Can we remove a child?
        """
        root = Node(Colours.CYAN)
        t = Tree(root)
        a = Node(Colours.GREEN)
        b = Node(Colours.RED)
        t.put(root, a)
        t.put(root, b)
        assert len(root.children) == 2
        t.rm(b)
        assert len(root.children) == 1, \
            "[tree.rm] did not remove the node."
        assert b not in root.children, \
            "[tree.rm] did not remove the correct child."

    def test_rm_propagate(self):
        """
        Can we remove a child and the colour propagates?
        """
        root = Node(Colours.CYAN)
        t = Tree(root)
        a = Node(Colours.GREEN)
        b = Node(Colours.RED)
        t.put(root, a)
        t.put(root, b)
        assert Colours.RED.cmp(root.propagated_colour) == 0, \
            "Colour did not propagate with .put"
        assert Colours.GREEN.cmp(a.propagated_colour) == 0, \
            "Colour of sibling changed?"
        # Removing the red child should let green win the propagation again.
        t.rm(b)
        assert Colours.GREEN.cmp(root.propagated_colour) == 0, \
            "Colour did not propagate when removing a child!"

    def test_can_swap_example(self):
        """
        Can you perform the swap in the comments?
        """
        A = Node(Colours.GREEN)
        B = Node(Colours.RED)
        C = Node(Colours.BLUE)
        D = Node(Colours.CYAN)
        J = Node(Colours.CYAN)
        K = Node(Colours.YELLOW)
        t = Tree(A)
        t.put(A, B)
        t.put(A, C)
        t.put(B, D)
        t.put(C, J)
        t.put(C, K)
        # Let's swap
        t.swap(D, C)
        # Let's check if it worked!
        assert D.parent == A, \
            "[tree.swap] Did not change parent."
        assert C.parent == B, \
            "[tree.swap] Did not change parent."
        assert D not in B.children, \
            "[tree.swap] Did not remove child from old parent."
        assert C not in A.children, \
            "[tree.swap] Did not remove child from old parent."
        assert C in B.children, \
            "[tree.swap] child incorrectly swapped to children list."
        assert D in A.children, \
            "[tree.swap] child incorrectly swapped to children list."

    def test_depth_example(self):
        """
        Can you perform the is_coloured function?

        (start)---> G
                   / \
              (A) G   G (B)
                 /|    \
          (A1) G R(A2)  G (B1)
                 |
                 R (A21)
        """
        root = Node(Colours.GREEN)
        A = Node(Colours.GREEN)
        B = Node(Colours.GREEN)
        A1 = Node(Colours.GREEN)
        A2 = Node(Colours.RED)
        A21 = Node(Colours.RED)
        B1 = Node(Colours.GREEN)
        t = Tree(root)
        t.put(root, A)
        t.put(root, B)
        t.put(A, A1)
        t.put(A, A2)
        t.put(A2, A21)
        t.put(B, B1)
        # Depth 0 = root only; depth 1 adds A and B; depth 2 reaches red A2.
        assert t.is_coloured_to_depth_k(root, Colours.GREEN, 0), \
            "[is_coloured] Returned false, should be true!"
        assert not t.is_coloured_to_depth_k(root, Colours.RED, 0), \
            "[is_coloured] Returned true, should be false!"
        assert not t.is_coloured_to_depth_k(root, Colours.GREEN, 2), \
            "[is_coloured] Returned true, should be false!"
        assert t.is_coloured_to_depth_k(root, Colours.GREEN, 1), \
            "[is_coloured] Returned false, should be true!"

    def test_is_until_example_1(self):
        """
        Can you perform the example from the comments?

        (start)---> G (root)
                   / \
              (B) G   G (A)
                 /|    \
            (C) R R (E) R (D)
                  |
                  R (F)
        """
        root = Node(Colours.GREEN)
        A = Node(Colours.GREEN)
        B = Node(Colours.GREEN)
        C = Node(Colours.RED)
        D = Node(Colours.RED)
        E = Node(Colours.RED)
        F = Node(Colours.RED)
        t = Tree(root)
        t.put(root, A)
        t.put(root, B)
        t.put(B, C)
        t.put(A, D)
        t.put(B, E)
        t.put(E, F)
        # Every root-to-leaf path is green until a red node: condition holds.
        res, path = t.is_colour_until_condition(
            root,
            Colours.GREEN,
            Colours.RED
        )
        assert res, \
            "[tree.is_coloured_until] Didn't return true"

    def test_is_until_example_2(self):
        """
        Can you perform the example from the comments?

        (start)---> G (root)
                   / \
              (B) G   G (A)
                 /|    \
            (C) R R (E) G (D)
                  |
                  R (F)
        """
        root = Node(Colours.GREEN)
        A = Node(Colours.GREEN)
        B = Node(Colours.GREEN)
        C = Node(Colours.RED)
        D = Node(Colours.GREEN)
        E = Node(Colours.RED)
        F = Node(Colours.RED)
        t = Tree(root)
        t.put(root, A)
        t.put(root, B)
        t.put(B, C)
        t.put(A, D)
        t.put(B, E)
        t.put(E, F)
        # The leaf D is green and never hits the red condition, so the
        # check must fail and return the offending root->A->D path.
        res, path = t.is_colour_until_condition(
            root,
            Colours.GREEN,
            Colours.RED
        )
        assert not res, \
            "[tree.is_coloured_until] Didn't return false"
        assert path is not None, \
            "[tree.is_coloured_until] Didn't return a witness path."
        assert len(path) > 0, \
            "[tree.is_coloured_until] Didn't return a witness path."
        expected_path = [root, A, D]
        assert len(path) == len(expected_path), \
            "[tree.is_coloured_until] Didn't return a long enough witness path."
        for i in range(0, len(expected_path)):
            assert path[i] == expected_path[i], \
                "[tree.is_coloured_until] Path didn't match expected"
        # Check it's a real path
        curr = path[-1]
        assert curr.parent == A and curr.parent == path[-2], \
            "[tree.is_coloured_until] Path isn't valid!"
        curr = curr.parent
        assert curr.parent == root and curr.parent == path[-3], \
            "[tree.is_coloured_until] Path isn't valid!"
| StarcoderdataPython |
6427179 | <reponame>wolfy1339/Kenni<gh_stars>0
#!/usr/bin/env python3
import base64
def irc_cap(kenni, input):
    """Handle IRCv3 CAP negotiation (LS / ACK / NAK) during connect.

    On LS, requests multi-prefix and/or sasl when the server advertises
    them; on ACK of sasl, starts PLAIN authentication; any other outcome
    ends capability negotiation.
    """
    subcommand, payload = input.args[1], input.args[2]
    if kenni.is_connected:
        return
    if subcommand == 'LS':
        # Request only the capabilities we support AND the server offers.
        wanted = [cap for cap in ('multi-prefix', 'sasl') if cap in payload]
        if wanted:
            kenni.write(('CAP', 'REQ', ':' + ' '.join(wanted)))
        else:
            irc_cap_end(kenni, input)
    elif subcommand == 'ACK' and 'sasl' in payload:
        kenni.write(('AUTHENTICATE', 'PLAIN'))
    else:
        # ACK without sasl, NAK, or anything unexpected: stop negotiating.
        irc_cap_end(kenni, input)
    return
irc_cap.rule = r'(.*)'
irc_cap.event = 'CAP'
irc_cap.priority = 'high'
def irc_authenticated(kenni, input):
    """Answer the server's AUTHENTICATE challenge with SASL PLAIN credentials.

    Builds the base64-encoded ``authzid\\0authcid\\0password`` payload from
    the bot config and writes it in chunks of at most 400 bytes, as the
    SASL/IRCv3 protocol requires. Sends '+' when no credentials are set.
    """
    auth = False
    if hasattr(kenni.config, 'nick') and kenni.config.nick is not None and hasattr(kenni.config, 'password') and kenni.config.password is not None:
        nick = kenni.config.nick
        # BUGFIX: this line was a '<PASSWORD>' redaction placeholder (a
        # syntax error); restore the read of the configured password.
        password = kenni.config.password
        # If provided, use the specified user for authentication, otherwise just use the nick
        if hasattr(kenni.config, 'user') and kenni.config.user is not None:
            user = kenni.config.user
        else:
            user = nick
        auth = "\0".join((nick, user, password))
        auth = base64.b64encode(auth.encode('utf-8'))
    if not auth:
        kenni.write(('AUTHENTICATE', '+'))
    else:
        while len(auth) >= 400:
            out = auth[0:400]
            # BUGFIX: was auth[401:], which silently dropped one byte per
            # chunk and corrupted the base64 payload; chunks are 400 bytes.
            auth = auth[400:]
            kenni.write(('AUTHENTICATE', out))
        if auth:
            kenni.write(('AUTHENTICATE', auth))
        else:
            # Payload length was an exact multiple of 400: terminate with '+'.
            kenni.write(('AUTHENTICATE', '+'))
    return
irc_authenticated.rule = r'(.*)'
irc_authenticated.event = 'AUTHENTICATE'
irc_authenticated.priority = 'high'
def irc_903 (kenni, input):
    # 903 = RPL_SASLSUCCESS: mark the session authenticated, end CAP.
    kenni.is_authenticated = True
    irc_cap_end(kenni, input)
    return
irc_903.rule = r'(.*)'
irc_903.event = '903'
irc_903.priority = 'high'

def irc_904 (kenni, input):
    # 904 = ERR_SASLFAIL: authentication failed; end CAP negotiation anyway.
    irc_cap_end(kenni, input)
    return
irc_904.rule = r'(.*)'
irc_904.event = '904'
irc_904.priority = 'high'

def irc_905 (kenni, input):
    # 905 = ERR_SASLTOOLONG: credentials message too long; end CAP.
    irc_cap_end(kenni, input)
    return
irc_905.rule = r'(.*)'
irc_905.event = '905'
irc_905.priority = 'high'

def irc_906 (kenni, input):
    # 906 = ERR_SASLABORTED: client aborted authentication; end CAP.
    irc_cap_end(kenni, input)
    return
irc_906.rule = r'(.*)'
irc_906.event = '906'
irc_906.priority = 'high'

def irc_907 (kenni, input):
    # 907 = ERR_SASLALREADY: already authenticated; end CAP.
    irc_cap_end(kenni, input)
    return
irc_907.rule = r'(.*)'
irc_907.event = '907'
irc_907.priority = 'high'
def irc_001 (kenni, input):
    # 001 = RPL_WELCOME: registration finished, flag the bot as connected
    # so later CAP messages are ignored by irc_cap.
    kenni.is_connected = True
    return
irc_001.rule = r'(.*)'
irc_001.event = '001'
irc_001.priority = 'high'
def irc_cap_end (kenni, input):
    # Tell the server capability negotiation is over so registration proceeds.
    kenni.write(('CAP', 'END'))
    return
if __name__ == '__main__':
    # BUGFIX: this module has no docstring, so __doc__ is None and
    # __doc__.strip() raised AttributeError when run as a script.
    print((__doc__ or '').strip())
| StarcoderdataPython |
3200013 | <reponame>nickspinale/vim-signed-local-rc<filename>python/slrc/vimsupport.py
import os.path
from slrc.crypto import sign_file, verify_file
from slrc.persist import check_pub_key, trust_pub_key, untrust_pub_key
import vim
def checked_source():
    """Source ./.vimrc only if it exists, its key is trusted and its signature checks out.

    Requires .vimrc, .vimrc.pub and .vimrc.sig side by side; reports the
    failure reason through vim's echo otherwise.
    """
    required = ('.vimrc', '.vimrc.pub', '.vimrc.sig')
    if not all(os.path.isfile(name) for name in required):
        # Nothing to do when any of the three files is absent.
        return
    if not check_pub_key('.vimrc.pub'):
        vim.command('echo "Slrc: .vimrc.pub not trusted"')
        return
    if not verify_file('.vimrc.pub', '.vimrc', '.vimrc.sig'):
        vim.command('echo "Slrc: invalid signature"')
        return
    vim.command('source .vimrc')
def sign_vimrc(priv_key_path):
    """Create .vimrc.sig by signing ./.vimrc with the key at *priv_key_path*."""
    if not os.path.isfile('.vimrc'):
        vim.command('echo "Slrc: .vimrc does not exist"')
        return
    sign_file(priv_key_path, '.vimrc', '.vimrc.sig')
| StarcoderdataPython |
4812133 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
import parldata_crawler.items
from cgi import valid_boundary
from collections.abc import Sequence
def _seq_but_not_str(obj):
return isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray))
def _bulk_normalize(item):
keys_to_remove = []
for key, value in item.items():
if value is None:
keys_to_remove.append(key)
elif _seq_but_not_str(value):
if len(value) == 0:
keys_to_remove.append(key)
elif isinstance(value, str):
v = value.replace('\xa0', ' ').strip(' \n')
if v == '':
keys_to_remove.append(key)
else:
item[key] = v
elif isinstance(value, parldata_crawler.items.PlenarySitting):
_bulk_normalize(item[key])
for key in keys_to_remove:
del item[key]
class ParldataCrawlerPipeline(object):
    """Scrapy item pipeline normalizing Hungarian parliament speech items."""

    # Patterns that extract the chairman's name from a sitting header
    # ("Elnök" = chair; the second form: "the chair is taken by <name>").
    speaker_patterns = ["-\s*Elnök:\s*([^\-\)\r\n]+)\s*[\-\)\r\n]", "Az elnöki széket (.*) foglalja"]

    def process_item(self, item, spider):
        # Normalize parsed speech text
        if 'text' in item:
            # Drop the boilerplate lead-in ("text of the speech:") and
            # replace non-breaking spaces.
            item['text'] = item['text'].replace('A felszólalás szövege:', '').replace(u'\xa0', u' ')
            if 'speaker' in item and item['speaker']:
                item['speaker'] = item['speaker'].strip(' :')
                header_match_probe = True
                # 1990-94/31
                # Speakers may appear as "Name (Party)"; split name and party.
                speaker_with_party_match = re.search("([^\(]+)\(([^\)]+)\)", item['speaker'])
                if speaker_with_party_match:
                    header_match_probe = False
                    g1 = speaker_with_party_match.group(1).strip()
                    g2 = speaker_with_party_match.group(2).strip()
                    if g1.lower() == 'elnök':
                        # "Elnök (Name)": parenthesis holds the name, not a party.
                        item['speaker'] = g2
                        item['speaker_title'] = 'Elnök'
                    else:
                        item['speaker'] = g1
                        item['speaker_party'] = g2
                # 1990-1994: chairman info in header
                if header_match_probe and 'header' in item['plenary_sitting_details']:
                    if item['speaker'].lower() == 'elnök':
                        item['speaker_title'] = item['speaker']
                        h = item['plenary_sitting_details']['header']
                        # Try each known header pattern; first hit wins.
                        for p in self.speaker_patterns:
                            match = re.search(p, h)
                            if match:
                                item['speaker'] = match.group(1).strip(' .\r\n')
                                break
            if 'speaker' in item and item['speaker'].isupper():
                # ALL-CAPS names are converted to Title Case.
                item['speaker'] = item['speaker'].title()
            # Collapse runs of blank lines and trim the final text.
            item['text'] = re.sub('\s*[\r\n]+', '\n', item['text']).strip(' \n')
        if 'bill_title' in item:
            if _seq_but_not_str(item['bill_title']):
                item['bill_title'] = [t.strip(' \n') for t in item['bill_title']]
        _bulk_normalize(item)
        return item
| StarcoderdataPython |
12865058 | '''
@Author: Hata
@Date: 2020-05-24 15:30:19
@LastEditors: Hata
@LastEditTime: 2020-05-24 15:32:04
@FilePath: \LeetCode\230.py
@Description: https://leetcode-cn.com/problems/kth-smallest-element-in-a-bst/
'''
class Solution:
    def kthSmallest(self, root, k):
        """Return the k-th smallest key (1-indexed) in the BST rooted at *root*."""
        def inorder(node):
            # Left subtree, node, right subtree: yields keys in sorted order.
            if node is None:
                return
            yield from inorder(node.left)
            yield node.val
            yield from inorder(node.right)

        stream = inorder(root)
        for _ in range(k):
            result = next(stream)
        return result
| StarcoderdataPython |
5154209 | import turbodbc
import _config as config
# Connect using the options defined in the local _config module.
C = turbodbc.connect(**config.turbodbc_connection_options)
cur = C.cursor()
# Disable the server-side query cache so the fetch below is a cold read
# (this is a benchmark-style script).
cur.execute("ALTER SESSION SET QUERY_CACHE = 'OFF'")
cur.execute(f"SELECT * FROM {config.table_name}")
# Fetch via Arrow, then convert to a pandas DataFrame and print a summary.
df = cur.fetchallarrow().to_pandas()
df.info()
| StarcoderdataPython |
8115180 | <gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UnbatchBenchmark(test.Benchmark):
  """Benchmarks for `tf.data.experimental.unbatch()`."""

  def benchmarkNativeUnbatch(self):
    # Benchmark the fused unbatch op across several batch sizes; each trial
    # consumes elems_per_trial elements via a skip() so a single sess.run
    # covers the whole pipeline.
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = dataset.apply(batching.unbatch())
      dataset = dataset.skip(elems_per_trial)
      iterator = dataset_ops.make_initializable_iterator(dataset)
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          # 5 repetitions per batch size; report the median to damp noise.
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)
          median_wall_time = np.median(deltas)
          print("Unbatch (native) batch size: %d Median wall time per element:"
                " %f microseconds" % (batch_size, median_wall_time * 1e6))
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name="native_batch_size_%d" % batch_size)

  # Include a benchmark of the previous `unbatch()` implementation that uses
  # a composition of more primitive ops. Eventually we'd hope to generate code
  # that is as good in both cases.
  def benchmarkOldUnbatchImplementation(self):
    # Same measurement protocol as above, but unbatching is expressed as
    # flat_map(from_tensor_slices) instead of the fused unbatch op.
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
      dataset = dataset.skip(elems_per_trial)
      iterator = dataset_ops.make_initializable_iterator(dataset)
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)
          median_wall_time = np.median(deltas)
          print("Unbatch (unfused) batch size: %d Median wall time per element:"
                " %f microseconds" % (batch_size, median_wall_time * 1e6))
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name="unfused_batch_size_%d" % batch_size)
if __name__ == "__main__":
  # Run the benchmarks through the TensorFlow test/benchmark runner.
  test.main()
| StarcoderdataPython |
8044572 | import os
# Concatenate all .pt poem files under ../../data into one text file.
file_path = os.path.join('.', 'colab_file.txt')
if os.path.exists(file_path):
    # Start from a clean output file since we open it in append mode below.
    os.remove(file_path)
count = 0
with open(file_path, 'a') as to_write:
    for root, dirs, _ in os.walk(os.path.join("..", "..", "data")):
        for dir in dirs:
            dir_path = os.path.join(root, dir)
            for root1, _, files1 in os.walk(dir_path):
                # NOTE(review): this loop variable shadows the outer
                # `file_path` (the output file); harmless only because the
                # output handle is already open — consider renaming.
                for file_path in files1:
                    if not file_path.endswith('.pt'):
                        continue
                    with open(os.path.join(root1, file_path), 'r') as poem_file:
                        # Skip the first two lines (presumably a per-file
                        # header — TODO confirm against the data format).
                        lines = poem_file.readlines()[2:]
                        lines = list(filter(lambda l: len(l), lines))
                        # NOTE(review): replace('', ' ') inserts a space
                        # between EVERY character; likely a character was
                        # lost from the first argument — verify intent.
                        lines = list(map(lambda l: l.replace('',' '), lines))
                        to_write.writelines(''.join(lines))
                        count = count + 1
                        # Progress indicator in units of 100 files.
                        print(count/100)
| StarcoderdataPython |
1636804 | # Each segment has another segment of the image showing (not black)
# As if you are slowly lookinng at someone's face from above
# Segment numbers are as follows:
# 1. Forehead (dowm to eyebrows)
# 2. Eyebrows (down to eyes)
# 3. Eyes
# 4. Nose
# 5. Mouth
# 6. Chin
# 7. Full
import logging
import numpy as np
import os
import pandas as pd
import cv2
from tqdm import tqdm
import segments_helpers as helpers
from datetime import date
today = date.today()
# yy-mm-dd
today = today.strftime("%y-%m-%d")

# global variables
BLACK = [0,0,0]
IMAGE_HEIGHT = 218
IMAGE_WIDTH = 178
landmarks_file_name = "/home/nthom/Documents/datasets/UNR_Facial_Attribute_Parsing_Dataset/landmarks.csv"
path_to_images = "/home/nthom/Documents/datasets/CelebA/Img/img_celeba/"
path_to_masks = "./data/segmented_images/"
path_to_corners = "/home/nthom/Documents/CelebA_Partial_Blackout/crop_points.csv"

# create the directory to store the images if it doesn't exist
if not os.access(path_to_masks, os.F_OK):
    os.mkdir(path_to_masks)

# get a sorted list of the images in the given directory
image_name_list = sorted(os.listdir(path_to_images))

# get the landmarks
landmarks_df = pd.read_csv(landmarks_file_name, index_col="image_name")
landmarks_df_head = landmarks_df.head
print(f"Landmarks Dataframe Head:\n {landmarks_df_head} \n")

# get the bounding boxes
corners_df = pd.read_csv(path_to_corners, index_col=0)
print(f"Bounding Boxes Dataframe Head: \n {corners_df.head}")
print(f"Bounding Boxes Dataframe headers: {corners_df.columns}")

# change this list if only certain segments are desired
segment_numbers = [1, 2, 3, 4, 5, 6, 7]

for image_name in tqdm(image_name_list):
    # Sanity checks
    landmark_exists = image_name in landmarks_df.index
    corners_exists = image_name in corners_df.index
    if not landmark_exists:
        print(f"Image {image_name} not found in landmarks file!\n")
    if not corners_exists:
        print(f"Image {image_name} not found in corners file!\n")
    if landmark_exists and corners_exists:
        # get the landmarks (vertical cut points, ordered top to bottom)
        landmarks = helpers.top_to_bottom_segments(image_name, landmarks_df)
        # get the info about the bounding box
        x_min, x_max, y_min, y_max = corners_df.loc[image_name]
        # Append the box bottom as the last cut so segment 7 keeps the full
        # face visible (blackout starts at/below the crop).
        landmarks = np.append(landmarks, [y_max])
        # create the segments
        for segment_number in segment_numbers:
            # get the image
            img = cv2.imread(path_to_images + image_name)
            # create the segment directory to store the images if it doesn't exist
            directory = "segment" + str(segment_number)
            if not os.access(path_to_masks + directory, os.F_OK):
                os.mkdir(path_to_masks + directory)
            # The image will be black from the landmark point down to the bottom
            landmark = landmarks[segment_number - 1]
            logging.info(f"The landmark for segment {segment_number} is {landmark}.")
            img[landmark:, :] = BLACK
            # crop the image to the face bounding box, then resize to the
            # standard CelebA dimensions (178x218 = IMAGE_WIDTH x IMAGE_HEIGHT)
            cropped_img = img[y_min:y_max, x_min:x_max]
            cropped_img = cv2.resize(cropped_img, (178, 218))
            # save the cropped image
            if cv2.imwrite(path_to_masks + directory + "/segment" + str(segment_number) + "_" + image_name, cropped_img) == False:
                print(f"Image {segment_number}_{image_name} not saved successfully!")
| StarcoderdataPython |
4807647 | # 符号反転
import numpy as np
import npu
import matplotlib.pyplot as plt
def save_img(fname, V, q_V, deq_V, n=1):
    """Plot the original, quantized and dequantized signals on one figure.

    Parameters
    ----------
    fname : str
        Output file name, written under the ``img/`` directory.
    V, q_V, deq_V : array-like
        Original (green), quantized (blue) and dequantized (red) signals.
    n : int
        Matplotlib figure number so successive calls use separate figures.
    """
    # Generalized: derive the x axis from the signal length instead of the
    # hard-coded 1024, so signals of any length plot correctly.
    x = np.arange(len(V))
    plt.figure(n)
    plt.plot(x, V, color="green")
    plt.plot(x, q_V, color="b")
    plt.plot(x, deq_V, color="r")
    plt.savefig('img/' + fname)
def save_mtx(fname, x):
    """Write each value of *x* to *fname* as one two-digit uppercase hex line."""
    lines = ["%02X\n" % value for value in x]
    with open(fname, "w") as fp:
        fp.writelines(lines)
# Linear ramp signal in [50, 1074): quantize, dequantize, dump and plot.
A = np.arange(0, 1024, 1.0) + 50
a_min = 50.0
a_max = 1024+50.0
q_A = npu.Quantize(A, a_min, a_max)
deq_A = npu.deQuantize(q_A,a_min, a_max)
save_mtx('lin.dat', q_A)

# Sine signal scaled to amplitude 100 with offset 10; range taken from data.
B = 100 * np.sin(np.arange(0, 1024, 1.0) / 100) + 10
b_min = np.min(B)
b_max = np.max(B)
q_B = npu.Quantize(B, b_min, b_max)
deq_B = npu.deQuantize(q_B, b_min, b_max)
save_mtx('sin.dat', q_B)

save_img('lin.png', A, q_A, deq_A)
save_img('sin.png', B, q_B, deq_B, n=2)

# inv: sign inversion in the quantized domain (q_inv returns the inverted
# codes plus the new min/max of the inverted range).
q_Ainv, a_inv_min, a_inv_max = npu.q_inv(q_A, a_min, a_max)
deq_Ainv = npu.deQuantize(q_Ainv, a_inv_min, a_inv_max)
save_img('lin_inv.png', A, q_Ainv, deq_Ainv, n=3)
q_Binv, b_inv_min, b_inv_max = npu.q_inv(q_B, b_min, b_max)
deq_Binv = npu.deQuantize(q_Binv, b_inv_min, b_inv_max)
save_img('sin_inv.png', B, q_Binv, deq_Binv, n=4)
| StarcoderdataPython |
200032 | <reponame>loveorchids/deformable_detection
from imgaug import augmenters
def aug_temp(args, bg_color=255):
    """Build the imgaug augmentation pipeline for document-style images.

    Pipeline: pad height to 2048 -> resize height to 2048 (keeping aspect
    ratio) -> optional zoom (probability and bounds from *args*) -> crop a
    512x512 window -> pad back to 512x512 with *bg_color* -> horizontal
    flip -> random gamma/linear contrast. Returns the augmenter list.
    """
    pad_height, resize_height, window = 2048, 2048, 512
    zoom = augmenters.Sometimes(
        args.augment_zoom_probability,
        augmenters.Affine(scale=(args.augment_zoom_lower_bound, args.augment_zoom_higher_bound)),
    )
    pipeline = [
        # Pad the height so short pages reach pad_height.
        augmenters.PadToFixedSize(width=1, height=pad_height, pad_cval=bg_color),
        # Resize the height, which effectively enlarges the font size.
        augmenters.Resize(size={"height": resize_height, "width": "keep-aspect-ratio"}),
        zoom,
        # Take a window x window area; pad in case the width falls short.
        augmenters.CropToFixedSize(width=window, height=window),
        augmenters.PadToFixedSize(width=window, height=window, pad_cval=bg_color),
        augmenters.Fliplr(0.33, name="horizontal_flip"),
        # Contrast jitter, each applied with probability 0.5.
        augmenters.Sometimes(0.5, augmenters.GammaContrast(gamma=(0.5, 1.2))),
        augmenters.Sometimes(0.5, augmenters.LinearContrast(alpha=(0.4, 1.2))),
    ]
    return pipeline
5041265 | """FASTAPI OpenAPI/REST app."""
import logging
from pathlib import Path
from typing import List
from fastapi import FastAPI, HTTPException, Response, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field
# Reuse uvicorn's error logger so app messages land in the server log stream.
log = logging.getLogger("uvicorn.error")
description = """
Minimal fastapi app for the peerfetch Hello World example.
"""
app = FastAPI(
    # FastAPI OpenAPI docs metadata
    # ref: https://fastapi.tiangolo.com/tutorial/metadata/
    title="Hello World OpenAPI",
    description=description,
    version="2021.10.13",
    license_info={
        "name": "Apache 2.0",
        "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
    },
)
# CORS (Cross-Origin Resource Sharing) Section
# ref: https://fastapi.tiangolo.com/tutorial/cors/
# NOTE(review): wildcard origins allow any site to call this API — fine for a
# Hello World demo, but tighten before reusing in production.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# [Sitemap]
# sitemap definitions follow
class HelloResponse(BaseModel):
    """Response schema for GET /api/hello; message defaults to the greeting."""
    message: str = "Hello World!"
@app.get("/api/hello", response_model=HelloResponse)
def get_hello():
"""Returns Hello World!."""
return HelloResponse()
| StarcoderdataPython |
11373638 | # coding: utf-8
"""
Phaxio API
API Definition for Phaxio
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.account_status import AccountStatus
from .models.account_status_data import AccountStatusData
from .models.area_code import AreaCode
from .models.country import Country
from .models.error import Error
from .models.fax_info import FaxInfo
from .models.generate_phax_code_json_response import GeneratePhaxCodeJsonResponse
from .models.generate_phax_code_json_response_data import GeneratePhaxCodeJsonResponseData
from .models.get_area_codes_response import GetAreaCodesResponse
from .models.get_countries_response import GetCountriesResponse
from .models.get_fax_info_response import GetFaxInfoResponse
from .models.get_faxes_response import GetFaxesResponse
from .models.list_phone_numbers_response import ListPhoneNumbersResponse
from .models.operation_status import OperationStatus
from .models.paging import Paging
from .models.phax_code import PhaxCode
from .models.phax_code_data import PhaxCodeData
from .models.phone_number import PhoneNumber
from .models.phone_number_response import PhoneNumberResponse
from .models.recipient import Recipient
from .models.send_fax_response import SendFaxResponse
from .models.send_fax_response_data import SendFaxResponseData
from .models.status import Status
# import apis into sdk package
from .apis.default_api import DefaultApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
# Module-level default Configuration instance shared by the generated client.
configuration = Configuration()
| StarcoderdataPython |
8137174 | <reponame>HiAwesome/python-algorithm<filename>c07/p296.py<gh_stars>1-10
import sys

from pythonds.graphs import PriorityQueue, Graph, Vertex
def prim(G: Graph, start: Vertex):
    """Grow a minimum spanning tree of ``G`` using Prim's algorithm.

    Results are stored on the vertices themselves: after the call, each
    vertex's predecessor link traces the MST edge that connected it, and
    its distance holds the weight of that edge.

    :param G: graph whose vertices support distance/predecessor bookkeeping.
    :param start: vertex from which the spanning tree is grown.
    """
    pq = PriorityQueue()
    # Initialise every vertex as unreached: infinite distance, no predecessor.
    for v in G:
        v.setDistance(sys.maxsize)
        v.setPred(None)
    start.setDistance(0)
    pq.buildHeap([(v.getDistance(), v) for v in G])
    while not pq.isEmpty():
        currentVert = pq.delMin()
        for nextVert in currentVert.getConnections():
            newCost = currentVert.getWeight(nextVert)
            # Relax the edge only while nextVert is still outside the tree
            # (i.e. still in the priority queue).
            if nextVert in pq and newCost < nextVert.getDistance():
                nextVert.setPred(currentVert)   # fixed: was undefined `newVert`
                nextVert.setDistance(newCost)
                pq.decreaseKey(nextVert, newCost)  # fixed: was undefined `pd`
| StarcoderdataPython |
6511184 | """Module for Testing the Meetup Endpoint."""
import json
# Local Import
from .basecase import TestBaseCase as base
class TestMeetup(base):
    """Testing the Meetup Endpoints with valid input."""

    def setUp(self):
        base.setUp(self)

    def _post_meetup(self):
        """Create a meetup via the API and return the decoded payload.

        Asserts the standard 201/created contract so every test starts
        from a known-good meetup. Extracted because four tests repeated
        this boilerplate verbatim.
        """
        response = self.client.post(
            "/api/v1/meetups",
            data=json.dumps(self.meetup_payload),
            content_type=self.content_type,
        )
        response_data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response_data["message"], "Meetup was created successfully.")
        return response_data

    def test_create_meetup(self):
        """Testing Creation of a Meetup."""
        self._post_meetup()

    def test_fetching_all_meetups(self):
        """Testing Fetching of all meetups."""
        self._post_meetup()
        response = self.client.get("/api/v1/meetups/upcoming", content_type=self.content_type)
        self.assertEqual(response.status_code, 200)

    def test_fetch_single_meetup(self):
        """Test fetching a single meetup."""
        created = self._post_meetup()
        # Fetching the meetup just created, by its returned id.
        response = self.client.get(
            'api/v1/meetups/{}'.format(created["data"]["id"]),
            content_type=self.content_type,
        )
        self.assertEqual(response.status_code, 200)

    def test_rsvp_to_meetup(self):
        """Test RSVPing to a meetup."""
        # NOTE: the original had a stray duplicate docstring statement here;
        # removed as dead code.
        created = self._post_meetup()
        response = self.client.post(
            '/api/v1/meetups/{}/rsvps'.format(created["data"]["id"]),
            data=json.dumps(self.rsvp_payload),
            content_type=self.content_type,
        )
        self.assertEqual(response.status_code, 201)
| StarcoderdataPython |
45545 | <filename>src/modules/podcast/tasks/rss.py
import os
from jinja2 import Template
from core import settings
from common.storage import StorageS3
from common.utils import get_logger
from modules.podcast.models import Podcast, Episode
from modules.podcast.tasks.base import RQTask, FinishCode
logger = get_logger(__name__)
__all__ = ["GenerateRSSTask"]
class GenerateRSSTask(RQTask):
    """Allows to recreate and upload RSS for specific podcast or for all of exists"""
    # Lazily created in run(); holds the S3 client used for uploads.
    storage: StorageS3 = None
    async def run(self, *podcast_ids: int) -> FinishCode:
        """Run process for generation and upload RSS to the cloud (S3).

        With no ids every podcast is regenerated; otherwise only the given
        ones. Returns ERROR if any single podcast failed, OK otherwise.
        """
        self.storage = StorageS3()
        # An empty filter means "all podcasts".
        filter_kwargs = {"id__in": map(int, podcast_ids)} if podcast_ids else {}
        podcasts = await Podcast.async_filter(self.db_session, **filter_kwargs)
        results = {}
        for podcast in podcasts:
            results.update(await self._generate(podcast))
        logger.info("Regeneration results: \n%s", results)
        if FinishCode.ERROR in results.values():
            return FinishCode.ERROR
        return FinishCode.OK
    async def _generate(self, podcast: Podcast) -> dict:
        """Render RSS and upload it.

        Returns a one-entry mapping of podcast id -> FinishCode, which
        run() aggregates across all podcasts.
        """
        logger.info("START rss generation for %s", podcast)
        result_path = await self._render_rss_to_file(podcast)
        result_url = self.storage.upload_file(result_path, dst_path=settings.S3_BUCKET_RSS_PATH)
        if not result_url:
            # Upload failure is reported per-podcast rather than raised.
            logger.error("Couldn't upload RSS file to storage. SKIP")
            return {podcast.id: FinishCode.ERROR}
        await podcast.update(self.db_session, rss_link=str(result_url))
        logger.info("RSS file uploaded, podcast record updated")
        logger.info("FINISH generation for %s | URL: %s", podcast, podcast.rss_link)
        return {podcast.id: FinishCode.OK}
    async def _render_rss_to_file(self, podcast: Podcast) -> str:
        """Generate rss for Podcast and Episodes marked as "published".

        Renders the Jinja2 feed template into a temporary XML file and
        returns the path of that file.
        """
        logger.info(f"Podcast #{podcast.id}: RSS generation has been started.")
        # Only published episodes with a publish date make it into the feed.
        episodes = await Episode.async_filter(
            self.db_session,
            podcast_id=podcast.id,
            status=Episode.Status.PUBLISHED,
            published_at__ne=None,
        )
        context = {"episodes": episodes, "settings": settings}
        with open(os.path.join(settings.TEMPLATE_PATH, "rss", "feed_template.xml")) as fh:
            template = Template(fh.read())
        rss_filename = os.path.join(settings.TMP_RSS_PATH, f"{podcast.publish_id}.xml")
        logger.info(f"Podcast #{podcast.publish_id}: Generation new file rss [{rss_filename}]")
        with open(rss_filename, "w") as fh:
            result_rss = template.render(podcast=podcast, **context)
            fh.write(result_rss)
        logger.info(f"Podcast #{podcast.id}: RSS generation has been finished.")
        return rss_filename
| StarcoderdataPython |
3232510 | import numpy as np
import pylas
def test_mmap(mmapped_file_path):
    """Writes through a memory-mapped LAS file must persist to disk."""
    with pylas.mmap(mmapped_file_path) as las:
        # Mutating the mmapped array writes straight into the file bytes.
        las.classification[:] = 25
        assert np.all(las.classification == 25)
    # Re-reading from disk proves the change survived closing the mmap.
    las = pylas.read(mmapped_file_path)
    assert np.all(las.classification == 25)
| StarcoderdataPython |
150072 | from . import channels
from . import paillier
| StarcoderdataPython |
3289303 | <gh_stars>0
import pytest
import falcon
from falcon import MEDIA_TEXT
def test_response_set_content_type_set():
    """Setting a media type must populate the content-type header."""
    response = falcon.Response()
    response._set_media_type(MEDIA_TEXT)
    assert response._headers['content-type'] == MEDIA_TEXT
def test_response_set_content_type_not_set():
    """A fresh Response carries no content-type header by default."""
    response = falcon.Response()
    assert 'content-type' not in response._headers
def test_response_get_headers():
    """headers exposes appended headers case-insensitively, but not cookies."""
    response = falcon.Response()
    for name, value in (('x-things1', 'thing-1'),
                        ('x-things2', 'thing-2'),
                        ('X-Things3', 'Thing-3')):
        response.append_header(name, value)
    response.set_cookie('Chocolate', 'Chip')
    result = response.headers
    assert result['x-things1'] == 'thing-1'
    assert result['x-things2'] == 'thing-2'
    # Header names are normalised to lower case; values keep their casing.
    assert result['x-things3'] == 'Thing-3'
    # Cookies are managed separately and must not leak into headers.
    assert 'set-cookie' not in result
def test_response_attempt_to_set_read_only_headers():
    """Assigning to Response.headers raises; appended values are preserved."""
    response = falcon.Response()
    for name, value in (('x-things1', 'thing-1'),
                        ('x-things2', 'thing-2'),
                        ('x-things3', 'thing-3a'),
                        ('X-Things3', 'thing-3b')):
        response.append_header(name, value)
    # The headers property is read-only.
    with pytest.raises(AttributeError):
        response.headers = {'x-things4': 'thing-4'}
    result = response.headers
    assert result['x-things1'] == 'thing-1'
    assert result['x-things2'] == 'thing-2'
    # Repeated appends to the same (case-insensitive) name are comma-joined.
    assert result['x-things3'] == 'thing-3a, thing-3b'
| StarcoderdataPython |
8144328 | <filename>netforce_account_report/netforce_account_report/models/report_cash_flow.py<gh_stars>10-100
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from datetime import *
from dateutil.relativedelta import *
from pprint import pprint
from netforce.access import get_active_company
class ReportCashFlow(Model):
    """Transient model that builds a simple cash-flow statement.

    Account balances are sampled at three points (before, during and after
    the selected period) and the period movements are grouped into
    operating / investing / financing activities by account type.
    """
    _name = "report.cash.flow"
    _transient = True
    _fields = {
        "date_from": fields.Date("From"),
        "date_to": fields.Date("To"),
    }
    def default_get(self, field_names=None, context={}, **kw):
        """Default the report period.

        With no defaults in the context the current calendar month is used;
        with only a date_to, the period starts at the fiscal year start.
        """
        defaults = context.get("defaults", {})
        date_from = defaults.get("date_from")
        date_to = defaults.get("date_to")
        if not date_from and not date_to:
            # Current month: relativedelta(day=31) clamps to the month's
            # actual last day.
            date_from = date.today().strftime("%Y-%m-01")
            date_to = (date.today() + relativedelta(day=31)).strftime("%Y-%m-%d")
        elif not date_from and date_to:
            date_from = get_model("settings").get_fiscal_year_start(date=date_to)
        return {
            "date_from": date_from,
            "date_to": date_to,
        }
    def get_report_data(self, ids, context={}):
        """Assemble the data dict consumed by the cash-flow report template.

        Returns the period, company name and the ordered report lines
        (net income, activity sections, and opening/closing cash).
        """
        company_id = get_active_company()
        comp = get_model("company").browse(company_id)
        if ids:
            params = self.read(ids, load_m2o=False)[0]
        else:
            params = self.default_get(load_m2o=False, context=context)
        settings = get_model("settings").browse(1)
        date_from = params.get("date_from")
        date_to = params.get("date_to")
        accounts = {}
        # Opening balances: everything strictly before date_from.
        ctx = {
            "date_to": date_from,
            "excl_date_to": True,
        }
        for acc in get_model("account.account").search_browse([["type", "!=", "view"]], context=ctx):
            accounts[acc.id] = {
                "id": acc.id,
                "code": acc.code,
                "name": acc.name,
                "type": acc.type,
                "begin_balance": acc.balance,
            }
        # Movement within the selected period.
        ctx = {
            "date_from": date_from,
            "date_to": date_to,
        }
        for acc in get_model("account.account").search_browse([["type", "!=", "view"]], context=ctx):
            accounts[acc.id].update({
                "period_balance": acc.balance,
            })
        # Closing balances: everything up to and including date_to.
        ctx = {
            "date_to": date_to,
        }
        for acc in get_model("account.account").search_browse([["type", "!=", "view"]], context=ctx):
            accounts[acc.id].update({
                "end_balance": acc.balance,
            })
        accounts = sorted(accounts.values(), key=lambda acc: acc["code"])
        net_income = 0
        # Account-type buckets used to classify each statement line.
        pl_types = ["revenue", "cost_sales", "other_income", "expense", "other_expense"]
        invest_types = ["fixed_asset", "noncur_asset"]
        finance_types = ["equity"]
        cash_types = ["cash", "cheque", "bank"]
        for acc in accounts:
            if acc["type"] in pl_types:
                # Sign flip: P&L balances appear credit-signed in this chart
                # of accounts — presumably; confirm against accounting config.
                net_income -= acc["period_balance"]
        lines = []
        line = {
            "string": "Net Income",
            "amount": net_income,
        }
        lines.append(line)
        cash_flow = net_income
        # Operating activities: anything that is neither P&L, cash, investing
        # nor financing, with a non-negligible period movement.
        line = {
            "string": "Operating activities",
        }
        lines.append(line)
        for acc in accounts:
            if acc["type"] not in pl_types and acc["type"] not in cash_types and acc["type"] not in invest_types and acc["type"] not in finance_types and abs(acc["period_balance"]) > 0.001:
                line = {
                    "string": "[%s] %s" % (acc["code"], acc["name"]),
                    "amount": -acc["period_balance"],
                }
                lines.append(line)
                cash_flow -= acc["period_balance"]
        # Investing activities: fixed and other non-current assets.
        line = {
            "string": "Investing activities",
        }
        lines.append(line)
        for acc in accounts:
            if acc["type"] not in pl_types and acc["type"] not in cash_types and acc["type"] in invest_types and abs(acc["period_balance"]) > 0.001:
                line = {
                    "string": "[%s] %s" % (acc["code"], acc["name"]),
                    "amount": -acc["period_balance"],
                }
                lines.append(line)
                cash_flow -= acc["period_balance"]
        # Financing activities: equity movements.
        line = {
            "string": "Financing activities",
        }
        lines.append(line)
        for acc in accounts:
            if acc["type"] not in pl_types and acc["type"] not in cash_types and acc["type"] in finance_types and abs(acc["period_balance"]) > 0.001:
                line = {
                    "string": "[%s] %s" % (acc["code"], acc["name"]),
                    "amount": -acc["period_balance"],
                }
                lines.append(line)
                cash_flow -= acc["period_balance"]
        # Cash position at the period boundaries, summed over cash-like accounts.
        cash_begin = 0
        cash_end = 0
        for acc in accounts:
            if acc["type"] in cash_types:
                cash_begin += acc["begin_balance"]
                cash_end += acc["end_balance"]
        line = {
            "string": "Net cash flow for period",
            "amount": cash_flow,
        }
        lines.append(line)
        line = {
            "string": "Cash at beginning of period",
            "amount": cash_begin,
        }
        lines.append(line)
        line = {
            "string": "Cash at end of period",
            "amount": cash_end,
        }
        lines.append(line)
        data = {
            "date_from": date_from,
            "date_to": date_to,
            "lines": lines,
            "company_name": comp.name,
        }
        return data
ReportCashFlow.register()
| StarcoderdataPython |
8072551 | <gh_stars>1-10
from pathlib import Path
test_input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]"""
# test_input = """<{([{{}}[<[[[<>{}]]]>[]]"""
def part_1(puzzle_input: str) -> int:
    """Score the first illegal closing character on each corrupted line.

    A line is corrupted when a closer does not match the most recent opener
    (or arrives with nothing open). Returns the summed syntax-error score
    (AoC 2021 day 10, part 1).
    """
    closer_score = {")": 3, "]": 57, "}": 1197, ">": 25137}
    close_to_open = {">": "<", ")": "(", "]": "[", "}": "{"}
    answer = 0
    for line in puzzle_input.splitlines():
        # Bug fix: the stack must reset per line. The original shared one
        # stack across lines, letting leftovers from incomplete lines leak
        # into the next line's matching (and risking IndexError on an
        # empty stack).
        stack = []
        for paren in line:
            if paren in "[{(<":
                stack.append(paren)
            elif stack and stack[-1] == close_to_open[paren]:
                stack.pop()
            else:
                # First illegal character: score it and abandon the line.
                answer += closer_score[paren]
                break
    return answer
def part_2(puzzle_input: str) -> int:
    """Return the middle completion score across all incomplete lines.

    Corrupted lines are discarded; each incomplete line is scored by the
    sequence of closers needed to complete it, accumulated base-5
    (AoC 2021 day 10, part 2). Leftover debug prints removed.
    """
    open_score = {"<": 4, "(": 1, "[": 2, "{": 3}
    close_to_open = {">": "<", ")": "(", "]": "[", "}": "{"}
    completion_scores = []
    for line in puzzle_input.splitlines():
        stack = []
        for paren in line:
            if paren in "[{(<":
                stack.append(paren)
            elif stack and stack[-1] == close_to_open[paren]:
                stack.pop()
            else:
                # Corrupted line: ignore it entirely.
                break
        else:
            # Incomplete line: score the remaining openers innermost-first.
            score = 0
            while stack:
                score = score * 5 + open_score[stack.pop()]
            completion_scores.append(score)
    completion_scores.sort()
    # The puzzle guarantees an odd number of incomplete lines.
    return completion_scores[len(completion_scores) // 2]
def test_part_1():
    # Expected value from the AoC 2021 day 10 problem statement sample.
    assert part_1(test_input) == 26397
def test_part_2():
    # Expected value from the AoC 2021 day 10 problem statement sample.
    assert part_2(test_input) == 288957
def main():
    """Read the puzzle input from input.txt and print both answers."""
    print("🎄 Advent of code 2021 🎄")
    data = Path("input.txt").read_text()
    answer_1 = part_1(data)
    print(f"Part 1: {answer_1}")
    answer_2 = part_2(data)
    print(f"Part 2: {answer_2}")
if __name__ == "__main__":
main()
| StarcoderdataPython |
348818 | #1
# Append the highest score found in Score1.txt to Grade2.txt.
# Each Score1.txt line is comma-separated; the score is taken from the first
# two characters of the fifth field (index 4) — assumes two-digit scores;
# TODO confirm scores never exceed 99.
# Fixes: files are now closed via `with` even on error, and a leftover
# debug print of each line was removed.
best = 0
with open('Score1.txt', "r") as score_file:
    for raw_line in score_file:
        fields = raw_line.split(",")
        value = int(fields[4][0:2])
        if value > best:
            best = value
with open('Grade2.txt', "a") as grade_file:
    # Original wrote the bare number with no trailing newline; preserved.
    grade_file.write(str(best))
# Send each friend home by name, then retreat and build a fence.
friend_names = ['Joan', 'Ronan', 'Nikita', 'Augustus']
for friend_name in friend_names:
    hero.say(friend_name + ', go home!')
hero.moveXY(20, 30)
hero.buildXY("fence", 30, 30)
| StarcoderdataPython |
4977279 | <filename>cnosolar/gui_config.py
###############################
# CONFIGURATION GUI #
###############################
import json
import pytz
import pvlib
import requests
import traitlets
import numpy as np
import pandas as pd
import ipywidgets as widgets
from tkinter import Tk, filedialog
from IPython.display import display
from cnosolar.pvsyst_tools import pvsyst
def execute():
'''
Graphical user interface for the configuration of the PV plant
and download of the corresponding .JSON file.
The JSON file data structure contains the following parameters:
1. latitude : float
Latitude based on the location of the PV plant in decimal degrees notation.
2. longitude : float
Longitude based on the location of the PV plant in decimal degrees notation.
3. tz : string
Time zone of the location of the PV plant.
4. altitude : float
Altitude based on the location of the PV plant from sea level in [m].
5. surface_type : string
Surface type to determine the albedo. Optional if albedo is not known.
6. surface_albedo : float
Albedo.
7. inverters_database : string
Repository of inverters arranged by PVlib. Valid options are:
CECInverter, SandiaInverter or ADRInverter. If the configuration
method is PVsyst or Manual, the value is set to null.
8. inverter_name : string
Inverter name following the according repository format. If the
configuration method is PVsyst or Manual, the value is set to null.
9. inverter : dict
Set of technical parameters that defines the inverter.
- Main Parameters of SNL PVlib Method
1. Paco: Inverter rated AC power in W.
2. Pdco: Inverter rated DC power in W.
3. Vdco: DC voltage at which the nominal AC Power is reached
with the DC Power input in V.
4. Pso: DC power required to start the inversion process in W.
5. C0: Parameter that defines the curvature of the relationship
between AC Power and DC Power in STC condition in 1/W.
6. C1: Empirical coefficient that allows the Nominal DC Power
to vary linearly with the DC Voltage in 1/V.
7. C2: Empirical coefficient that allows the DC Starting Power
to vary linearly with the DC Voltage in 1/V.
8. C3: Empirical coefficient that allows $C_0$ to vary linearly
with the DC Voltage by 1/V.
9. Pnt: AC power consumed by the inverter during the night in W.
- Main Parameters of NREL PVWatts Method
1. Pdco: Inverter rated DC power in W.
2. eta_inv_nom: Dimensionless nominal efficiency of the inverter.
10. ac_model : string
Inverter modeling method to be used. Valid options are: sandia or
pvwatts.
11. modules_database : string
Repository of PV modules arranged by PVlib. Valid options are: pvmodule
or cecmodul). If the configuration method is PVFree, PVsyst or Manual,
the value is set to null.
12. module_name : string
PV module name following the according repository format. If the configuration
method is PVFree, PVsyst or Manual, the value is set to null.
13. module : dict
Set of technical parameters that defines the PV module.
- Main Parameters
1. T_NOCT: Nominal operating cell temperature in ºC.
2. Technology: PV cell technology. Valid options are: monosi, multisi,
cigs, cdte, asi or None.
3. N_s: Number of PV cells in series.
4. I_sc_ref: Short circuit current under STC conditions in A.
5. V_oc_ref: Open circuit voltage under STC conditions in V.
6. I_mp_ref: Current at the point of maximum power under STC
conditions in A.
7. V_mp_ref: Voltage at the point of maximum power under STC
conditions in V.
8. alpha_sc: Temperature coefficient of the short-circuit
current in A/ºC.
9. beta_oc: Open circuit voltage temperature coefficient in V/ºC.
10. gamma_r: Temperature coefficient of the power at the maximum
point in %/ºC.
11. STC: Nominal power of the PV module under STC conditions in W.
14. bifacial : bool
Defines if the PV module is bifacial or not.
15. bifaciality : float
Fraction between the efficiency of the front and rear side
of the PV module, measured under STC conditions.
16. row_height : float
Height of the rows of photovoltaic panels measured at their
center in units of meters.
17. row_width : float
Width of the rows of photovoltaic panels in the 2D plane considered
in units of meters (e.g., 1P, 2P, 4L).
18. with_tracker : bool
Parameter that checks if the mounting of the array is either on
fixed-tilt racking or horizontal single axis tracker.
19. surface_azimuth : float or list
Azimuth angle of the module surface. North = 0, East = 90,
South = 180 and West = 270. If with_tracker = true, the value is
set to null.
20. surface_tilt : float or list
Surface tilt angles. The tilt angle is defined as degrees from
horizontal (e.g. surface facing up = 0, surface facing
horizon = 90). If with_tracker = true, the value is set to null.
21. axis_tilt : float
Tilt of the axis of rotation with respect to horizontal (e.g. a value of
0º indicates that the support axis of the photovoltaic panels is horizontal)
in [degrees]. If with_tracker = false, the value is set to null.
22. axis_azimuth : float
Perpendicular angle to the axis of rotation by right hand rule (e.g., a
value of 180º indicates a rotation from east to west) in [degrees]. If
with_tracker = false, the value is set to null.
23. max_angle : float
Maximum angle of rotation of the tracker from its horizontal position (e.g., a
value of 90º allows the tracker to rotate to and from a vertical position where
the panel faces the horizon) in [degrees]. If with_tracker = false, the value
is set to null.
24. module_type : string
PV module mounting and front and back insolation sheets materials. Valid options
are: open_rack_glass_glass, close_mount_glass_glass or insulated_back_glass_polymer.
25. racking_model : string, optional
Racking of the PV modules. Valid strings are 'open_rack', 'close_mount',
and 'insulated_back'. Used to identify a parameter set for the SAPM cell
temperature model.
26. num_arrays : int
Set of arrangements connected to an inverter. Each subarray consists of modules
in series per string, strings in parallel, and the number of inputs to the inverter
(either full inputs per inverter or number of MPPT inputs).
27. modules_per_string : int or list
Number of modules in series per string in each subarray.
28. strings_per_inverter : int or list
Number of strings in parallel in each subarray.
29. num_inverter : int
Number of inverters with electrical configuration exactly equal to the one defined.
It allows to scale theproduction calculations.
30. loss : float
Overall DC system losses in percentage.
Default = 14.6
31. kpc : float
Transmission losses up to the common coupling point of the inverters.
Default = 0.0
32. kt : float
Losses associated with the transformation (voltage rise).
Default = 0.0
33. kin : float
Interconnection losses, transmission up to the trade border.
Default = 0.0
34. name : string
Suffix to the name of the configuration file (system_config_suffix).
Default = 'system_config'
'''
###############################
# DOCUMENTATION TAB #
###############################
gui_layout = widgets.Layout(display='flex',
flex_flow='row',
justify_content='space-between')
doc_location = widgets.HTML('''
<h5>Información Geográfica</h5>
<ul>
<li> <b>Latitud:</b> Utilice la notación de grados decimales.</li>
<li> <b>Longitud:</b> Utilice la notación de grados decimales.</li>
<li> <b>Altitud:</b> Altitud desde el nivel del mar en metros (m.s.n.m).</li>
<li> <b>Huso Horario:</b> Con referencia a UTC. Por defecto: América/Bogotá (UTC-5).</li>
<li> <b>Superficie:</b> Tipo de superficie para determinar el albedo. <span style='color:red'>Opcional si desconoce el albedo</span>.</li>
<li> <b>Albedo:</b> Utilice un valor porcentual en escala entre 0 y 1.</li>
</ul>''', layout=widgets.Layout(height='auto'))
doc_inverter = widgets.HTMLMath('''
<h5>Método de Configuración: Repositorio</h5>
<ul>
<li> <b>Repositorio:</b> Repositorio de inversores dispuestos por PVlib.</li>
<li> <b>Fabricantes:</b> Lista de fabricantes del repositorio seleccionado.</li>
<li> <b>Inversores:</b> Lista de equipos disponibles en el repositorio según el fabricante seleccionado.</li>
</ul>
<h5>Método de Configuración: PVsyst</h5>
<ul>
<li> Seleccione el archivo del inversor generado por PVsyst (extensión .OND) y dé clic en 'Cargar OND'.</li>
</ul>
<h5>Método de Configuración: Manual</h5>
<ul>
<li> <b>SNL PVlib</b>
<ul class='square'>
<li> <b>$P_{AC}$ Nominal:</b> Potencia AC nominal del inversor en W.</li>
<li> <b>$P_{DC}$ Nominal:</b> Potencia DC nominal del inversor en W.</li>
<li> <b>$V_{DC}$ Nominal:</b> Voltaje DC al que se alcanza la Potencia AC nominal con la entrada de Potencia DC en V.</li>
<li> <b>$P_{DC}$ de Arraque:</b> Potencia DC necesaria para iniciar el proceso de inversión en W.</li>
<li> <b>$C_0$:</b> Parámetro que define la curvatura de la relación entre la Potencia AC y Potencia DC en condición STC en 1/W.</li>
<li> <b>$C_1$:</b> Coeficiente empírico que permite que la Potencia DC Nominal varíe linealmente con el Voltaje DC en 1/V.</li>
<li> <b>$C_2$:</b> Coeficiente empírico que permite que la Potencia DC de Arranque varíe linealmente con el Voltaje DC en 1/V.</li>
<li> <b>$C_3$:</b> Coeficiente empírico que permite que $C_0$ varíe linealmente con el Voltaje DC en 1/V.</li>
<li> <b>$P_{AC}$ Consumo Nocturno:</b> Potencia AC consumida por el inversor durante la noche en W.</li>
</ul>
</li>
<li> <b>NREL PVWatts</b>
<ul class='square'>
<li> <b>$P_{DC}$ Nominal:</b> Potencia DC nominal del inversor en W.</li>
<li> <b>Eficiencia Nominal:</b> Eficiencia nominal del inversor en magnitud adimensional.</li>
</ul>
</li>
</ul>
''', layout=widgets.Layout(height='auto'))
doc_module = widgets.HTMLMath('''
<h5>Método de Configuración: Repositorio</h5>
<ul>
<li> <b>Repositorio:</b> Repositorio de módulos fotovoltaicos dispuestos por PVlib (CEC y Sandia).</li>
<li> <b>PVFree</b>
<ul class='square'>
<li> <b>Base de Datos:</b> Repositorio de módulos fotovoltaicos dispuestos en PVFree.</li>
<li> <b>ID:</b> Número de identificación del módulo indicado en PVFree.</li>
</ul>
</li>
<li> <b>CEC y Sandia</b>
<ul class='square'>
<li> <b>Fabricantes:</b> Lista de fabricantes del repositorio seleccionado.</li>
<li> <b>Módulos:</b> Lista de equipos disponibles en el repositorio según el fabricante seleccionado.</li>
</ul>
</li>
</ul>
<h5>Método de Configuración: PVsyst</h5>
<ul>
<li> Seleccione el archivo del módulo fotovoltaico generado por PVsyst (extensión .PAN) y dé clic en 'Cargar PAN'.</li>
</ul>
<h5>Método de Configuración: Manual</h5>
<ul>
<li> <b>$T_{NOCT}$:</b> Temperatura nominal de funcionamiento de la celda en ºC. </li>
<li> <b>Tecnología:</b> Tecnología de la celda fotovoltaica. </li>
<li> <b>Número Celdas:</b> Número de celdas fotovoltaicas en serie. </li>
<li> <b>$I_{SC}$ en STC:</b> Corriente de corto circuito en condiciones STC en A. </li>
<li> <b>$V_{OC}$ en STC:</b> Voltaje de circuito abierto en condiciones STC en V. </li>
<li> <b>$I_{MP}$ en STC:</b> Corriente en el punto de máxima potencia en condiciones STC en A. </li>
<li> <b>$V_{MP}$ en STC:</b> Voltaje en el punto de máxima potencia en condiciones STC en V.</b> </li>
<li> <b>Coef. Temp. $I_{SC}$:</b> Coeficiente de temperatura de la corriente de cortocircuito en A/ºC. </li>
<li> <b>Coef. Temp. $V_{OC}$:</b> Coeficiente de temperatura de voltaje de circuito abierto en V/ºC. </li>
<li> <b>Coef. Temp. $P_{MP}$:</b> Coeficiente de temperatura de la potencia en el punto máximo en %/ºC. </li>
<li> <b>$P_{Nominal}$ en STC:</b> Potencia nominal del módulo fotovoltaico en condiciones STC en W.</li>
</ul>
<h5>Parámetros Bifacialidad</h5>
<ul>
<li> <b>Panel Bifacial:</b> Si el panel fotovoltaico es bifacial o no. </li>
<li> <b>Bifacialidad:</b> Relación entre la eficiencia del lado frontal y posterior del módulo fotovoltaico, medida en condiciones STC. Utilice un valor porcentual en escala entre 0 y 1. </li>
<li> <b>Alto Fila Paneles:</b> Altura de las filas de paneles fotovoltaicos medida en su centro en unidades de metros. </li>
<li> <b>Ancho Fila Paneles:</b> Ancho de las filas de paneles fotovoltaicos en el plano 2D considerado en unidades de metros (e.g., 1P, 2P, 4L). </li>
</ul>
''', layout=widgets.Layout(height='auto'))
doc_sysdesign = widgets.HTMLMath('''
<h5>Subarrays</h5>
<ul>
<li> <b>Cantidad Subarrays:</b> Conjunto de arreglos conectados a un inversor. Cada subarray se compone de módulos en serie por string, strings en paralelo y el número de entradas al inversor (ya sea entradas completas por inversor o número de entradas MPPT).</li>
</ul>
<h5>Configuración Eléctrica</h5>
<ul>
<li> <b>Módulos por String:</b> Cantidad de módulos en serie por string en cada subarray. Para múltiples subarrays, separe los valores con una coma de manera ordenada.</li>
<li> <b>Strings por Inversor:</b> Cantidad de strings en paralelo en cada subarray. Para múltiples subarrays, separe los valores con una coma de manera ordenada.</li>
<li> <b>Número de Inversores:</b> Cantidad de inversores con configuración eléctrica exactamente igual a la definida. Permite escalar los cálculos de producción.</li>
</ul>
<h5>Seguidores y Orientación</h5>
<ul>
<li> <b>Sin Seguidor</b>
<ul class='square'>
<li> <b>Azimutal:</b> Ángulo azimutal en grados decimales (Norte = 0, Sur = 180, Este = 90, Oeste = 270). Para múltiples subarrays, separe los valores con una coma de manera ordenada (también aplica si el azimutal es el mismo).</li>
<li> <b>Elevación:</b> Ángulos de inclinación desde la horizontal en grados decimales. Para múltiples subarrays, separe los valores con una coma de manera ordenada (también aplica si la elevación es la misma).</li>
<li> <b>Racking:</b> Tipo de ventilación del montaje. Se utiliza para identificar un conjunto de parámetros para el modelo de temperatura de la celda.</li>
</ul>
</li>
<li> <b>Seguidor 1-Eje</b><br>
El ángulo de rotación se determina en un sistema de coordenadas diestro. El seguidor define el eje-y positivo, el eje-x positivo está a 90º en sentido horario desde el eje-y y es paralelo a la superficie, y el eje-z positivo es normal a ambos ejes (-x y -y), y está orientado hacia el cielo. El ángulo de rotación es una rotación hacia la derecha alrededor del eje-y en el sistema de coordenadas e indica la posición del seguidor en relación con la horizontal. Por ejemplo, si Azimutal Eje es 180º (orientado al sur) y Elevación Eje es 0º, entonces un ángulo del seguidor de 0º es horizontal, de 30º es una rotación hacia el oeste, y -90º es una rotación al plano vertical hacia el este.
<ul class='square'>
<li> <b>Elevación Eje:</b> Elevación del eje de rotación con respecto a la horizontal en grados decimales (e.g., un valor de 0º indica que el eje de soporte de los paneles fotovoltaicos está horizontal). Para múltiples subarrays, separe los valores con una coma de manera ordenada (también aplica si la elevación del eje es la misma).</li>
<li> <b>Azimutal Eje:</b> Ángulo perpendicular por regla de la mano derecha al eje de rotación en grados decimales (e.g., un valor de 180º --i.e., dirección sur-- indica una rotación de este a oeste). Para múltiples subarrays, separe los valores con una coma de manera ordenada (también aplica si el azimutal del eje es el mismo).</li>
<li> <b>Ángulo Máximo:</b> Ángulo de rotación máximo del seguidor desde su posición horizontal en grados decimales (e.g., un valor de 90º permite que el seguidor gire desde y hasta una posición vertical en la que el panel mira hacia el horizonte). Para múltiples subarrays, separe los valores con una coma de manera ordenada (también aplica si el ángulo máximo es el mismo).</li>
<li> <b>Racking:</b> Tipo de ventilación del montaje. Se utiliza para identificar un conjunto de parámetros para el modelo de temperatura de la celda.</li>
</ul>
</li>
</ul>
<h5>Parámetros Globales</h5>
<ul>
<li> <b>Pérdidas DC:</b> Porcentaje de pérdidas globales DC del sistema. Por defecto: 14.6%.</li>
<li> <b>$k_{pc}$:</b> Pérdidas de transmisión hasta el punto común de acople de los inversores. Por defecto: 0%.</li>
<li> <b>$k_{t}$:</b> Pérdidas asociadas a la transformación (elevación de tensión). Por defecto: 0%.</li>
<li> <b>$k_{in}$:</b> Pérdidas de interconexión, transmisión hasta la frontera comercial. Por defecto: 0%.</li>
<li> <b>Nombre Planta:</b> Sufijo al nombre del archivo de configuración (system_config_<i>sufijo</i>). Por defecto: system_config.</li>
</ul>
<h5>Archivo Configuración</h5>
<ul>
<li> <b>Generar Configuración:</b> Dé clic en este botón para que el algoritmo genere internamente el archivo de configuración con los parámetros previamente asignados. El ícono y la descripción del botón cambiarán para notificar la ejecución de la configuración.</li>
<li> <b>Descargar Configuración:</b> Dé clic en este botón para descargar el archivo de configuración generado con el botón 'Generar Configuración' (una vez este haya notificado su ejecución). Se descargará un archivo .JSON que se alojará en la carpeta <i>cno_solar/configurations/<span style='color:blue'>system_config.json</span></i>. El ícono y la descripción del botón cambiarán para notificar la descarga del archivo.</li>
</ul>
''', layout=widgets.Layout(height='auto'))
# Documentation accordion: one collapsible section per GUI tab.
ac_documentation = widgets.Accordion(children=[doc_location, doc_inverter, doc_module, doc_sysdesign])
ac_documentation.set_title(0, 'Tab Ubicación')
ac_documentation.set_title(1, 'Tab Inversor')
ac_documentation.set_title(2, 'Tab Módulo')
ac_documentation.set_title(3, 'Tab Diseño Planta')
# 'Documentación' tab: a title followed by the accordion, stacked vertically.
tab_doc = widgets.Box([widgets.HTML('<h4>Documentación</h4>', layout=widgets.Layout(height='auto')),
                       widgets.VBox([widgets.Box([ac_documentation], layout=gui_layout)])],
                      layout=widgets.Layout(display='flex',
                                            flex_flow='column',
                                            border='solid 0px',
                                            align_items='stretch',
                                            width='100%'))
###############################
# LOCATION TAB #
###############################
# Ground-surface options: Spanish label -> pvlib SURFACE_ALBEDOS key.
surfaces = {'': None,
            'Urbano': 'urban',
            'Césped': 'grass',
            'Césped Fresco': 'fresh grass',
            'Tierra': 'soil',
            'Arena': 'sand',
            'Nieve': 'snow',
            'Nieve Fresca': 'fresh snow',
            'Asfalto': 'asphalt',
            'Hormigón': 'concrete',
            'Aluminio': 'aluminum',
            'Cobre': 'copper',
            'Acero': 'fresh steel',
            'Acero Sucio': 'dirty steel',
            'Mar': 'sea'}
# Shared row layout: label on the left, input on the right.
gui_layout = widgets.Layout(display='flex',
                            flex_flow='row',
                            justify_content='space-between')
# Site inputs: latitude/longitude in decimal degrees, altitude in m.a.s.l.
w_latitude = widgets.FloatText(value=0,
                               step=0.001,
                               description='',
                               disabled=False,
                               style={'description_width': 'initial'})
w_longitude = widgets.FloatText(value=0,
                                step=0.01,
                                description='',
                                disabled=False,
                                style={'description_width': 'initial'})
w_altitude = widgets.FloatText(value=0,
                               step=1,
                               description='',
                               disabled=False,
                               style={'description_width': 'initial'})
# IANA timezone name, defaulting to Colombia.
w_timezone = widgets.Dropdown(options=pytz.all_timezones,
                              value='America/Bogota',
                              description='',
                              style={'description_width': 'initial'})
w_surface = widgets.Dropdown(options=surfaces,
                             value=None,
                             description='',
                             style={'description_width': 'initial'})
# Albedo as a fraction in [0, 1]; auto-filled when a surface is selected.
w_albedo = widgets.BoundedFloatText(value=None,
                                    step=0.01,
                                    min=0,
                                    max=1,
                                    description='',
                                    disabled=False,
                                    style={'description_width': 'initial'})
def handle_surface_change(change):
    """Sync the albedo field with the surface type the user just selected.

    Looks up the standard albedo of the chosen surface in pvlib's
    ``SURFACE_ALBEDOS`` table and writes it into ``w_albedo``.
    """
    if change.new is not None:  # ignore the blank placeholder option ('' -> None)
        w_albedo.value = pvlib.irradiance.SURFACE_ALBEDOS[change.new]
# Auto-fill the albedo whenever the surface selection changes.
w_surface.observe(handle_surface_change, names='value')
# Rows of the 'Ubicación' tab: one labelled input per site parameter.
widget_location = [widgets.Box([widgets.HTML('<h4>Información Geográfica</h4>', layout=widgets.Layout(height='auto'))]),
                   widgets.Box([widgets.Label('Latitud'), w_latitude], layout=gui_layout),
                   widgets.Box([widgets.Label('Longitud'), w_longitude], layout=gui_layout),
                   widgets.Box([widgets.Label('Altitud [m.s.n.m]'), w_altitude], layout=gui_layout),
                   widgets.Box([widgets.Label('Huso Horario'), w_timezone], layout=gui_layout),
                   widgets.Box([widgets.Label('Superficie'), w_surface], layout=gui_layout),
                   widgets.Box([widgets.Label('Albedo [%]'), w_albedo], layout=gui_layout)]
tab_location = widgets.Box(widget_location, layout=widgets.Layout(display='flex',
                                                                  flex_flow='column',
                                                                  border='solid 0px',
                                                                  align_items='stretch',
                                                                  width='50%'))
###############################
# INVERTER TAB #
###############################
# Inverter databases: Spanish label -> pvlib retrieve_sam() database key.
inv_repo = {'': None,
            'CEC': 'CECInverter',
            'Sandia': 'SandiaInverter',
            '<NAME>': 'ADRInverter'}
gui_layout = widgets.Layout(display='flex',
                            flex_flow='row',
                            justify_content='space-between')
# Top-level selector for how the inverter is configured.
inverter_btn = widgets.ToggleButtons(value=None,
                                     options=['Repositorio', 'PVsyst', 'Manual'],
                                     description='',
                                     disabled=False,
                                     button_style='',
                                     tooltips=['Base de datos de PVlib',
                                               'Importar desde PVsyst',
                                               'Configuración manual'])
# REPOSITORY
# Repository Widgets
# Container whose children are swapped by the handlers below; check_inverter()
# later reads values from it by fixed child index.
inverter_vbox = widgets.VBox([inverter_btn])
dropdown_invrepo = widgets.Dropdown(options=inv_repo,
                                    value=None,
                                    description='',
                                    style={'description_width': 'initial'})
# Disabled until a repository is chosen and manufacturers are loaded.
dropdown_manufac = widgets.Dropdown(options='',
                                    value=None,
                                    disabled=True,
                                    description='',
                                    style={'description_width': 'initial'})
w_dropinvrepo = widgets.VBox([widgets.Box([widgets.Label('Repositorio'), dropdown_invrepo], layout=gui_layout)])
w_dropmanufac = widgets.VBox([widgets.Box([widgets.Label('Fabricantes'), dropdown_manufac], layout=gui_layout)])
class SelectFilesButton(widgets.Button):
    """Button that opens a native (tkinter) dialog to pick a PVsyst .OND file.

    The chosen path is stored in the ``files`` trait; the icon and caption
    switch to a "selected" state once a file has been picked.
    """

    def __init__(self):
        super().__init__()  # modern zero-argument super (was super(SelectFilesButton, self))
        # Trait that will hold the selected file path.
        self.add_traits(files=traitlets.traitlets.Any())  # List()
        # Initial button appearance.
        self.description = 'Seleccionar'
        self.icon = 'square-o'
        self.layout = widgets.Layout(width='34%', height='auto')
        # Open the dialog on click.
        self.on_click(self.select_files)

    @staticmethod
    def select_files(b):
        """Open a tkinter file dialog and store the chosen path in ``b.files``."""
        # Create Tk root
        root = Tk()
        # Hide the empty Tk main window.
        root.withdraw()
        # Raise the dialog above all other windows.
        root.call('wm', 'attributes', '.', '-topmost', True)
        # The selected file path will be set on b.files.
        b.files = filedialog.askopenfilename(filetypes=(('OND Files', '.OND'),),
                                             multiple=False,
                                             title='Select OND Data File')
        b.description = 'Seleccionado'
        b.icon = 'check-square-o'
# File picker for the .OND file, plus the button that actually parses it.
upload_btn = SelectFilesButton()
btn = widgets.Button(value=False,
                     description='Cargar OND',
                     disabled=False,
                     button_style='', # 'success', 'info', 'warning', 'danger' or ''
                     tooltip='Cargar los archivos .OND',
                     icon='circle',
                     layout=widgets.Layout(width='34%', height='auto'))
# 'files' trait receives the parsed inverter dict under key 'inv'.
btn.add_traits(files=traitlets.traitlets.Dict())
w_upload = widgets.VBox([widgets.Box([widgets.HTML('<h5> </h5>', layout=widgets.Layout(height='auto'))]),
                         widgets.Box([widgets.Label('Archivo Inversor (.OND)'), upload_btn, btn], layout=gui_layout)])
# Manual Widgets
dropdown_manual = widgets.Dropdown(options=['', 'SNL PVlib', 'NREL PVWatts'],
                                   value=None,
                                   description='')
w_dropmanual = widgets.VBox([widgets.Box([widgets.Label('Formato de Configuración'), dropdown_manual], layout=gui_layout)])
def handle_toggle(change):
    """Swap the inverter-tab contents to match the selected configuration method."""
    selection = change['new']
    panels = {'Repositorio': [inverter_btn, w_dropinvrepo, w_dropmanufac],
              'PVsyst': [inverter_btn, w_upload],
              'Manual': [inverter_btn, w_dropmanual]}
    if selection in panels:
        inverter_vbox.children = panels[selection]
def handle_dropdown_manuf(change):
    """Populate the manufacturers dropdown from the chosen SAM inverter database."""
    database = change['new']
    inverters = pvlib.pvsystem.retrieve_sam(database)
    # The manufacturer is the token before the '__' separator in each SAM record name.
    names = [record[:record.index('__')] for record in inverters.transpose().index]
    # Leading blank entry for "nothing selected"; database key appended last so
    # later handlers can recover it via dropdown_manufac.options[-1].
    manufacturers = [''] + names + [database]
    dropdown_manufac.options = list(pd.unique(manufacturers))
    dropdown_manufac.disabled = False
    inverter_vbox.children = [inverter_btn, w_dropinvrepo, w_dropmanufac]
def handle_dropdown_repo(change):
    """Build the inverter-model dropdown for the manufacturer just selected."""
    database = dropdown_manufac.options[-1]  # database key was appended last by handle_dropdown_manuf
    sam_table = pvlib.pvsystem.retrieve_sam(database)
    manufacturer = change['new']
    matching = [name for name in sam_table.transpose().index if manufacturer in name]
    # Blank first entry so the dropdown starts unselected.
    inv_options = [''] + list(sam_table[matching].transpose().index)
    inv_drop = widgets.Dropdown(options=inv_options,
                                value=None,
                                description='',
                                style={'description_width': 'initial'})
    w_dropinv = widgets.VBox([widgets.Box([widgets.Label('Inversores'), inv_drop], layout=gui_layout)])
    inverter_vbox.children = [inverter_btn, w_dropinvrepo, w_dropmanufac, w_dropinv]
# PVSYST
def on_button_clicked(obj):
    """Parse the selected PVsyst .OND file into an inverter parameter dict.

    Triggered by the 'Cargar OND' button. Stores the result on
    ``btn.files['inv']`` and flips the button to its loaded state.
    Power values are converted from kW (PVsyst) to W (pvlib).
    """
    btn.description = 'OND Cargado'
    btn.icon = 'check-circle'
    with output:
        output.clear_output()
        ond = pvsyst.ond_to_inverter_param(path=upload_btn.files)
        inverter = {'Vac': float(ond['pvGInverter']['TConverter']['VOutConv']), # Grid voltage (main parameters)
                    'Pso': float(ond['pvGInverter']['TConverter']['PLim1']), # Power threshold
                    'Paco': float(ond['pvGInverter']['TConverter']['PNomConv'])*1000, # Max AC power (kW -> W)
                    'Pdco': float(ond['pvGInverter']['TConverter']['PNomDC'])*1000, # Nominal PV power (kW -> W)
                    'pdc0': float(ond['pvGInverter']['TConverter']['PNomDC'])*1000,
                    'Vdco': float(ond['pvGInverter']['TConverter']['VNomEff'].split(',')[1]), # Medium voltage (second of the comma list)
                    'Pnt': float(ond['pvGInverter']['Night_Loss']), # Night loss
                    'Vdcmax': float(ond['pvGInverter']['TConverter']['VAbsMax']), # High voltage -- input voltage (efficiency curve)
                    'Idcmax': float(ond['pvGInverter']['TConverter']['IMaxDC']),
                    'Mppt_low': float(ond['pvGInverter']['TConverter']['VMppMin']), # Vmin@Pnom
                    'Mppt_high': float(ond['pvGInverter']['TConverter']['VMPPMax']), # High voltage
                    'eta_inv_nom': float(ond['pvGInverter']['TConverter']['EfficEuro']),
                    'eta_inv_ref': 0.9637,
                    'Name': ond['pvGInverter']['pvCommercial']['Model']}
        btn.files = {'inv': inverter}
# MANUAL
def handle_dropdown_manual(change):
    """Render manual inverter-parameter fields for the chosen model format.

    'SNL PVlib' exposes the Sandia inverter model coefficients; any other
    selection ('NREL PVWatts') exposes the two PVWatts parameters.
    NOTE: check_inverter() later reads these widgets by fixed child index,
    so the row order built here must not change.
    """
    if change['new'] == 'SNL PVlib':
        w_Paco = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_Pdco = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_Vdco = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_Pso = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_C0 = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_C1 = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_C2 = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_C3 = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_Pnt = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        # NOTE(review): '$P_{DC}$ de Arraque' is a UI typo for 'Arranque' (left as-is).
        inv_conf = widgets.VBox([widgets.Box([widgets.HTML('<h5>Configuración SNL PVlib</h5>', layout=widgets.Layout(height='auto'))]),
                                 widgets.Box([widgets.Label('$P_{AC}$ Nominal [W]'), w_Paco], layout=gui_layout),
                                 widgets.Box([widgets.Label('$P_{DC}$ Nominal [W]'), w_Pdco], layout=gui_layout),
                                 widgets.Box([widgets.Label('$V_{DC}$ Nominal [V]'), w_Vdco], layout=gui_layout),
                                 widgets.Box([widgets.Label('$P_{DC}$ de Arraque [W]'), w_Pso], layout=gui_layout),
                                 widgets.Box([widgets.Label('$C_0$ [1/W]'), w_C0], layout=gui_layout),
                                 widgets.Box([widgets.Label('$C_1$ [1/V]'), w_C1], layout=gui_layout),
                                 widgets.Box([widgets.Label('$C_2$ [1/V]'), w_C2], layout=gui_layout),
                                 widgets.Box([widgets.Label('$C_3$ [1/V]'), w_C3], layout=gui_layout),
                                 widgets.Box([widgets.Label('$P_{AC}$ Consumo Nocturno [W]'), w_Pnt], layout=gui_layout)])
        inverter_vbox.children = [inverter_btn, w_dropmanual, inv_conf]
    else:
        w_pdc0 = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_eta_inv_nom = widgets.BoundedFloatText(value=None, min=0, max=1, step=0.01, description='', style={'description_width': 'initial'})
        inv_conf = widgets.VBox([widgets.Box([widgets.HTML('<h5>Configuración NREL PVWatts</h5>', layout=widgets.Layout(height='auto'))]),
                                 widgets.Box([widgets.Label('$P_{DC}$ Nominal [W]'), w_pdc0], layout=gui_layout),
                                 widgets.Box([widgets.Label('Eficiencia Nominal [ad.]'), w_eta_inv_nom], layout=gui_layout)])
        inverter_vbox.children = [inverter_btn, w_dropmanual, inv_conf]
# OBSERVE
# Wire the inverter-tab callbacks to their widgets.
inverter_btn.observe(handle_toggle, 'value')
dropdown_invrepo.observe(handle_dropdown_manuf, 'value')
dropdown_manufac.observe(handle_dropdown_repo, 'value')
btn.on_click(on_button_clicked)
dropdown_manual.observe(handle_dropdown_manual, 'value')
# TAB
tab_inverter = widgets.Box([widgets.HTML("<h4>Método de Configuración</h4>", layout=widgets.Layout(height='auto')),
                            inverter_vbox],
                           layout=widgets.Layout(display='flex',
                                                 flex_flow='column',
                                                 border='solid 0px',
                                                 align_items='stretch',
                                                 width='50%'))
###############################
# MODULE TAB #
###############################
# Module databases: Spanish label -> repository key ('PVFree' is a web API,
# the others are pvlib retrieve_sam() database keys).
mod_repo = {'': None,
            'PVFree': 'PVFree',
            'CEC': 'CECMod',
            'Sandia': 'SandiaMod'}
gui_layout = widgets.Layout(display='flex',
                            flex_flow='row',
                            justify_content='space-between')
# Top-level selector for how the PV module is configured.
module_btn = widgets.ToggleButtons(value=None,
                                   options=['Repositorio', 'PVsyst', 'Manual'],
                                   description='',
                                   disabled=False,
                                   button_style='', # 'success', 'info', 'warning', 'danger' or ''
                                   tooltips=['Base de datos de PVlib',
                                             'Importar desde PVsyst',
                                             'Configuración manual'])
# REPOSITORY
# Repository Widgets
# Container whose children are swapped by the handlers below; check_module()
# later reads values from it by fixed child index.
module_vbox = widgets.VBox([module_btn])
dropdown_modrepo = widgets.Dropdown(options=mod_repo,
                                    value=None,
                                    description='',
                                    style={'description_width': 'initial'})
# Disabled until a repository is chosen and manufacturers are loaded.
dropdown_modmanu = widgets.Dropdown(options='',
                                    value=None,
                                    disabled=True,
                                    description='',
                                    style={'description_width': 'initial'})
w_dropmodrepo = widgets.VBox([widgets.Box([widgets.Label('Repositorio'), dropdown_modrepo], layout=gui_layout)])
w_dropmodmanu = widgets.VBox([widgets.Box([widgets.Label('Fabricantes'), dropdown_modmanu], layout=gui_layout)])
class SelectPANButton(widgets.Button):
    """Button that opens a native (tkinter) dialog to pick a PVsyst .PAN file.

    The chosen path is stored in the ``files`` trait; the icon and caption
    switch to a "selected" state once a file has been picked.
    """

    def __init__(self):
        super().__init__()  # modern zero-argument super (was super(SelectPANButton, self))
        # Trait that will hold the selected file path.
        self.add_traits(files=traitlets.traitlets.Any())  # List()
        # Initial button appearance.
        self.description = 'Seleccionar'
        self.icon = 'square-o'
        self.layout = widgets.Layout(width='34%', height='auto')
        # Open the dialog on click.
        self.on_click(self.select_files)

    @staticmethod
    def select_files(b):
        """Open a tkinter file dialog and store the chosen path in ``b.files``."""
        # Create Tk root
        root = Tk()
        # Hide the empty Tk main window.
        root.withdraw()
        # Raise the dialog above all other windows.
        root.call('wm', 'attributes', '.', '-topmost', True)
        # The selected file path will be set on b.files.
        b.files = filedialog.askopenfilename(filetypes=(('PAN Files', '.PAN'),),
                                             multiple=False,
                                             title='Select PAN Data File')
        b.description = 'Seleccionado'
        b.icon = 'check-square-o'
# File picker for the .PAN file, plus the button that actually parses it.
upload_modbtn = SelectPANButton()
modbtn = widgets.Button(value=False,
                        description='Cargar PAN',
                        disabled=False,
                        button_style='',
                        tooltip='Cargar los archivos .PAN',
                        icon='circle',
                        layout=widgets.Layout(width='34%', height='auto'))
# 'files' trait receives the parsed module dict under key 'mod'.
modbtn.add_traits(files=traitlets.traitlets.Dict())
modbtn_output = widgets.Output()
w_modupload = widgets.VBox([widgets.Box([widgets.HTML('<h5> </h5>', layout=widgets.Layout(height='auto'))]),
                            widgets.Box([widgets.Label('Archivo Módulo (.PAN)'), upload_modbtn, modbtn], layout=gui_layout)])
# Manual Widgets
# NOTE(review): dropdown_modmanual is not referenced by the visible handlers
# (handle_modtoggle builds the manual form directly) — confirm it is used elsewhere.
dropdown_modmanual = widgets.Dropdown(options=['', 'SNL PVlib', 'NREL PVWatts'],
                                      value=None,
                                      description='Método',
                                      style={'description_width': 'initial'})
# BIFACIAL PARAMETERS
# 'Sí'/'No' map to True/False; extra fields appear when True.
dropdown_bifacial = widgets.Dropdown(options=[('Sí', True), ('No', False)],
                                     value=False,
                                     description='',
                                     style={'description_width': 'initial'})
w_dropbrifacial = widgets.VBox([widgets.Box([widgets.Label('Panel Bifacial'), dropdown_bifacial], layout=gui_layout)])
bifacial_vbox = widgets.VBox([w_dropbrifacial])
def handle_modtoggle(change):
    """Swap the module-tab contents to match the selected configuration method.

    'Manual' builds the full single-diode/CEC-style parameter form inline.
    NOTE: check_module() later reads these widgets by fixed child index, so
    the row order built here must not change.
    """
    if change['new'] == 'Repositorio':
        module_vbox.children = [module_btn, w_dropmodrepo]
    elif change['new'] == 'PVsyst':
        module_vbox.children = [module_btn, w_modupload]
    elif change['new'] == 'Manual':
        w_T_NOCT = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_Type = widgets.Dropdown(options=[('Mono-Si', 'monoSi'), ('Multi-Si', 'multiSi'), ('Poli-Si', 'polySi'), ('CIS', 'cis'), ('CIGS', 'cigs'), ('CdTe', 'cdte'), ('Amorfo', 'amorphous')], value=None, description='', style={'description_width': 'initial'})
        w_N_s = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_I_sc_ref = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_V_oc_ref = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_I_mp_ref = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_V_mp_ref = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_alpha_sc = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_beta_oc = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_gamma_r = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_STC = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        mod_conf = widgets.VBox([widgets.Box([widgets.HTML('<h5>Configuración Módulo</h5>', layout=widgets.Layout(height='auto'))]),
                                 widgets.Box([widgets.Label('$T_{NOCT}$ [ºC]'), w_T_NOCT], layout=gui_layout),
                                 widgets.Box([widgets.Label('Tecnología'), w_Type], layout=gui_layout),
                                 widgets.Box([widgets.Label('Número Celdas'), w_N_s], layout=gui_layout),
                                 widgets.Box([widgets.Label('$I_{SC}$ en STC [A]'), w_I_sc_ref], layout=gui_layout),
                                 widgets.Box([widgets.Label('$V_{OC}$ en STC [V]'), w_V_oc_ref], layout=gui_layout),
                                 widgets.Box([widgets.Label('$I_{MP}$ en STC [A]'), w_I_mp_ref], layout=gui_layout),
                                 widgets.Box([widgets.Label('$V_{MP}$ en STC [A]'), w_V_mp_ref], layout=gui_layout),
                                 widgets.Box([widgets.Label('Coef. Temp. $I_{SC}$ [A/ºC]'), w_alpha_sc], layout=gui_layout),
                                 widgets.Box([widgets.Label('Coef. Temp. $V_{OC}$ [V/ºC]'), w_beta_oc], layout=gui_layout),
                                 widgets.Box([widgets.Label('Coef. Temp. $P_{MP}$ [%/ºC]'), w_gamma_r], layout=gui_layout),
                                 widgets.Box([widgets.Label('$P_{Nominal}$ en STC [W]'), w_STC], layout=gui_layout)])
        module_vbox.children = [module_btn, mod_conf]
def handle_dropdown_modmanuf(change):
    """React to the module-repository choice.

    'PVFree' shows a database + numeric ID form (queried over HTTP later by
    check_module()); any SAM database loads its manufacturer list instead.
    """
    if change['new'] == 'PVFree':
        dropdown_pvfree = widgets.Dropdown(options=['', 'pvmodule', 'cecmodule'],
                                           value=None,
                                           description='',
                                           style={'description_width': 'initial'})
        pvfree_id = widgets.VBox([widgets.IntText(value=None, description='', style={'description_width': 'initial'})])
        w_droppvfree = widgets.VBox([widgets.Box([widgets.Label('Base de Datos'), dropdown_pvfree], layout=gui_layout)])
        w_modconf = widgets.VBox([widgets.Box([widgets.Label('ID'), pvfree_id], layout=gui_layout)])
        module_vbox.children = [module_btn, w_dropmodrepo, w_droppvfree, w_modconf]
    else:
        modules = pvlib.pvsystem.retrieve_sam(change['new'])
        manufacturers = []
        manufacturers.append('')
        # Manufacturer is the token before the first '_' in each SAM record name.
        for string in modules.transpose().index:
            manufacturers.append(string[:string.index('_')])
        # Database key appended last so later handlers can recover it via options[-1].
        manufacturers.append(change['new'])
        dropdown_modmanu.options = list(pd.unique(manufacturers))
        dropdown_modmanu.disabled = False
        module_vbox.children = [module_btn, w_dropmodrepo, w_dropmodmanu]
def handle_dropdown_modrepo(change):
    """Build the module-model dropdown for the manufacturer just selected."""
    database = dropdown_modmanu.options[-1]  # database key was appended last by handle_dropdown_modmanuf
    sam_table = pvlib.pvsystem.retrieve_sam(database)
    manufacturer = change['new']
    matching = [name for name in sam_table.transpose().index if manufacturer in name]
    # Blank first entry so the dropdown starts unselected.
    mod_options = [''] + list(sam_table[matching].transpose().index)
    mod_drop = widgets.Dropdown(options=mod_options,
                                value=None,
                                description='',
                                style={'description_width': 'initial'})
    w_dropmod = widgets.VBox([widgets.Box([widgets.Label('Módulos'), mod_drop], layout=gui_layout)])
    module_vbox.children = [module_btn, w_dropmodrepo, w_dropmodmanu, w_dropmod]
# PVSYST
def on_modbutton_clicked(obj):
    """Parse the selected PVsyst .PAN file into module parameters.

    Triggered by the 'Cargar PAN' button. Stores the result on
    ``modbtn.files['mod']`` and flips the button to its loaded state.
    """
    modbtn.description = 'PAN Cargado'
    modbtn.icon = 'check-circle'
    with modbtn_output:
        modbtn_output.clear_output()
        module = pvsyst.pan_to_module_param(path=upload_modbtn.files)
        module['Adjust'] = 0  # CEC 'Adjust' coefficient is not in the .PAN file; default 0
        module['Technology'] = module['Technol']  # rename to the key used downstream
        module['T_NOCT'] = module['TRef'] + 20  # NOTE(review): NOCT approximated as TRef + 20 — confirm
        module['IAM'] = module['IAM'].tolist()  # JSON-serializable list instead of array
        modbtn.files = {'mod': module}
# BIFACIAL
def handle_dropdown_bifacial(change):
    """Show or hide the bifacial parameter fields when the toggle changes.

    The dropdown maps 'Sí'/'No' to True/False; when enabled, inputs for
    bifaciality, panel-row height and panel-row width are appended.
    """
    if change['new']:  # idiomatic truthiness instead of '== True'
        w_bifaciality = widgets.BoundedFloatText(value=None, min=0, max=1, step=0.1, description='', style={'description_width': 'initial'})
        w_rowheight = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        w_rowwidth = widgets.FloatText(value=None, description='', style={'description_width': 'initial'})
        bif_conf = widgets.VBox([widgets.Box([widgets.Label('Bifacialidad [%]'), w_bifaciality], layout=gui_layout),
                                 widgets.Box([widgets.Label('Alto Fila Paneles [m]'), w_rowheight], layout=gui_layout),
                                 widgets.Box([widgets.Label('Ancho Fila Paneles [m]'), w_rowwidth], layout=gui_layout)])
        bifacial_vbox.children = [w_dropbrifacial, bif_conf]
    else:
        bifacial_vbox.children = [w_dropbrifacial]
# OBSERVE
# Wire the module-tab callbacks to their widgets.
module_btn.observe(handle_modtoggle, 'value')
dropdown_modrepo.observe(handle_dropdown_modmanuf, 'value')
dropdown_modmanu.observe(handle_dropdown_modrepo, 'value')
modbtn.on_click(on_modbutton_clicked)
dropdown_bifacial.observe(handle_dropdown_bifacial, 'value')
# TAB
tab_module = widgets.Box([widgets.HTML('<h4>Método de Configuración</h4>', layout=widgets.Layout(height='auto')),
                          module_vbox,
                          widgets.HTML('<h4>Parámetros Bifacialidad</h4>', layout=widgets.Layout(height='auto')),
                          bifacial_vbox],
                         layout=widgets.Layout(display='flex',
                                               flex_flow='column',
                                               border='solid 0px',
                                               align_items='stretch',
                                               width='50%'))
###############################
# SYSTEM CONFIGURATION TAB #
###############################
# SUBARRAYS
# Number of electrically distinct subarrays in the plant.
w_subarrays = widgets.IntText(value=0, description='', style={'description_width': 'initial'})
conf_subarrays = widgets.VBox([widgets.Box([widgets.HTML('<h4>Subarrays</h4>', layout=widgets.Layout(height='auto'))]),
                               widgets.Box([widgets.Label('Cantidad Subarrays'), w_subarrays], layout=gui_layout)])
# ELECTRICAL CONFIGURATION
# Comma-separated per-subarray values (modules per string, strings per inverter).
w_mps = widgets.Text(value=None, description='', style={'description_width': 'initial'})
w_spi = widgets.Text(value=None, description='', style={'description_width': 'initial'})
w_numinv = widgets.IntText(value=1, description='', style={'description_width': 'initial'})
def handle_mppt(change):
    """Reset the electrical-configuration text fields when the subarray count changes.

    Pre-fills 'Módulos por String' (``w_mps``) and 'Strings por Inversor'
    (``w_spi``) with one '0' per subarray, comma separated
    (e.g. 3 subarrays -> '0, 0, 0').

    NOTE(review): the original also built an unused '1, 1, ...' string
    (``v_mppt``); that dead code was removed — behavior is unchanged.
    """
    if change['new'] == 1:
        defaults = '0'
    else:
        defaults = ', '.join(['0'] * change['new'])
    w_mps.value = defaults
    w_spi.value = defaults
# Refresh the electrical-config defaults whenever the subarray count changes.
w_subarrays.observe(handle_mppt, 'value')
conf_elec = widgets.VBox([widgets.Box([widgets.HTML('<h4>Configuración Eléctrica</h4>', layout=widgets.Layout(height='auto'))]),
                          widgets.Box([widgets.Label('Módulos por String'), w_mps], layout=gui_layout),
                          widgets.Box([widgets.Label('Strings por Inversor'), w_spi], layout=gui_layout),
                          widgets.Box([widgets.Label('Número Inversores'), w_numinv], layout=gui_layout)])
# TRACKING AND ORIENTATION CONFIGURATION
header_TO = widgets.HTML("<h4>Seguidores y Orientación</h4>", layout=widgets.Layout(height='auto'))
# Fixed mount vs single-axis tracker selector.
tracker_btn = widgets.ToggleButtons(value=None,
                                    options=['Sin Seguidor', 'Seguidor 1-Eje'],
                                    description='',
                                    disabled=False,
                                    button_style='', # 'success', 'info', 'warning', 'danger' or ''
                                    tooltips=['Montaje con estructura fija',
                                              'Montaje con single-axis tracker'])
sysconfig_vbox = widgets.VBox([header_TO, tracker_btn])
def handle_toggle(change):
    """Render mounting-parameter fields for the selected tracker type.

    NOTE(review): this redefines the module-level name ``handle_toggle`` used
    earlier for the inverter tab; that callback was already registered by
    reference before this line runs, so behavior is unaffected, but the
    shadowing is confusing — consider renaming (together with the
    ``tracker_btn.observe`` call below).
    NOTE: check_mount() later reads these widgets by fixed child index, so
    the row order built here must not change.
    """
    if change['new'] == 'Sin Seguidor':
        w_Azimuth = widgets.Text(value=None, description='', style={'description_width': 'initial'})
        w_Tilt = widgets.Text(value=None, description='', style={'description_width': 'initial'})
        w_Racking = widgets.Dropdown(options=['', 'open_rack', 'close_mount', 'insulated_back'], value=None, description='', style={'description_width': 'initial'})
        # Pre-fill one '0' per subarray, comma separated.
        if w_subarrays.value == 1:
            v_angles = '0'
        else:
            v_angles = '0, ' * w_subarrays.value
            v_angles = v_angles[:-2]
        w_Azimuth.value = v_angles
        w_Tilt.value = v_angles
        no_tracker = widgets.VBox([widgets.Box([widgets.Label('Elevación [º]'), w_Tilt], layout=gui_layout),
                                   widgets.Box([widgets.Label('Azimutal [º]'), w_Azimuth], layout=gui_layout),
                                   widgets.Box([widgets.Label('Racking'), w_Racking], layout=gui_layout)])
        sysconfig_vbox.children = [header_TO, tracker_btn, no_tracker]
    elif change['new'] == 'Seguidor 1-Eje':
        w_AxisTilt = widgets.Text(value=None, description='', style={'description_width': 'initial'})
        w_AxisAzimuth = widgets.Text(value=None, description='', style={'description_width': 'initial'})
        w_MaxAngle = widgets.Text(value=None, description='', style={'description_width': 'initial'})
        w_Racking = widgets.Dropdown(options=['', 'open_rack', 'close_mount', 'insulated_back'], value=None, description='', style={'description_width': 'initial'})
        # Pre-fill one '0' per subarray, comma separated.
        if w_subarrays.value == 1:
            v_angles = '0'
        else:
            v_angles = '0, ' * w_subarrays.value
            v_angles = v_angles[:-2]
        w_AxisTilt.value = v_angles
        w_AxisAzimuth.value = v_angles
        w_MaxAngle.value = v_angles
        single_tracker = widgets.VBox([widgets.Box([widgets.Label('Elevación Eje [º]'), w_AxisTilt], layout=gui_layout),
                                       widgets.Box([widgets.Label('Azimutal Eje [º]'), w_AxisAzimuth], layout=gui_layout),
                                       widgets.Box([widgets.Label('Ángulo Máximo [º]'), w_MaxAngle], layout=gui_layout),
                                       widgets.Box([widgets.Label('Racking'), w_Racking], layout=gui_layout)])
        sysconfig_vbox.children = [header_TO, tracker_btn, single_tracker]
# Tracker selection drives the mounting form (second handle_toggle definition).
tracker_btn.observe(handle_toggle, 'value')
# GLOBAL PARAMETERS
# Global DC losses [%] (default 14.6) and AC-side loss factors kpc/kt/kin [%].
w_loss = widgets.BoundedFloatText(value=14.6, min=0, max=100, step=0.1, description='', style={'description_width': 'initial'})
w_name = widgets.Text(value='', placeholder='Sufijo extensión .JSON', description='', style={'description_width': 'initial'})
kpc_loss = widgets.BoundedFloatText(value=0.0, min=0, max=100, step=0.1, description='', style={'description_width': 'initial'})
kt_loss = widgets.BoundedFloatText(value=0.0, min=0, max=100, step=0.1, description='', style={'description_width': 'initial'})
kin_loss = widgets.BoundedFloatText(value=0.0, min=0, max=100, step=0.1, description='', style={'description_width': 'initial'})
conf_globalparams = widgets.VBox([widgets.Box([widgets.HTML('<h4>Parámetros Globales</h4>', layout=widgets.Layout(height='auto'))]),
                                  widgets.Box([widgets.Label('Pérdidas DC [%]'), w_loss], layout=gui_layout),
                                  widgets.Box([widgets.HTML('<h4> </h4>', layout=widgets.Layout(height='auto'))]),
                                  widgets.Box([widgets.Label('$k_{pc}$ [%]'), kpc_loss], layout=gui_layout),
                                  widgets.Box([widgets.Label('$k_{t}$ [%]'), kt_loss], layout=gui_layout),
                                  widgets.Box([widgets.Label('$k_{in}$ [%]'), kin_loss], layout=gui_layout),
                                  widgets.Box([widgets.HTML('<h4> </h4>', layout=widgets.Layout(height='auto'))]),
                                  widgets.Box([widgets.Label('Nombre Planta'), w_name], layout=gui_layout)])
# CONFIGURATION FILE
# Config Button
genconfig_btn = widgets.Button(value=False,
                               description='Generar Configuración',
                               disabled=False,
                               button_style='', # 'success', 'info', 'warning', 'danger' or ''
                               tooltip='Generar Configuración del Sistema',
                               icon='gear',
                               layout=widgets.Layout(width='50%', height='auto'))
genconfig_output = widgets.Output()
def on_genconfig_clicked(obj):
    """Validate all tabs and build the system configuration in memory.

    Runs the per-tab checkers and sys_config(); any output they produce is
    captured in ``genconfig_output``. The button flips to its 'generated'
    state when done.
    """
    with genconfig_output:
        genconfig_output.clear_output()
        inverter_status = check_inverter()
        module_status = check_module()
        mount_status = check_mount(num_arrays=w_subarrays.value)
        econfig_status = check_econfig(num_arrays=w_subarrays.value)
        system_configuration = sys_config(inverter_status, module_status, mount_status, econfig_status)
        genconfig_btn.description = 'Configuración Generada'
        genconfig_btn.icon = 'check'
genconfig_btn.on_click(on_genconfig_clicked)
# Download Button
download_btn = widgets.Button(value=False,
                              description='Descargar Configuración',
                              disabled=False,
                              button_style='',
                              tooltip='Descarga JSON de la Configuración del Sistema',
                              icon='download',
                              layout=widgets.Layout(width='50%', height='auto'))
# NOTE(review): this rebinds the module-level name ``output`` that the earlier
# .OND-loader callback also uses at call time — its messages will now land in
# (and clear) this download output area. Consider a distinct name.
output = widgets.Output()
def on_button_clicked(obj):
    """Validate the GUI state, build the system configuration and write it to JSON.

    The output file is ``./configurations/system_config[_<suffix>].json`` where
    the optional suffix comes from the 'Nombre Planta' field.

    NOTE(review): this redefines the module-level name ``on_button_clicked``
    used earlier for the inverter .OND loader; that callback was registered by
    reference, so behavior is unaffected, but consider renaming.
    """
    with output:
        output.clear_output()
        # Re-validate every tab before serializing.
        inverter_status = check_inverter()
        module_status = check_module()
        mount_status = check_mount(num_arrays=w_subarrays.value)
        econfig_status = check_econfig(num_arrays=w_subarrays.value)
        system_configuration = sys_config(inverter_status, module_status, mount_status, econfig_status)
        # Optional plant-name suffix for the output file.
        if w_name.value != '':
            json_file = f'./configurations/system_config_{w_name.value}.json'
        else:
            json_file = './configurations/system_config.json'
        # Robustness fix: make sure the target folder exists before writing.
        import os
        os.makedirs('./configurations', exist_ok=True)
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(system_configuration, f, indent=2)
    download_btn.description = 'Configuración Descargada'
    download_btn.icon = 'check'
download_btn.on_click(on_button_clicked)
# Generate/download buttons side by side, with their output areas below.
conf_json = widgets.VBox([widgets.Box([widgets.HTML('<h4>Archivo Configuración</h4>', layout=widgets.Layout(height='auto'))]),
                          widgets.HBox([genconfig_btn, download_btn]),
                          widgets.HBox([genconfig_output, output])])
# TAB
tab_sysconfig = widgets.Box([conf_subarrays,
                             conf_elec,
                             sysconfig_vbox,
                             conf_globalparams,
                             conf_json],
                            layout=widgets.Layout(display='flex',
                                                  flex_flow='column',
                                                  border='solid 0px',
                                                  align_items='stretch',
                                                  width='50%'))
###############################
# GUI #
###############################
# Str to List
def str_to_list(string):
    """Parse a comma-separated string of numbers into a Python list.

    E.g. ``'10, 20, 30' -> [10, 20, 30]``. The input is wrapped in square
    brackets and decoded as JSON, so floats (and any JSON values) work too.

    Parameters
    ----------
    string : str
        Comma-separated values, as typed in the GUI text fields.

    Returns
    -------
    list
        The parsed values.

    Raises
    ------
    json.JSONDecodeError
        If the string is not a valid comma-separated JSON value list.
    """
    return json.loads(f'[{string}]')
# Status Check
## Inverter
def check_inverter():
    """Collect the inverter configuration from the current GUI state.

    Returns
    -------
    list
        ``[inverters_database, inverter_name, inverter, ac_model]`` where
        ``inverter`` is a parameter dict and ``ac_model`` is 'sandia' or
        'pvwatts'.

    NOTE(review): the branches are independent ``if`` statements; if
    ``inverter_btn.value`` is still None this raises UnboundLocalError at
    the return — the GUI flow presumably guarantees a selection first.
    NOTE: widget values are read by fixed child index, tightly coupled to
    the layouts built by the inverter-tab handlers.
    """
    if inverter_btn.value == 'Repositorio':
        inverters_database = dropdown_invrepo.value
        # children[3] is the w_dropinv box appended by handle_dropdown_repo.
        inverter_name = inverter_vbox.children[3].children[0].children[1].value
        inverter = dict(pvlib.pvsystem.retrieve_sam(inverters_database)[inverter_name])
        inverter['Vac'] = float(inverter['Vac'])
        ac_model = 'sandia'
    if inverter_btn.value == 'PVsyst':
        inverter = btn.files['inv']  # dict parsed from the .OND file
        ac_model = 'pvwatts'
        inverters_database = None
        inverter_name = None
    if inverter_btn.value == 'Manual':
        if dropdown_manual.value == 'SNL PVlib':
            # Sandia inverter model coefficients, read from the manual form.
            inverter = {'Paco': inverter_vbox.children[2].children[1].children[1].value,
                        'Pdco': inverter_vbox.children[2].children[2].children[1].value,
                        'Vdco': inverter_vbox.children[2].children[3].children[1].value,
                        'Pso': inverter_vbox.children[2].children[4].children[1].value,
                        'C0': inverter_vbox.children[2].children[5].children[1].value,
                        'C1': inverter_vbox.children[2].children[6].children[1].value,
                        'C2': inverter_vbox.children[2].children[7].children[1].value,
                        'C3': inverter_vbox.children[2].children[8].children[1].value,
                        'Pnt': inverter_vbox.children[2].children[9].children[1].value}
            ac_model = 'sandia'
        elif dropdown_manual.value == 'NREL PVWatts':
            inverter = {'pdc0': inverter_vbox.children[2].children[1].children[1].value,
                        'eta_inv_nom': inverter_vbox.children[2].children[2].children[1].value,
                        'eta_inv_ref': 0.9637}
            ac_model = 'pvwatts'
        inverters_database = None
        inverter_name = None
    return [inverters_database, inverter_name, inverter, ac_model]
## Module
def check_module():
    """Collect the module configuration from the current GUI state.

    Returns
    -------
    list
        ``[modules_database, modules_name, module, bifacial, bifaciality,
        row_height, row_width]``.

    NOTE(review): like check_inverter(), the branches are independent ``if``
    statements; an unset ``module_btn.value`` would raise UnboundLocalError.
    NOTE: widget values are read by fixed child index, tightly coupled to
    the layouts built by the module-tab handlers.
    """
    if module_btn.value == 'Repositorio':
        if dropdown_modrepo.value != 'PVFree':
            modules_database = dropdown_modrepo.value
            modules_name = module_vbox.children[3].children[0].children[1].value
            module = dict(pvlib.pvsystem.retrieve_sam(modules_database)[modules_name])
        else:
            modules_database = dropdown_modrepo.value
            # Fetch the record from the PVFree web API using database + numeric ID.
            module = dict(requests.get(f'https://pvfree.herokuapp.com/api/v1/{module_vbox.children[2].children[0].children[1].value}/{module_vbox.children[3].children[0].children[1].children[0].value}/').json())
            modules_name = module['Name']
    if module_btn.value == 'PVsyst':
        module = modbtn.files['mod']
        # a_ref = gamma * Ns * (k*T/q) at 25 °C (k = 1.38e-23 J/K, q = 1.6e-19 C).
        module['a_ref'] = module['Gamma'] * module['NCelS'] * (1.38e-23 * (273.15 + 25) / 1.6e-19)
        modules_database = None
        modules_name = None
    if module_btn.value == 'Manual':
        module = {'T_NOCT': module_vbox.children[1].children[1].children[1].value,
                  'Technology': module_vbox.children[1].children[2].children[1].value,
                  'N_s': module_vbox.children[1].children[3].children[1].value,
                  'I_sc_ref': module_vbox.children[1].children[4].children[1].value,
                  'V_oc_ref': module_vbox.children[1].children[5].children[1].value,
                  'I_mp_ref': module_vbox.children[1].children[6].children[1].value,
                  'V_mp_ref': module_vbox.children[1].children[7].children[1].value,
                  'alpha_sc': module_vbox.children[1].children[8].children[1].value,
                  'beta_oc': module_vbox.children[1].children[9].children[1].value,
                  'gamma_r': module_vbox.children[1].children[10].children[1].value,
                  'STC': module_vbox.children[1].children[11].children[1].value}
        modules_database = None
        modules_name = None
    # Bifacial parameters: zeros when the 'Panel Bifacial' dropdown is False.
    if bifacial_vbox.children[0].children[0].children[1].value == False:
        bifacial = False
        bifaciality = 0
        row_height = 0
        row_width = 0
    else:
        bifacial = True
        bifaciality = bifacial_vbox.children[1].children[0].children[1].value
        row_height = bifacial_vbox.children[1].children[1].children[1].value
        row_width = bifacial_vbox.children[1].children[2].children[1].value
    return [modules_database, modules_name, module, bifacial, bifaciality, row_height, row_width]
## Mount
def check_mount(num_arrays):
    """Read the mounting/tracker widgets and return the mount configuration.

    Args:
        num_arrays: number of PV sub-arrays. For a single array the widget
            values are scalars (wrapped in one-element lists); for several
            arrays they are comma-separated strings parsed by str_to_list.

    Returns:
        list: [with_tracker, surface_azimuth, surface_tilt, axis_tilt,
               axis_azimuth, max_angle, module_type, racking_model]
    """
    # Shared lookup (this mapping was duplicated verbatim in both branches):
    # racking model name -> module construction string.
    module_type_by_racking = {
        'open_rack': 'open_rack_glass_glass',
        'close_mount': 'close_mount_glass_glass',
        'insulated_back': 'insulated_back_glass_polymer',
    }
    mount_box = sysconfig_vbox.children[2]
    if tracker_btn.value == 'Sin Seguidor':
        # Fixed mount: tilt/azimuth are given, tracker parameters do not apply.
        with_tracker = False
        axis_tilt = None
        axis_azimuth = None
        max_angle = None
        racking_model = mount_box.children[2].children[1].value
        if num_arrays == 1:
            surface_tilt = [float(mount_box.children[0].children[1].value)]
            surface_azimuth = [float(mount_box.children[1].children[1].value)]
        elif num_arrays > 1:
            surface_tilt = str_to_list(mount_box.children[0].children[1].value)
            surface_azimuth = str_to_list(mount_box.children[1].children[1].value)
    elif tracker_btn.value == 'Seguidor 1-Eje':
        # Single-axis tracker: axis geometry is given, fixed tilt does not apply.
        with_tracker = True
        surface_azimuth = None
        surface_tilt = None
        racking_model = mount_box.children[3].children[1].value
        if num_arrays == 1:
            axis_tilt = [float(mount_box.children[0].children[1].value)]
            axis_azimuth = [float(mount_box.children[1].children[1].value)]
            max_angle = [float(mount_box.children[2].children[1].value)]
        elif num_arrays > 1:
            axis_tilt = str_to_list(mount_box.children[0].children[1].value)
            axis_azimuth = str_to_list(mount_box.children[1].children[1].value)
            max_angle = str_to_list(mount_box.children[2].children[1].value)
    # Unknown racking values now raise a clear KeyError instead of an
    # UnboundLocalError at the return statement.
    module_type = module_type_by_racking[racking_model]
    return [with_tracker, surface_azimuth, surface_tilt, axis_tilt, axis_azimuth, max_angle, module_type, racking_model]
## Electric Configuration
def check_econfig(num_arrays):
    """Read the electrical-configuration widgets.

    Returns:
        list: [modules_per_string, strings_per_inverter, num_inverters]
        where the first two are per-array lists.
    """
    inverter_count = int(w_numinv.value)
    if num_arrays == 1:
        # Single array: wrap the scalar widget values in one-element lists.
        per_string = [int(w_mps.value)]
        per_inverter = [int(w_spi.value)]
    elif num_arrays > 1:
        # Multiple arrays: parse the comma-separated widget text into lists.
        per_string = str_to_list(w_mps.value)
        per_inverter = str_to_list(w_spi.value)
    return [per_string, per_inverter, inverter_count]
## System Configuration
def sys_config(inverter_status, module_status, mount_status, econfig_status):
    """Assemble the complete system-configuration dict from the per-tab results.

    Args:
        inverter_status: result of the inverter check ->
            [inverters_database, inverter_name, inverter, ac_model].
        module_status: result of check_module() ->
            [modules_database, modules_name, module, bifacial, bifaciality,
             row_height, row_width].
        mount_status: result of check_mount() ->
            [with_tracker, surface_azimuth, surface_tilt, axis_tilt,
             axis_azimuth, max_angle, module_type, racking_model].
        econfig_status: result of check_econfig() ->
            [modules_per_string, strings_per_inverter, num_inverters].

    Returns:
        dict: every parameter needed downstream, plus the location and the
        global loss widgets read directly here.
    """
    system_configuration = {# Geographic Info
                            'latitude': w_latitude.value,
                            'longitude': w_longitude.value,
                            'tz': w_timezone.value,
                            'altitude': w_altitude.value,
                            'surface_type': w_surface.value,
                            'surface_albedo': w_albedo.value,
                            # Inverter
                            'inverters_database': inverter_status[0],
                            'inverter_name': inverter_status[1],
                            'inverter': dict(inverter_status[2]),
                            'ac_model': inverter_status[3],
                            # PV Module
                            'modules_database': module_status[0],
                            'module_name': module_status[1],
                            'module': dict(module_status[2]),
                            'bifacial': module_status[3],
                            'bifaciality': module_status[4],
                            'row_height': module_status[5],
                            'row_width': module_status[6],
                            # Mount
                            'with_tracker': mount_status[0],
                            'surface_azimuth': mount_status[1],
                            'surface_tilt': mount_status[2],
                            'axis_tilt': mount_status[3],
                            'axis_azimuth': mount_status[4],
                            'max_angle': mount_status[5],
                            'module_type': mount_status[6],
                            'racking_model': mount_status[7],
                            # Electric Configuration
                            'num_arrays': w_subarrays.value,
                            'modules_per_string': econfig_status[0],
                            'strings_per_inverter': econfig_status[1],
                            'num_inverter': econfig_status[2],
                            # Global Parameters
                            'loss': w_loss.value,
                            'kpc': kpc_loss.value,
                            'kt': kt_loss.value,
                            'kin': kin_loss.value,
                            'name': w_name.value}
    return system_configuration
# GUI - Dashboard
# Build the top-level dashboard: one tab per configuration step.
item_layout = widgets.Layout(margin='0 0 25px 0')
tab = widgets.Tab([tab_doc, tab_location, tab_inverter, tab_module, tab_sysconfig],
                  layout=item_layout)
# Tab captions (user-facing, Spanish).
tab.set_title(0, 'Documentación')
tab.set_title(1, 'Ubicación')
tab.set_title(2, 'Inversor')
tab.set_title(3, 'Módulo')
tab.set_title(4, 'Diseño Planta')
dashboard = widgets.VBox([tab])
display(dashboard)
3452604 | <reponame>UtkarshPathrabe/Competitive-Coding
class Solution:
    def search(self, nums: List[int], target: int) -> bool:
        """Return True iff target occurs in a rotated sorted array.

        The array may contain duplicates, so when the left endpoint equals
        the midpoint we cannot decide which half is sorted and simply
        shrink the window by one (worst case O(n), typical O(log n)).
        """
        if not nums:
            return False
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return True
            if nums[lo] == nums[mid]:
                # Duplicate of the left endpoint: ambiguous halves,
                # drop one element and retry.
                lo += 1
                continue
            mid_in_first = nums[lo] <= nums[mid]
            target_in_first = nums[lo] <= target
            if mid_in_first != target_in_first:
                # mid and target lie in different halves of the rotation.
                if mid_in_first:
                    lo = mid + 1
                else:
                    hi = mid - 1
            else:
                # Same half: ordinary binary-search step.
                if nums[mid] < target:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False
9786171 | <filename>website/admin.py
from django.contrib import admin
from website.models import contact
# Register your models here.
class contactAdmin(admin.ModelAdmin) :
    """Admin options for the contact model."""
    # Date drill-down navigation at the top of the change list.
    date_hierarchy = 'created_date'
    # Columns shown in the change-list table.
    list_display = ('name','email','created_date','subject')
    # Right-hand sidebar filter.
    list_filter = ('email',)
    # Fields searched by the admin search box.
    search_fields = ('name','message')
# Register the model together with its custom admin class.
admin.site.register (contact,contactAdmin)
| StarcoderdataPython |
4848 | <filename>tensorflow_rnn/mnist_lstm.py<gh_stars>0
import numpy as np
import tensorflow as tf
"""
Do an MNIST classification line by line by LSTM
"""
# Load MNIST and scale pixel values from [0, 255] to [0, 1].
(x_train, y_train), \
    (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# Treat each 28x28 image as a sequence of 28 rows of 28 pixels,
# consumed row by row by the LSTM.
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation("softmax"))
model.summary()
# Integer labels -> sparse categorical cross-entropy (probabilities in,
# because of the softmax layer above).
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer="sgd",
              metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_test, y_test),
          batch_size=100, epochs=100)
| StarcoderdataPython |
142916 | # encoding: utf-8
"""
Training implementation
Author: <NAME>
Update time: 08/11/2020
"""
import re
import sys
import os
import cv2
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from read_data import ChestXrayDataSet
from sklearn.metrics import roc_auc_score
from skimage.measure import label
from model import Densenet121_AG, Fusion_Branch
from PIL import Image
#np.set_printoptions(threshold = np.nan)
CKPT_PATH = ''
CKPT_PATH_G = '/best_model/AG_CNN_Global_epoch_1.pkl'
CKPT_PATH_L = '/best_model/AG_CNN_Local_epoch_2.pkl'
CKPT_PATH_F = '/best_model/AG_CNN_Fusion_epoch_23.pkl'
N_CLASSES = 14
CLASS_NAMES = [ 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia']
# load with your own dataset path
DATA_DIR = '/media/xxxx/data/xxxx/images'
TRAIN_IMAGE_LIST = '/labels/train_list.txt'
VAL_IMAGE_LIST = '/labels/val_list.txt'
save_model_path = '/model-AG-CNN/'
save_model_name = 'AG_CNN'
# learning rate
LR_G = 1e-8
LR_L = 1e-8
LR_F = 1e-3
num_epochs = 50
BATCH_SIZE = 32
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
transforms.Resize((256,256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
def Attention_gen_patchs(ori_image, fm_cuda):
    """Crop an attention-guided patch from every image in the batch.

    Pipeline per image: sum the feature map over channels into a class
    activation map (CAM), binarize it, keep its largest connected
    component, take that component's bounding box, crop the corresponding
    region from the original image, and re-normalize it for the local
    branch.

    Args:
        ori_image: CPU tensor batch of input images; assumed to reshape to
            (224, 224, 3) per image -- TODO confirm against the data loader.
        fm_cuda: GPU feature-map tensor of shape (batch, channels, h, w).

    Returns:
        GPU tensor of preprocessed patches, shape (batch, 3, 224, 224).
    """
    # feature map -> feature mask (using feature map to crop on the original image) -> crop -> patchs
    feature_conv = fm_cuda.data.cpu().numpy()
    size_upsample = (224, 224)
    bz, nc, h, w = feature_conv.shape
    patchs_cuda = torch.FloatTensor().cuda()
    for i in range(0, bz):
        feature = feature_conv[i]
        # Channel-summed activation map, min-max normalized to uint8 [0, 255].
        cam = feature.reshape((nc, h*w))
        cam = cam.sum(axis=0)
        cam = cam.reshape(h,w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        # Otsu-binarize the upsampled CAM and keep its largest connected blob.
        heatmap_bin = binImage(cv2.resize(cam_img, size_upsample))
        heatmap_maxconn = selectMaxConnect(heatmap_bin)
        heatmap_mask = heatmap_bin * heatmap_maxconn
        # Bounding box of the surviving mask pixels.
        ind = np.argwhere(heatmap_mask != 0)
        minh = min(ind[:,0])
        minw = min(ind[:,1])
        maxh = max(ind[:,0])
        maxw = max(ind[:,1])
        # to ori image
        image = ori_image[i].numpy().reshape(224,224,3)
        # Central ~1/3 crop, then resize back to 224x224 before applying the mask box.
        image = image[int(224*0.334):int(224*0.667),int(224*0.334):int(224*0.667),:]
        image = cv2.resize(image, size_upsample)
        image_crop = image[minh:maxh,minw:maxw,:] * 256 # because image was normalized before
        image_crop = preprocess(Image.fromarray(image_crop.astype('uint8')).convert('RGB'))
        img_variable = torch.autograd.Variable(image_crop.reshape(3,224,224).unsqueeze(0).cuda())
        patchs_cuda = torch.cat((patchs_cuda,img_variable),0)
    return patchs_cuda
def binImage(heatmap):
    """Binarize a uint8 heatmap with Otsu's automatic threshold (0/255 output)."""
    _, heatmap_bin = cv2.threshold(heatmap , 0 , 255 , cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # t in the paper
    #_, heatmap_bin = cv2.threshold(heatmap , 178 , 255 , cv2.THRESH_BINARY)
    return heatmap_bin
def selectMaxConnect(heatmap):
    """Return a 0/1 int mask of the largest connected component of a binary map.

    Args:
        heatmap: 2-D binary image (non-zero pixels are foreground).

    Returns:
        Integer array of the same shape: 1 inside the largest connected
        component, 0 elsewhere (all zeros if there is no foreground).
    """
    labeled_img, num = label(heatmap, connectivity=2, background=0, return_num=True)
    max_label = 0
    max_num = 0
    for i in range(1, num + 1):
        # Hoisted: the component size was previously computed twice per label.
        component_size = np.sum(labeled_img == i)
        if component_size > max_num:
            max_num = component_size
            max_label = i
    lcc = (labeled_img == max_label)
    if max_num == 0:
        # No foreground component: comparing against the impossible label -1
        # yields an all-False (-> all-zero) mask.
        lcc = (labeled_img == -1)
    lcc = lcc + 0  # cast boolean mask to int 0/1
    return lcc
def main():
    """Train the three-branch AG-CNN (global, local, fusion) end to end.

    Builds the ChestX-ray data loaders, restores available checkpoints
    (baseline DenseNet weights and/or per-branch checkpoints), then runs
    the joint training loop, evaluating and saving every branch each epoch.
    All models run on CUDA.
    """
    print('********************load data********************')
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    train_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                     image_list_file=TRAIN_IMAGE_LIST,
                                     transform=transforms.Compose([
                                         transforms.Resize(224),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         normalize,
                                     ]))
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, num_workers=4, pin_memory=True)
    # Validation uses Resize(256) + CenterCrop(224) (train resizes straight to 224).
    test_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                    image_list_file=VAL_IMAGE_LIST,
                                    transform=transforms.Compose([
                                        transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))
    test_loader = DataLoader(dataset=test_dataset, batch_size=128,
                             shuffle=False, num_workers=4, pin_memory=True)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    Global_Branch_model = Densenet121_AG(pretrained = False, num_classes = N_CLASSES).cuda()
    Local_Branch_model = Densenet121_AG(pretrained = False, num_classes = N_CLASSES).cuda()
    Fusion_Branch_model = Fusion_Branch(input_size = 2048, output_size = N_CLASSES).cuda()

    if os.path.isfile(CKPT_PATH):
        print("=> loading checkpoint")
        checkpoint = torch.load(CKPT_PATH)
        # to load state
        # Code modified from torchvision densenet source for loading from pre .4 densenet weights.
        state_dict = checkpoint['state_dict']
        remove_data_parallel = True # Change if you don't want to use nn.DataParallel(model)

        # Rewrites legacy key names such as "...denselayer1.norm.1.weight"
        # into the post-0.4 "...denselayer1.norm1.weight" form.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        for key in list(state_dict.keys()):
            ori_key = key
            key = key.replace('densenet121.','')
            #print('key',key)
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            # Strip the leading "module." added by nn.DataParallel.
            new_key = new_key[7:] if remove_data_parallel else new_key
            #print('new_key',new_key)
            if '.0.' in new_key:
                new_key = new_key.replace('0.','')
            state_dict[new_key] = state_dict[ori_key]
            # Delete old key only if modified.
            if match or remove_data_parallel:
                del state_dict[ori_key]
        # Both CNN branches start from the same baseline weights.
        Global_Branch_model.load_state_dict(state_dict)
        Local_Branch_model.load_state_dict(state_dict)
        print("=> loaded baseline checkpoint")
    else:
        print("=> no checkpoint found")

    # Per-branch checkpoints (if present) override the baseline weights.
    if os.path.isfile(CKPT_PATH_G):
        checkpoint = torch.load(CKPT_PATH_G)
        Global_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Global_Branch_model checkpoint")

    if os.path.isfile(CKPT_PATH_L):
        checkpoint = torch.load(CKPT_PATH_L)
        Local_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Local_Branch_model checkpoint")

    if os.path.isfile(CKPT_PATH_F):
        checkpoint = torch.load(CKPT_PATH_F)
        Fusion_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Fusion_Branch_model checkpoint")

    cudnn.benchmark = True
    criterion = nn.BCELoss()
    # One optimizer/scheduler per branch; the CNN branches use tiny fine-tuning
    # rates (LR_G/LR_L) while the fusion head trains at LR_F.
    optimizer_global = optim.Adam(Global_Branch_model.parameters(), lr=LR_G, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_global = lr_scheduler.StepLR(optimizer_global , step_size = 10, gamma = 1)

    optimizer_local = optim.Adam(Local_Branch_model.parameters(), lr=LR_L, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_local = lr_scheduler.StepLR(optimizer_local , step_size = 10, gamma = 1)

    optimizer_fusion = optim.Adam(Fusion_Branch_model.parameters(), lr=LR_F, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_fusion = lr_scheduler.StepLR(optimizer_fusion , step_size = 15, gamma = 0.1)
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    for epoch in range(num_epochs):
        since = time.time()
        print('Epoch {}/{}'.format(epoch , num_epochs - 1))
        print('-' * 10)
        #set the mode of model
        lr_scheduler_global.step()  #about lr and gamma
        lr_scheduler_local.step()
        lr_scheduler_fusion.step()
        Global_Branch_model.train()  #set model to training mode
        Local_Branch_model.train()
        Fusion_Branch_model.train()

        running_loss = 0.0
        #Iterate over data
        for i, (input, target) in enumerate(train_loader):
            input_var = torch.autograd.Variable(input.cuda())
            target_var = torch.autograd.Variable(target.cuda())
            optimizer_global.zero_grad()
            optimizer_local.zero_grad()
            optimizer_fusion.zero_grad()

            # compute output: global branch first, its feature map drives the
            # attention crop fed to the local branch; pooled features of both
            # branches feed the fusion head.
            output_global, fm_global, pool_global = Global_Branch_model(input_var)

            patchs_var = Attention_gen_patchs(input,fm_global)

            output_local, _, pool_local = Local_Branch_model(patchs_var)
            #print(fusion_var.shape)
            output_fusion = Fusion_Branch_model(pool_global, pool_local)
            #

            # loss: fixed weighting 0.8 / 0.1 / 0.1 (global / local / fusion).
            loss1 = criterion(output_global, target_var)
            loss2 = criterion(output_local, target_var)
            loss3 = criterion(output_fusion, target_var)
            #
            loss = loss1*0.8 + loss2*0.1 + loss3*0.1

            if (i%500) == 0:
                print('step: {} totalloss: {loss:.3f} loss1: {loss1:.3f} loss2: {loss2:.3f} loss3: {loss3:.3f}'.format(i, loss = loss, loss1 = loss1, loss2 = loss2, loss3 = loss3))

            loss.backward()
            optimizer_global.step()
            optimizer_local.step()
            optimizer_fusion.step()

            #print(loss.data.item())
            running_loss += loss.data.item()
            #break
            '''
            if i == 40:
                print('break')
                break
            '''

        # NOTE(review): divides by the last batch index i (= num_batches - 1),
        # not the batch count -- slight overestimate of the mean loss.
        epoch_loss = float(running_loss) / float(i)
        print(' Epoch over  Loss: {:.5f}'.format(epoch_loss))

        print('*******testing!*********')
        test(Global_Branch_model, Local_Branch_model, Fusion_Branch_model,test_loader)
        #break

        #save every epoch (epoch % 1 is always 0)
        if epoch % 1 == 0:
            save_path = save_model_path
            torch.save(Global_Branch_model.state_dict(), save_path+save_model_name+'_Global'+'_epoch_'+str(epoch)+'.pkl')
            print('Global_Branch_model already save!')
            torch.save(Local_Branch_model.state_dict(), save_path+save_model_name+'_Local'+'_epoch_'+str(epoch)+'.pkl')
            print('Local_Branch_model already save!')
            torch.save(Fusion_Branch_model.state_dict(), save_path+save_model_name+'_Fusion'+'_epoch_'+str(epoch)+'.pkl')
            print('Fusion_Branch_model already save!')

        time_elapsed = time.time() - since
        print('Training one epoch complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60 , time_elapsed % 60))
def test(model_global, model_local, model_fusion, test_loader):
    """Evaluate all three branches on the validation loader and print AUROCs.

    Accumulates ground truth and the three branches' predictions over the
    whole loader, then prints the per-class and mean AUROC for the global,
    local and fusion branches.
    """
    # initialize the ground truth and output tensor
    gt = torch.FloatTensor().cuda()
    pred_global = torch.FloatTensor().cuda()
    pred_local = torch.FloatTensor().cuda()
    pred_fusion = torch.FloatTensor().cuda()

    # switch to evaluate mode
    model_global.eval()
    model_local.eval()
    model_fusion.eval()
    cudnn.benchmark = True

    for i, (inp, target) in enumerate(test_loader):
        with torch.no_grad():
            if i % 2000 == 0:
                print('testing process:',i)
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            input_var = torch.autograd.Variable(inp.cuda())
            #output = model_global(input_var)
            # Same forward pipeline as training: global feature map drives
            # the attention crop fed to the local branch.
            output_global, fm_global, pool_global = model_global(input_var)

            patchs_var = Attention_gen_patchs(inp,fm_global)

            output_local, _, pool_local = model_local(patchs_var)

            output_fusion = model_fusion(pool_global,pool_local)

            pred_global = torch.cat((pred_global, output_global.data), 0)
            pred_local = torch.cat((pred_local, output_local.data), 0)
            pred_fusion = torch.cat((pred_fusion, output_fusion.data), 0)

    AUROCs_g = compute_AUCs(gt, pred_global)
    AUROC_avg = np.array(AUROCs_g).mean()
    print('Global branch: The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs_g[i]))

    AUROCs_l = compute_AUCs(gt, pred_local)
    AUROC_avg = np.array(AUROCs_l).mean()
    print('\n')
    print('Local branch: The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs_l[i]))

    AUROCs_f = compute_AUCs(gt, pred_fusion)
    AUROC_avg = np.array(AUROCs_f).mean()
    print('\n')
    print('Fusion branch: The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs_f[i]))
def compute_AUCs(gt, pred):
    """Compute the per-class Area Under the ROC Curve (AUC).

    Args:
        gt: Pytorch tensor on GPU, shape = [n_samples, n_classes],
            true binary labels.
        pred: Pytorch tensor on GPU, shape = [n_samples, n_classes],
            probability estimates, confidence values or binary decisions.

    Returns:
        List of AUROCs, one per class, in class-index order.
    """
    labels = gt.cpu().numpy()
    scores = pred.cpu().numpy()
    return [roc_auc_score(labels[:, c], scores[:, c]) for c in range(N_CLASSES)]
if __name__ == '__main__':
main()
| StarcoderdataPython |
1688379 | <filename>wavepy3/__init__.py
from .atmos import Atmos
from .constraint_analysis import constraint_analysis
from .prop import split_step
from . import analytic
from . import sources
| StarcoderdataPython |
1668316 | import pyautogui
import time
import json
from old_info import kols
# 摁chrome浏览器
link_num = 137
link_num_end = len(kols)
link_num_str = ""
action_list = [
{
"name":"摁chrome浏览器",
"x":618,
"y":800 - 25,
"action":"move_and_click",
"sleep": 1
},
{
"name":"摁pgy tab",
"x":120,
"y":800 - 750,
"action":"move_and_click",
"sleep": 1
},
{
"name":"摁地址栏",
"x":400,
"y":800 - 715,
"action":"move_and_click",
"sleep": 1
},
{
"name":"输入网址",
"x":400,
"y":800 - 715,
"action":"input_link",
"sleep": 15
},
{
"name":"点击userId",
"x":673,
"y":800 - 635,
"action":"move_and_click",
"sleep": 1
},
{
"name":"输入userId",
"x":673,
"y":800 - 635,
"action":"input_link_userid",
"sleep": 2
},
{
"name":"点击network内容",
"x":707,
"y":800 - 483,
# "x":721,
# "y":800 - 441,
"action":"move_and_click",
"sleep": 1
},
{
"name":"点击response",
"x":1112,
"y":800 - 471,
"action":"move_and_click",
"sleep": 1
},
{
"name":"复制response",
"x":1083,
"y":800 - 453,
"action":"move_and_click_and_copy",
"sleep": 1
},
{
"name":"摁qsh page pgy tab",
"x":592,
"y":800 - 751,
"action":"move_and_click",
"sleep": 1
},
{
"name":"摁输入框",
"x":187,
"y":800 - 606,
"action":"move_and_click_and_paste",
"sleep": 2
},
{
"name":"摁输入框 提交",
"x":230,
"y":800 - 522,
"action":"move_and_click",
"sleep": 1
},
{
"name":"更新link num",
"x":673,
"y":800 - 635,
"action":"update_link_num",
"sleep": 0
},
]
do_one = True
def do_one():
    """Run one scraping cycle: perform every GUI action in action_list once.

    Mutates module-level state: advances ``link_num`` to the next entry of
    ``kols`` and, once all entries are processed, rebinds the module-level
    name ``do_one`` to False so the ``while do_one: do_one()`` driver loop
    terminates.

    NOTE(review): the function intentionally shadows/overwrites the boolean
    flag of the same name declared above it; the driver loop relies on that.
    """
    global do_one
    global link_num
    global link_num_str
    global link_num_end
    for action in action_list:
        action_now = action.get("action", None)
        if action_now in ["move_and_click"]:
            pyautogui.moveTo(x=action.get("x", None), y=action.get("y", None), duration=0, tween=pyautogui.linear)
            pyautogui.click(x=action.get("x", None), y=action.get("y", None), clicks=1, button='left')
        elif action_now in ["input_link"]:
            # Navigate to the KOL page for the current entry.
            link_num_str = kols[link_num]["userId"]
            pyautogui.typewrite("https://pgy.xiaohongshu.com/solar/advertiser/kol/%s"%link_num_str)
            pyautogui.press('enter')
        elif action_now in ["input_link_userid"]:
            pyautogui.moveTo(x=action.get("x", None), y=action.get("y", None), duration=0, tween=pyautogui.linear)
            pyautogui.click(x=action.get("x", None), y=action.get("y", None), clicks=1, button='left')
            pyautogui.hotkey("command", "a")  # select-all before typing the new id
            link_num_str = kols[link_num]["userId"]
            pyautogui.typewrite(link_num_str)
            pyautogui.press('enter')
        elif action_now in ["move_and_click_and_copy"]:
            pyautogui.moveTo(x=action.get("x", None), y=action.get("y", None), duration=0, tween=pyautogui.linear)
            pyautogui.click(x=action.get("x", None), y=action.get("y", None), clicks=1, button='left')
            pyautogui.hotkey("command", "a")
            pyautogui.hotkey("command", "c")
        elif action_now in ["move_and_click_and_paste"]:
            pyautogui.moveTo(x=action.get("x", None), y=action.get("y", None), duration=0, tween=pyautogui.linear)
            time.sleep(2)
            pyautogui.click(x=action.get("x", None), y=action.get("y", None), clicks=1, button='left')
            pyautogui.hotkey("command", "v")
        elif action_now in ["update_link_num"]:
            link_num = link_num + 1
        # Per-action pause so the UI can keep up.
        time.sleep(action.get("sleep", 0))
    # BUGFIX: the check was `>`, which let link_num reach len(kols) without
    # setting the stop flag, so the next cycle raised IndexError on
    # kols[link_num]. `>=` stops cleanly after the last entry.
    if link_num >= link_num_end:
        do_one = False
while do_one:
do_one()
| StarcoderdataPython |
12848657 | <filename>plane_waves/polarization_animation.py
#----------------------------------------------------------------------
# # 9/25/18 - Update to use Python 3.6, PyQt5 and pyqtgraph 0.10.0
# <NAME>
#----------------------------------------------------------------------
from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import sys
## Always start by initializing Qt (only once per application)
app = QtGui.QApplication([])
## Define a top-level widget to hold everything
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Polarization Visualization')
## Create widgets to be placed inside
heading_text = QtGui.QLabel('Polarization Angles ' + u"\u03C8" + ' and ' + u"\u03B4")
# Box with sliders
sliderbox = QtGui.QGroupBox()
hBoxLayout = QtGui.QHBoxLayout()
psi_slider_layout = QtGui.QVBoxLayout()
delta_slider_layout = QtGui.QVBoxLayout()
# psi slider
psi_label = QtGui.QLabel(u"\u03C8")
psi_slider = QtGui.QSlider()
psi_slider.setOrientation(QtCore.Qt.Vertical)
psi_slider.setMinimum(0)
psi_slider.setMaximum(90)
psi_slider.setValue(0)
psi_value = QtGui.QLabel(str(psi_slider.value()) + u"\u00b0")
psi_slider_layout.addWidget(psi_label)
psi_slider_layout.addWidget(psi_slider)
psi_slider_layout.addWidget(psi_value)
def set_psi_value(value):
    """Slider callback: display the new psi angle and update the module-level psi_deg."""
    # \u00b0 is the degree sign shown next to the numeric label.
    psi_value.setText(str(value) + u"\u00b0")
    global psi_deg
    psi_deg = value
psi_slider.valueChanged.connect(set_psi_value)
# delta slider
delta_label = QtGui.QLabel(u"\u03B4")
delta_slider = QtGui.QSlider()
delta_slider.setOrientation(QtCore.Qt.Vertical)
delta_slider.setMinimum(-180)
delta_slider.setMaximum(180)
delta_slider.setValue(0)
delta_value = QtGui.QLabel(str(delta_slider.value()) + u"\u00b0")
delta_slider_layout.addWidget(delta_label)
delta_slider_layout.addWidget(delta_slider)
delta_slider_layout.addWidget(delta_value)
def set_delta_value(value):
    """Slider callback: display the new delta angle and update the module-level delta_deg."""
    # \u00b0 is the degree sign shown next to the numeric label.
    delta_value.setText(str(value) + u"\u00b0")
    global delta_deg
    delta_deg = value
delta_slider.valueChanged.connect(set_delta_value)
# Set layout of box containing sliders
hBoxLayout.addItem(psi_slider_layout)
hBoxLayout.addItem(delta_slider_layout)
sliderbox.setLayout(hBoxLayout)
# Box with options
optionbox = QtGui.QGroupBox()
vBoxLayout = QtGui.QVBoxLayout()
# Options
hfield_checkbox = QtGui.QCheckBox("Show H-field")
# Add to layout
vBoxLayout.addWidget(hfield_checkbox)
# Add to box
optionbox.setLayout(vBoxLayout)
# Create openGL view widget & add a grid
wGL = gl.GLViewWidget()
wGL.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
wGL.opts['distance'] = 5
g = gl.GLGridItem()
wGL.addItem(g)
## Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.setColumnStretch (1, 2)
## Add widgets to the layout in their proper positions
layout.addWidget(heading_text, 0, 0) # heading text goes in upper-left
layout.addWidget(sliderbox, 1, 0) # slider box goes underneath heading text
layout.addWidget(optionbox, 2, 0) # option box goes underneath slider box
layout.addWidget(wGL, 0, 1, 3, 1) # wGL goes on right side, spanning 3 rows
## Display the widget as a new window
w.show()
##------------ Set up polarization animation ------------##
degtorad = np.pi/180.0
# Function to create new array from old where new array is formatted to prepare to
# draw lines perpendicular from z-axis to curve defined by input array
def preptomakelines(pts):
    """Interleave each curve point with its projection onto the z-axis.

    For input of shape (N, 3) returns an array of shape (2N, 3) in which
    row 2i is (0, 0, z_i) (the i-th point projected onto the z-axis) and
    row 2i+1 is the original point, so drawing the result in 'lines' mode
    produces a perpendicular segment from the z-axis to every curve point.

    Vectorized with strided slice assignment (the original looped over rows
    in Python).
    """
    pts2 = np.zeros((2 * pts.shape[0], pts.shape[1]))
    pts2[0::2, 2] = pts[:, 2]   # even rows: only the z component survives
    pts2[1::2, :] = pts         # odd rows: the original points
    return pts2
psi_deg = float(psi_slider.value())
delta_deg = float(delta_slider.value())
# Calculate sinusoidal electric field for arbitrary polarization
def efield_arbpol(t, z, amplitude, psi_rad, delta_rad):
    """Evaluate the sinusoidal E-field of a plane wave with arbitrary polarization.

    Args:
        t: time (in units where the temporal period is 1).
        z: position(s) along the propagation axis (array or scalar).
        amplitude: peak field amplitude.
        psi_rad: polarization angle psi in radians (splits amplitude
            between the x and y components).
        delta_rad: phase delay delta in radians between the y and x
            components.

    Returns:
        (x, y, z): the two transverse field components and the unchanged
        z coordinate(s).
    """
    phase = 2 * np.pi * (t - z)
    ex = amplitude * np.cos(psi_rad) * np.cos(phase)
    ey = amplitude * np.sin(psi_rad) * np.cos(phase + delta_rad)
    return ex, ey, z
# Prep coordinate rotations for electric & magnetic fields to go from calculation
# coordinates to pyqtgraph plotting coordinates
temp2Darray = [[-1, 0, 0],
[0, 0, 1],
[0, 1, 0]]
rot_efield_coord = np.array(temp2Darray)
# Calculate electric & magnetic field arrays. Also make arrays to define lines.
amplitude = 1.0
z = np.linspace(-10, 10, 500)
x, y, z = efield_arbpol(0.0,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
# E-field
pts_e = np.vstack([x,y,z]).transpose()
pts_e_lines = preptomakelines(pts_e)
pts_e = np.dot(pts_e, rot_efield_coord)
pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
z0 = np.zeros(len(z))
pts_e_z0 = np.vstack([x,y,z0]).transpose()
pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
# H-field
pts_h = np.vstack([-y,x,z]).transpose() # Orthogonal to E
pts_h_lines = preptomakelines(pts_h)
pts_h = np.dot(pts_h, rot_efield_coord)
pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
pts_h_z0 = np.vstack([-y,x,z0]).transpose()
pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
# Get ready to make plots
efield_color = (1, 0, 0, 1)
efield_color_z0 = (1, 1, 1, 1)
efield_color_arrow = (1, 0.67, 0.67, 1)
hfield_color = (0, 0, 1, 1)
hfield_color_z0 = (1, 1, 1, 1)
hfield_color_arrow = (0.67, 0.67, 1, 1)
linewidth = 4.0
linewidth2Dpol = 2.0
linewidth2Defieldvector = 10.0
# Make plots
plt_e = gl.GLLinePlotItem(pos=pts_e, mode='line_strip', color=efield_color, width=linewidth, antialias=True)
wGL.addItem(plt_e)
#plt_e_lines = gl.GLLinePlotItem(pos=pts_e_lines, mode='lines', color=efield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_e_lines)
plt_e_z0 = gl.GLLinePlotItem(pos=pts_e_z0, mode='line_strip', color=efield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_e_z0)
plt_e_arrow = gl.GLLinePlotItem(pos=pts_e_arrow, mode='line_strip', color=efield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_e_arrow)
plt_h = gl.GLLinePlotItem(pos=pts_h, mode='line_strip', color=hfield_color, width=linewidth, antialias=True)
wGL.addItem(plt_h)
#plt_h_lines = gl.GLLinePlotItem(pos=pts_h_lines, mode='lines', color=hfield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_h_lines)
plt_h_z0 = gl.GLLinePlotItem(pos=pts_h_z0, mode='line_strip', color=hfield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_h_z0)
plt_h_arrow = gl.GLLinePlotItem(pos=pts_h_arrow, mode='line_strip', color=hfield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_h_arrow)
# Start with H-field items as invisible
plt_h.setVisible(False)
#plt_h_lines.setVisible(False)
plt_h_z0.setVisible(False)
plt_h_arrow.setVisible(False)
# Add lines to visually define axes
x_length = 1.1
y_length = 1.1
z_length = 10
linewidthaxis = 1.0
axis_color = (32, 32, 32, 40)
## make z-axis
zaxis = np.linspace(-z_length,z_length,10)
x_zaxis = np.zeros(10)
y_zaxis = np.zeros(10)
pts_zaxis = np.vstack([x_zaxis,zaxis,y_zaxis]).transpose()
plt_zaxis = gl.GLLinePlotItem(pos=pts_zaxis, color=axis_color, width=linewidthaxis, antialias=True)
#wGL.addItem(plt_zaxis)
## make y-axis
yaxis = np.linspace(-y_length,y_length,10)
x_yaxis = np.zeros(10)
z_yaxis = np.zeros(10)
pts_yaxis = np.vstack([yaxis,z_yaxis,x_yaxis]).transpose()
plt_yaxis = gl.GLLinePlotItem(pos=pts_yaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_yaxis)
## make x-axis
xaxis = np.linspace(-x_length,x_length,10)
y_xaxis = np.zeros(10)
z_xaxis = np.zeros(10)
pts_xaxis = np.vstack([y_xaxis,z_xaxis,xaxis]).transpose()
plt_xaxis = gl.GLLinePlotItem(pos=pts_xaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_xaxis)
# make image for x-y plane
image_shape = (2,2)
uniform_values = np.ones(image_shape, dtype=np.int) * 255
print(uniform_values)
uniform_image_transparent = pg.makeARGB(uniform_values)[0]
uniform_image_transparent[:,:,:] = 255
uniform_image_transparent[:,:,3] = 80
print(uniform_image_transparent)
v1 = gl.GLImageItem(uniform_image_transparent)
v1.translate(-image_shape[0]/2., -image_shape[1]/2., 0)
v1.rotate(90, 1,0,0)
wGL.addItem(v1)
# Set up some animation parameters
frametime = 50 # frame refresh time in ms
velocity = 1./frametime
counter = 0
# Function to update scene for each frame
def update():
    """Animation tick: recompute E (and H) field curves and refresh all plot items.

    Reads the module-level slider angles (psi_deg, delta_deg) and the frame
    counter, recomputes the field along z for the current time, rotates the
    points into the pyqtgraph plotting frame, and pushes the new data into
    every GLLinePlotItem. Also toggles the H-field items' visibility from
    the checkbox state each frame.
    """
    global z, z0, velocity, counter, amplitude
    global plt_e, rot_efield_coord, plt_e_z0, plt_e_arrow  #, plt_e_lines
    global plt_h, plt_h_z0, plt_h_arrow  #, plt_h_lines
    global psi_deg, delta_deg, degtorad
    counter += 1
    # Fractional time within one temporal period (shadows the time module,
    # which is not used inside this function).
    time = float(counter)/frametime % 1
    x, y, z = efield_arbpol(time,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
    # E-field curve in plotting coordinates.
    pts_e = np.vstack([x,y,z]).transpose()
    pts_e_lines = preptomakelines(pts_e)
    pts_e = np.dot(pts_e, rot_efield_coord)
    #pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
    plt_e.setData(pos=pts_e)
    #plt_e_lines.setData(pos=pts_e_lines)
    # 2-D projection of E onto the z=0 plane plus the arrow to its midpoint.
    pts_e_z0 = np.vstack([x,y,z0]).transpose()
    pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
    plt_e_z0.setData(pos=pts_e_z0)
    pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
    plt_e_arrow.setData(pos=pts_e_arrow)
    # H-field: E rotated 90 degrees about the propagation axis (-y, x).
    pts_h = np.vstack([-y,x,z]).transpose()
    pts_h_lines = preptomakelines(pts_h)
    pts_h = np.dot(pts_h, rot_efield_coord)
    #pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
    plt_h.setData(pos=pts_h)
    #plt_h_lines.setData(pos=pts_h_lines)
    pts_h_z0 = np.vstack([-y,x,z0]).transpose()
    pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
    plt_h_z0.setData(pos=pts_h_z0)
    pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
    plt_h_arrow.setData(pos=pts_h_arrow)
    # Poor man's state updating: sync H-field visibility with the checkbox.
    if hfield_checkbox.isChecked():
        plt_h.setVisible(True)
        #plt_h_lines.setVisible(True)
        plt_h_z0.setVisible(True)
        plt_h_arrow.setVisible(True)
    else:
        plt_h.setVisible(False)
        #plt_h_lines.setVisible(False)
        plt_h_z0.setVisible(False)
        plt_h_arrow.setVisible(False)
# Set up timer for animation
timer = QtCore.QTimer()
timer.timeout.connect(update)
# 50 ms matches the `frametime` constant used inside update().
timer.start(50)
## Start the Qt event loop
app.exec_()
| StarcoderdataPython |
3449470 | print '---- THIS CODE REQUIRES CHAINER V3 ----'
import warnings
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', RuntimeWarning)
warnings.simplefilter('ignore', FutureWarning)
import numpy as np
import time, os, copy, random, h5py
from argparse import ArgumentParser
import chainer
import chainer.functions as F
from chainer import link
from chainer import optimizers, Variable, cuda, serializers
from chainer.iterators import MultiprocessIterator
from chainer.optimizer import WeightDecay, GradientClipping
import datachef as DataChef
import modelx as Models
def parse_args():
    """Build and parse the command-line arguments for training / feature
    extraction.  Returns an argparse.Namespace; several string options are
    post-processed into richer structures by the main body below."""
    # set extract_features to 0 for training or 1 for feature extraction
    def_extract_features = 0
    # batch size
    def_minibatch = 16
    # image size for semantic segmentation
    def_scales_tr = '512,512'
    # image size for re-identification
    def_scales_reid = '512,170' # '778,255'
    # learning rates for fresh and pretrained layers
    def_optimizer = 'lr:0.01--lr_pretrained:0.01'
    # GPU ids
    def_GPUs = '0'
    # set checkpoint bigger than zero to load saved model from checkpoints folder
    def_checkpoint = 0
    # set pre-trained model path for finetuning using evaluation datasets
    def_model_path_for_ft = ''
    # label for the dataset
    def_dataset = 'ReID10Dx'
    # number of different ids in training data
    def_label_dim = '16803'
    def_label_dim_ft = '16803'
    # the image list for feature extraction
    def_eval_split = 'cuhk03_gallery'
    # the image list for training
    def_train_set = 'train_10d'
    # number of workers to load images parallel
    def_nb_processes = 4
    # maximum number of iterations
    def_max_iter = 200000
    # loss report interval
    def_report_interval = 50
    # number of iterations for checkpoints
    def_save_interval = 20000
    def_project_folder = '.'
    def_dataset_folder = ''
    p = ArgumentParser()
    p.add_argument('--extract_features', default=def_extract_features, type=int)
    p.add_argument('--minibatch', default=def_minibatch, type=int)
    p.add_argument('--scales_tr', default=def_scales_tr, type=str)
    p.add_argument('--scales_reid', default=def_scales_reid, type=str)
    p.add_argument('--optimizer', default=def_optimizer, type=str)
    p.add_argument('--GPUs', default=def_GPUs, type=str)
    p.add_argument('--dataset', default=def_dataset, type=str)
    p.add_argument('--eval_split', default=def_eval_split, type=str)
    p.add_argument('--train_set', default=def_train_set, type=str)
    p.add_argument('--checkpoint', default=def_checkpoint, type=int)
    p.add_argument('--model_path_for_ft', default=def_model_path_for_ft, type=str)
    p.add_argument('--label_dim', default=def_label_dim, type=str)
    p.add_argument('--label_dim_ft', default=def_label_dim_ft, type=int)
    p.add_argument('--nb_processes', default=def_nb_processes, type=int)
    p.add_argument('--max_iter', default=def_max_iter, type=int)
    p.add_argument('--report_interval', default=def_report_interval, type=int)
    p.add_argument('--save_interval', default=def_save_interval, type=int)
    p.add_argument('--project_folder', default=def_project_folder, type=str)
    p.add_argument('--dataset_folder', default=def_dataset_folder, type=str)
    args = p.parse_args()
    return args
def Evaluation():
    """Run feature extraction over `args.eval_split` and write features to CSV.

    Uses the module-level `Model` list (one replica per GPU) and the global
    `args`.  Python 2 code: relies on `zip` returning a list.
    """
    # Creat data generator
    batch_tuple = MultiprocessIterator(
        DataChef.ReID10D(args, args.project_folder + '/evaluation_list/' + args.eval_split + '.txt',
                         image_size=args.scales_tr[0]),
        args.minibatch, n_prefetch=2, n_processes=args.nb_processes, shared_mem=20000000, repeat=False, shuffle=False)
    # Keep the log in history
    history = {args.dataset: {'features': []}}
    for dataBatch in batch_tuple:
        dataBatch = zip(*dataBatch)
        # Prepare batch data: split the minibatch evenly across model replicas.
        IMG = np.array_split(np.array(dataBatch[0]), len(Model), axis=0)
        LBL = np.array_split(np.array(dataBatch[1]), len(Model), axis=0)
        # Forward
        for device_id, img, lbl in zip(range(len(Model)), IMG, LBL):
            Model[device_id](img, lbl, args.dataset, train=False)
        # Aggregate reporters from all GPUs
        reporters = []
        for i in range(len(Model)):
            reporters.append(Model[i].reporter)
            Model[i].reporter = {}  # clear reporter
        # History
        for reporter in reporters:
            for k in reporter[args.dataset].keys():
                history[args.dataset][k].append(reporter[args.dataset][k])
    # storing features to an outputfile
    features = np.concatenate(history[args.dataset]['features'], axis=0)
    outfile = '%s/evaluation_features/%s_@%s_%s.csv' % (
        args.project_folder, args.dataset, args.checkpoint, args.eval_split)
    np.savetxt(outfile, features, delimiter=',', fmt='%0.12e')
def Train():
    """Main multi-GPU training loop (Python 2 / chainer v3).

    Builds one MultiprocessIterator per (dataset, input scale), then for
    each iteration: forward on every model replica, aggregate grads onto
    Model[0], update, copy params back out, and periodically report /
    checkpoint / decay the learning rate (poly schedule in 10 steps).
    Relies on module-level globals `args`, `Model`, `opt`.
    """
    # Create data generator
    batch_tuples, history = {}, {}
    for dataset in args.dataset.split('+'):
        batch_tuples.update({dataset: []})
        for image_size in args.scales_tr:
            iterator = MultiprocessIterator(
                DataChef.ReID10D(args, args.project_folder + '/train_list/' + args.train_set + '.txt',
                                 image_size=image_size),
                args.minibatch, n_prefetch=2, n_processes=args.nb_processes, shared_mem=20000000, repeat=True,
                shuffle=True)
            batch_tuples[dataset].append(iterator)
        # Keep the log in history
        history.update({dataset: {'loss': []}})
    # Random input image size (change it after every x minibatch)
    batch_tuple_indx = np.random.choice(range(len(args.scales_tr)), args.max_iter / 10)
    batch_tuple_indx = list(np.repeat(batch_tuple_indx, 10))
    # Train
    start_time = time.time()
    for iterk in range(args.checkpoint, len(batch_tuple_indx)):
        # Get a minibatch while sequentially rotating between datasets
        for dataset in args.dataset.split('+'):
            dataBatch = batch_tuples[dataset][batch_tuple_indx[iterk]].next()
            dataBatch = zip(*dataBatch)
            # Prepare batch data
            IMG = np.array_split(np.array(dataBatch[0]), len(Model), axis=0)
            LBL = np.array_split(np.array(dataBatch[1]), len(Model), axis=0)
            # Forward
            for device_id, img, lbl in zip(range(len(Model)), IMG, LBL):
                Model[device_id](img, lbl, dataset, train=True)
            # Aggregate reporters from all GPUs
            reporters = []
            for i in range(len(Model)):
                reporters.append(Model[i].reporter)
                Model[i].reporter = {}  # clear reporter
            # History
            for reporter in reporters:
                for k in reporter[dataset].keys():
                    history[dataset][k].append(reporter[dataset][k])
            # Accumulate grads
            for i in range(1, len(Model)):
                Model[0].addgrads(Model[i])
            # Update
            opt.update()
            # Update params of other models
            for i in range(1, len(Model)):
                Model[i].copyparams(Model[0])
        # Report
        if (iterk + 1) % args.report_interval == 0:
            DataChef.Report(
                history, args.report_interval * len(args.GPUs), (iterk + 1), time.time() - start_time, split='train')
        # Saving the model
        if (iterk + 1) % args.save_interval == 0 or (iterk + 1) == len(batch_tuple_indx):
            serializers.save_hdf5('%s/checkpoints/%s_%s_iter_%d.chainermodel' %
                                  (args.project_folder, args.dataset, args.train_set[6:], iterk + 1), Model[0])
            serializers.save_npz('%s/checkpoints/%s_%s_iter_%d.chaineropt' %
                                 (args.project_folder, args.dataset, args.train_set[6:], iterk + 1), opt)
        # Decrease learning rate (poly in 10 steps)
        if (iterk + 1) % int(args.max_iter / 10) == 0:
            decay_rate = (1.0 - float(iterk) / args.max_iter) ** 0.9
            # Learning rate of fresh layers
            opt.lr = args.optimizer['lr'] * decay_rate
            # Learning rate of pretrained layers
            for name, param in opt.target.namedparams():
                if name.startswith('/predictor/'):
                    param.update_rule.hyperparam.lr = args.optimizer['lr_pretrained'] * decay_rate
def SetupOptimizer(model):
    """Attach a Nesterov accelerated-gradient optimizer (momentum 0.9) to
    *model* and return it.  The base learning rate is taken from the
    globally parsed command-line arguments."""
    optimizer = optimizers.NesterovAG(lr=args.optimizer['lr'], momentum=0.9)
    optimizer.setup(model)
    return optimizer
def toGPU():
    """Replicate opt.target onto every configured GPU and return the list.

    The optimizer's own model stays first (index 0) and is moved to the
    first GPU id; each additional GPU gets a deep copy.  Every replica is
    tagged with `gpu_id` and an empty `reporter` dict used by Train().
    """
    # main model is always first
    Model = [opt.target]
    for i in range(1, len(args.GPUs)):
        _model = copy.deepcopy(opt.target)
        _model.to_gpu(args.GPUs[i])
        _model.gpu_id = args.GPUs[i]
        _model.reporter = {}
        Model.append(_model)
    # First GPU device is by default the main one
    opt.target.to_gpu(args.GPUs[0])
    opt.target.gpu_id = args.GPUs[0]
    opt.target.reporter = {}
    return Model
def ResumeFromCheckpoint(path_to_checkpoint, model):
    """Load conv / batch-norm weights from an HDF5 checkpoint into *model*.

    Only links whose names end in '/conv' or '/bn' are restored; the HDF5
    group layout is assumed to mirror the chainer link hierarchy (the
    slash-separated link name maps directly to an HDF5 path).

    Improvements over the original: the HDF5 group is located by indexing
    the file with the link path instead of building and eval()-ing a
    Python expression, and the file handle is closed (it used to leak).
    Returns *model* (modified in place).
    """
    init_weights = h5py.File(path_to_checkpoint, 'r')
    try:
        for name, link in model.namedlinks():
            if name.endswith('/conv') or name.endswith('/bn'):
                # h5py accepts multi-component paths like 'a/b/conv'.
                f = init_weights[name.lstrip('/')]
                if name.endswith('/conv'):
                    link.W.data = np.array(f['W'])
                    if 'b' in f.keys():
                        link.b.data = np.array(f['b'])
                else:  # '/bn'
                    link.beta.data = np.array(f['beta'])
                    link.gamma.data = np.array(f['gamma'])
                    link.avg_mean = np.array(f['avg_mean'])
                    link.avg_var = np.array(f['avg_var'])
    finally:
        # np.array() copies the datasets, so closing the file is safe.
        init_weights.close()
    return model
# MAIN BODY
# Parse CLI flags, then expand the compact string options into structures:
# optimizer 'lr:a--lr_pretrained:b' -> dict, '+'-separated label dims -> list,
# '--'-separated 'h,w' scales -> list of [h, w], '/'-separated GPU ids -> list.
args = parse_args()
args.optimizer = dict(zip(['lr', 'lr_pretrained'], [float(x.split(':')[-1]) for x in args.optimizer.split('--')]))
args.label_dim = map(int, args.label_dim.split('+'))
args.scales_tr = [map(int, x.split(',')) for x in args.scales_tr.split('--')]
args.scales_reid = map(int, args.scales_reid.split(','))
# Adjust params w.r.t number of GPUs
args.GPUs = map(int, args.GPUs.split('/'))
args.minibatch *= len(args.GPUs)
args.optimizer['lr'] /= len(args.GPUs)
args.optimizer['lr_pretrained'] /= len(args.GPUs)
args.report_interval /= len(args.GPUs)
args.save_interval /= len(args.GPUs)
print vars(args)
print 'Initialize Model'
# Re-ID classifier on a (non-dilated) InceptionV3 backbone, with a separate
# dilated InceptionV3 segmentation head registered inside the same scope.
predictor = Models.InceptionV3(args, dilated=False)
model = Models.ReIDClassifier(predictor, args.label_dim[0], args)
with model.init_scope():
    model.segmentation = Models.InceptionV3Classifier(
        Models.InceptionV3(args, dilated=True), [Models.Classifier(20)], args)
# Fine-tuning: load pretrained weights and swap in a fresh classifier head.
if len(args.model_path_for_ft) > 0:
    model = ResumeFromCheckpoint(args.model_path_for_ft, model)
    model.classifiers = link.ChainList(Models.Conv(2048*3, args.label_dim_ft, 1, 1, 0, init_weights=None, pool=None,
                                                   nobias=False))
print 'Setup optimizer'
opt = SetupOptimizer(model)
# Use lower learning rate for pretrained parts
for name, param in opt.target.namedparams():
    if name.startswith('/predictor/'):
        param.update_rule.hyperparam.lr = args.optimizer['lr_pretrained']
opt.add_hook(WeightDecay(0.0005))
# opt.add_hook(GradientClipping(2.0))
# Resume training from a checkpoint
if args.checkpoint > 0:
    print 'Resume training from checkpoint'
    # Load model weights
    model = ResumeFromCheckpoint('%s/checkpoints/%s_%s_iter_%d.chainermodel' %
                                 (args.project_folder, args.dataset, args.train_set[6:], args.checkpoint), model)
    # Load optimizer status
    serializers.load_npz('%s/checkpoints/%s_%s_iter_%d.chaineropt' %
                         (args.project_folder, args.dataset, args.train_set[6:], args.checkpoint), opt)
    # Adjust the learning rate: replay the poly decay schedule up to the
    # checkpoint iteration so the resumed LR matches where training left off.
    decay_rate = 1.0
    for iterk in range(args.checkpoint):
        if (iterk + 1) % int(args.max_iter / 10) == 0:
            decay_rate = (1.0 - float(iterk) / args.max_iter) ** 0.9
    # Learning rate of fresh layers
    opt.lr = args.optimizer['lr'] * decay_rate
    # Learning rate of pretrained layers
    for name, param in opt.target.namedparams():
        if name.startswith('/predictor/'):
            param.update_rule.hyperparam.lr = args.optimizer['lr_pretrained'] * decay_rate
print 'Load segmentation weights'
model.segmentation = ResumeFromCheckpoint('LIP_iter_30000.chainermodel', model.segmentation)
print 'Push Model to GPU'
Model = toGPU()
print 'Main Begins'
if args.extract_features:
    Evaluation()
else:
    Train()
| StarcoderdataPython |
5089823 | <reponame>aotuai/brainframe-qt
from .detection_item import DetectionItem
from .detection_polygon_item import DetectionPolygonItem
| StarcoderdataPython |
6666550 | <filename>cast/cast.py<gh_stars>0
#!/usr/bin/env python3
# CAST main class
# Author: <NAME> (<EMAIL>)
import os
import sys
import json
from cast.compile_command_parser import read_db
class Cast:
    """
    Main Cast class.

    Translates a clang ``compile_commands.json`` database into an Atmel
    Studio ``.cproj`` project file by filling placeholder comments in a
    template.
    """
    def __init__(self, compile_db="", template="", output="", workdir=""):
        # Fix: the original declared two __init__ methods; Python keeps only
        # the second, so the zero-argument form was silently dead code.
        # Default arguments restore both calling conventions.
        self.compile_db = compile_db  # path to compile_commands.json
        self.template = template      # .cproj template with placeholder comments
        self.output = output          # destination .cproj path
        self.workdir = workdir        # project root used to relativize paths

    def validate(self):
        """Return True when all input paths exist and an output was given."""
        return os.path.isfile(self.compile_db) and \
               os.path.exists(self.template) and \
               os.path.exists(self.workdir) and \
               self.output != ""

    def translate(self):
        """Render the template into self.output.

        Returns False when validation fails, True on success (the original
        returned None on success, which was falsy and indistinguishable
        from a failure for boolean-checking callers).
        """
        if not self.validate():
            return False
        data = read_db(self.compile_db, self.workdir)
        # Avoid shadowing the builtins `file`/`open` target names.
        file_list = "".join(
            " <Compile Include=\"" + entry + "\">\n"
            " <SubType>compile</SubType>\n"
            " </Compile>\n"
            for entry in data.entries)
        folder_list = "".join(
            " <Folder Include=\"" + folder + "\"/>\n"
            for folder in data.folders)
        # Read template
        with open(self.template, 'r') as fp:
            new_cproj = fp.read()
        # Replace dummy comments with actual data
        new_cproj = new_cproj.replace("/*FileGroup*/", file_list) \
                             .replace("/*FolderGroup*/", folder_list)
        # We could pretty print it, but Atmel Studio refuses most
        # kinds of pretty printing.
        # Write out xml
        with open(self.output, "w") as fp:
            fp.write(new_cproj)
        return True
| StarcoderdataPython |
6699466 | <gh_stars>0
from django.test import SimpleTestCase
from django.utils.crypto import get_random_string
from zentral.contrib.santa.serializers import RuleUpdateSerializer
class SantaSerializersTestCase(SimpleTestCase):
    """Rejection-path validation tests for RuleUpdateSerializer.

    Every case feeds the serializer an invalid payload and checks both that
    validation fails and that the first reported error message matches.
    """

    def _assert_invalid(self, data, error_field, expected_message):
        # Shared assertion helper: serialize *data*, expect failure, and
        # compare the first error detail recorded under *error_field*.
        serializer = RuleUpdateSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        detail = serializer.errors[error_field][0]
        self.assertEqual(str(detail), expected_message)

    def test_rule_wrong_policy_for_bundle_rule(self):
        self._assert_invalid(
            {"rule_type": "BUNDLE",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "policy": "BLOCKLIST"},
            "non_field_errors",
            "Wrong policy for BUNDLE rule",
        )

    def test_rule_identifier_and_sha256(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "sha256": get_random_string(64, "0123456789abcdef"),
             "policy": "BLOCKLIST"},
            "non_field_errors",
            "sha256 and identifier cannot be both set",
        )

    def test_rule_missing_identifier(self):
        self._assert_invalid(
            {"rule_type": "TEAMID",
             "policy": "BLOCKLIST"},
            "identifier",
            "This field is required",
        )

    def test_rule_team_id_sha256(self):
        self._assert_invalid(
            {"rule_type": "TEAMID",
             "sha256": get_random_string(64, "0123456789abcdef"),
             "policy": "BLOCKLIST"},
            "sha256",
            "This field cannot be used in a Team ID rule",
        )

    def test_rule_bad_team_id_identifier(self):
        self._assert_invalid(
            {"rule_type": "TEAMID",
             "identifier": get_random_string(24),
             "policy": "BLOCKLIST"},
            "identifier",
            "Invalid Team ID",
        )

    def test_rule_bad_sha256_identifier(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(24),
             "policy": "BLOCKLIST"},
            "identifier",
            "Invalid sha256",
        )

    def test_rule_custom_msg_allowlist(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "custom_msg": "yolo fomo",
             "policy": "ALLOWLIST"},
            "non_field_errors",
            "Custom message can only be set on BLOCKLIST rules",
        )

    def test_rule_tags_conflict(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "tags": ["un", "deux"],
             "excluded_tags": ["deux", "trois"],
             "policy": "BLOCKLIST"},
            "non_field_errors",
            "Conflict between tags and excluded_tags",
        )

    def test_rule_serial_numbers_conflict(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "serial_numbers": ["un", "deux"],
             "excluded_serial_numbers": ["deux", "trois"],
             "policy": "BLOCKLIST"},
            "non_field_errors",
            "Conflict between serial_numbers and excluded_serial_numbers",
        )

    def test_rule_primary_users_conflict(self):
        self._assert_invalid(
            {"rule_type": "BINARY",
             "identifier": get_random_string(64, "0123456789abcdef"),
             "primary_users": ["un", "deux"],
             "excluded_primary_users": ["deux", "trois"],
             "policy": "BLOCKLIST"},
            "non_field_errors",
            "Conflict between primary_users and excluded_primary_users",
        )
| StarcoderdataPython |
9795233 | <gh_stars>0
def next_tri_pent_hex(start_h):
    """Return the first number that is triangular, pentagonal and hexagonal,
    scanning hexagonal indices H >= start_h (Project Euler problem 45).

    Every hexagonal number H*(2H-1) equals the triangular number T(2H-1),
    so only the pentagonal property needs to be tested.  The pentagonal
    sequence is advanced incrementally, mirroring the original fragment.

    The original snippet was broken: ``H`` was never defined or
    incremented and ``Tri`` was undefined, so it looped forever.
    """
    p, pen = 0, 0
    h = start_h
    while True:
        hexagonal = h * (2 * h - 1)
        # Advance the pentagonal sequence until it reaches/passes hexagonal.
        while pen < hexagonal:
            p += 1
            pen = p * (3 * p - 1) // 2
        if pen == hexagonal:
            return hexagonal
        h += 1


if __name__ == '__main__':
    # T(285) = P(165) = H(143) = 40755; the next such number starts at H=144.
    print(next_tri_pent_hex(144))
| StarcoderdataPython |
1921922 | from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView
from django.conf import settings
from django.conf.urls.static import static
import rest_framework
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
# Root URL configuration: admin, DRF browsable-API auth, JWT token
# endpoints, and the per-app API routers; media files are served via
# static() (development convenience only).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),
    path('api/token/', TokenObtainPairView.as_view(), name='token-obtain-pair'),
    path('api/token/refresh', TokenRefreshView.as_view(), name='token-refresh'),
    path('api/accounts/', include('accounts.urls'), name='accounts'),
    path('api/realtors/', include('realtors.urls'), name='realtors'),
    path('api/listings/', include('listings.urls'), name='listings'),
    path('api/contacts/', include('contacts.urls'), name='contacts'),
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
1624835 | import sys, struct, random, string, meterpreter_bindings
# A stack of this stuff was stolen from the Python Meterpreter. We should look
# to find a nice way of sharing this across the two without the duplication.
#
# START OF COPY PASTE
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
# Transport/session configuration placeholders, patched in by the payload
# generator at build time.
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
# Packet types used in the 4-byte header of each request/response.
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
# Each TLV type value encodes its wire representation in the high bits.
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_COMMAND_ID = TLV_META_TYPE_UINT | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_HEADERS = TLV_META_TYPE_STRING | 441
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 442
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
NULL_BYTE = '\x00'
# Python 2/3 compatibility shims copied from the Python Meterpreter.
# NOTE(review): both helpers test against `str`, and the `bytes` lambda
# shadows the builtin and ignores the encoding argument — on Python 3 these
# do not behave like the builtins they mimic; this module appears to target
# Python 2.  Confirm before reusing under Python 3.
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
def tlv_pack(*args):
    """Serialize one TLV (type-length-value) triple to its wire format.

    Accepts either (type, value) or a single {'type': ..., 'value': ...}
    dict.  The 4-byte big-endian length includes the 8-byte header.
    Python 2 heritage: string/bytes handling relies on the module-level
    `bytes`/`is_bytes` shims above.
    """
    if len(args) == 2:
        tlv = {'type':args[0], 'value':args[1]}
    else:
        tlv = args[0]
    data = ''
    value = tlv['value']
    if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
        # Floats are rounded to the nearest integer before packing.
        if isinstance(value, float):
            value = int(round(value))
        data = struct.pack('>III', 12, tlv['type'], value)
    elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
        data = struct.pack('>IIQ', 16, tlv['type'], value)
    elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
        # Bool is encoded as a single 0x00/0x01 byte after the header.
        data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
    else:
        # Normalize the payload to a byte string before length-prefixing.
        if value.__class__.__name__ == 'unicode':
            value = value.encode('UTF-8')
        elif not is_bytes(value):
            value = bytes(value, 'UTF-8')
        if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
            # Strings are NUL-terminated on the wire.
            data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
        elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
            data = struct.pack('>II', 8 + len(value), tlv['type']) + value
        elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
            data = struct.pack('>II', 8 + len(value), tlv['type']) + value
        elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
            data = struct.pack('>II', 8 + len(value), tlv['type']) + value
    return data
def packet_enum_tlvs(pkt, tlv_type = None):
    """Yield each TLV in *pkt* as {'type', 'length', 'value'}.

    When *tlv_type* is given, only TLVs of that type (ignoring the
    COMPRESSED flag bit) are yielded.  Values are decoded according to
    their meta-type; RAW values are passed through untouched.
    """
    offset = 0
    while (offset < len(pkt)):
        tlv = struct.unpack('>II', pkt[offset:offset+8])
        if (tlv_type == None) or ((tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type):
            val = pkt[offset+8:(offset+8+(tlv[0] - 8))]
            if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
                val = str(val.split(NULL_BYTE, 1)[0])
            elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
                val = struct.unpack('>I', val)[0]
            elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
                val = struct.unpack('>Q', val)[0]
            elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
                val = bool(struct.unpack('b', val)[0])
            elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
                pass
            yield {'type':tlv[1], 'length':tlv[0], 'value':val}
        offset += tlv[0]
    # Fix: the original ended with `raise StopIteration()`, which PEP 479
    # turns into a RuntimeError on Python 3.7+.  A plain return terminates
    # the generator identically on Python 2 and 3.
    return
def packet_get_tlv(pkt, tlv_type):
    """Return the first TLV of *tlv_type* found in *pkt*, or {} when absent."""
    matches = list(packet_enum_tlvs(pkt, tlv_type))
    if matches:
        return matches[0]
    return {}
def packet_get_tlv_default(pkt, tlv_type, default):
    """Like packet_get_tlv, but fall back to {'value': default} when absent."""
    matches = list(packet_enum_tlvs(pkt, tlv_type))
    if matches:
        return matches[0]
    return {'value': default}
# END OF COPY PASTE
def validate_binding(required):
    """Makes sure that the current set of bindings that is available
    in Meterpreter's bindings list contains that required by the caller.
    This function returns the correct binding name to call."""
    # assume all core commands are valid
    # Command IDs below 1000 are core commands; everything else maps to an
    # extension-provided binding named 'command_<id>'.
    if required < 1000:
        required = 'meterpreter_core'
    else:
        required = 'command_{0}'.format(required)
    if not required in set(dir(meterpreter_bindings)):
        raise Exception('Missing bindings: {0} (is a dependent extension not yet loaded?)'.format(required))
    return required
def invoke_meterpreter(command_id, is_local, tlv = ""):
    """Build a Meterpreter request packet for *command_id* and dispatch it
    through the native binding; returns whatever the binding returns.
    *tlv* is the pre-packed TLV payload appended after the header.
    """
    binding = validate_binding(command_id)
    header = struct.pack('>I', PACKET_TYPE_REQUEST)
    header += tlv_pack(TLV_TYPE_COMMAND_ID, command_id)
    header += tlv_pack(TLV_TYPE_REQUEST_ID, 0)
    # add a leading 4-byte "zero" for the xor-key, 16 byte null guid, 4 byte encryption flag
    req = '\x00' * 24
    # Total packet length: header + payload + the 4-byte length field itself.
    req += struct.pack('>I', len(header) + len(tlv) + 4)
    req += header + tlv
    return getattr(meterpreter_bindings, binding)(is_local, req)
def rnd_string(n):
    """Return *n* random uppercase letters/digits drawn from OS entropy."""
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(n))
| StarcoderdataPython |
1968539 | import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
def psnr(im1, im2):
    """Peak signal-to-noise ratio between two images with values in [0, 255].

    Returns a (psnr_db, rmse) pair.  Identical inputs give rmse 0 and a
    divide-by-zero in the ratio (NumPy yields +inf with a warning).
    """
    diff = np.float64(im1) - np.float64(im2)
    rmse = np.sqrt(np.mean(diff ** 2))
    ratio_db = 20 * np.log10(255 / rmse)
    return ratio_db, rmse
def img_to_uint8(img):
    """Clip to [0, 255], round to the nearest integer, and cast to uint8."""
    clipped = np.clip(img, 0, 255)
    return np.round(clipped).astype(np.uint8)
# RGB -> YCbCr transform matrix (full-range 8-bit coefficients).
rgb_to_ycbcr = np.array([[65.481, 128.553, 24.966],
                         [-37.797, -74.203, 112.0],
                         [112.0, -93.786, -18.214]])
# The inverse transform is derived numerically from the forward matrix.
ycbcr_to_rgb = np.linalg.inv(rgb_to_ycbcr)

# Channel offsets added after the forward transform (Y floor 16, chroma 128).
_YCBCR_OFFSET = np.array([16, 128, 128])


def rgb2ycbcr(img):
    """Convert an RGB image (values in [0, 255]) to YCbCr."""
    scaled = np.dot(np.float64(img), rgb_to_ycbcr.T) / 255.0
    return scaled + _YCBCR_OFFSET


def ycbcr2rgb(img):
    """Convert a YCbCr image (values in [0, 255]) back to RGB."""
    centered = np.float64(img) - _YCBCR_OFFSET
    return np.dot(centered, ycbcr_to_rgb.T) * 255.0
| StarcoderdataPython |
6586564 | import sys
import argparse
import numpy as np
import math
def isCtoT(site):
    """True if any comma-separated component of *site* starts with 'C'
    and ends with 'T' (a C>T substitution)."""
    for component in site.split(','):
        if component[0] == 'C' and component[-1] == 'T':
            return True
    return False
def isGtoT(site):
    """True if any comma-separated component of *site* starts with 'G'
    and ends with 'T' (a G>T substitution)."""
    for component in site.split(','):
        if component[0] == 'G' and component[-1] == 'T':
            return True
    return False
def isRare(n, cutoff):
    """A site with allele count at or below *cutoff* counts as rare."""
    return n <= cutoff
def isExtremal(s, n, cutoff, min_n_for_s, max_s_for_n):
    """True when (s, n) lies strictly on the extremal frontier.

    The pair must be above the rarity cutoff, be the recorded optimum in
    both lookup tables, dominate every lower allele count in score, and be
    dominated in allele count by every higher score.
    """
    if n <= cutoff:
        return False
    if n != min_n_for_s[s][0] or s != max_s_for_n[n][0]:
        return False
    beats_lower_counts = all(s > v[0] for k, v in max_s_for_n.items() if k < n)
    below_higher_scores = all(n < v[0] for k, v in min_n_for_s.items() if k > s)
    return beats_lower_counts and below_higher_scores
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Identify extremal sites ')
parser.add_argument("-in", type=str,
help="input parsimony file.")
parser.add_argument("-ignoreCtoT", type=int,
help="set to 1 to ignore C>T sites (default=0).")
parser.add_argument("-ignoreGtoT", type=int,
help="set to 1 to ignore G>T sites (default=0).")
args = vars(parser.parse_args())
parsimony_filename = args.get('in', '')
if (parsimony_filename== None):
parser.print_help()
sys.exit(1)
ignoreCtoT = args.get('ignoreCtoT', '0')
if (ignoreCtoT == None):
ignoreCtoT = '0'
ignoreCtoT = int(ignoreCtoT)
ignoreGtoT = args.get('ignoreGtoT', '0')
if (ignoreGtoT == None):
ignoreGtoT = '0'
ignoreGtoT = int(ignoreGtoT)
max_s_for_n = {}
min_n_for_s = {}
sites = {}
x = []
y = []
saturated_s = {}
with file(parsimony_filename,'r') as f:
for line in f:
if 'alt_alleles' in line:
words = line.split()
site = words[0]
if ((ignoreCtoT) and (isCtoT(site)) or \
(ignoreGtoT) and (isGtoT(site))):
continue
n = int(words[1].split('=')[1])
s = int(words[2].split('=')[1])
s_fwd = 0
if (len(words[3].split('=')) > 1):
s_fwd = len(words[3].split('=')[1].split(','))
s_bck = s - s_fwd
sites[site] = (n, s, s_fwd, s_bck)
mn = min_n_for_s.get(s, [1e6, []])
ms = max_s_for_n.get(n, [0, []])
if n == s:
saturated_s[s] = 1+saturated_s.get(s, 0)
if n == mn[0]:
mn[1].append(site)
min_n_for_s[s] = mn
if s == ms[0]:
ms[1].append(site)
max_s_for_n[n] = ms
if n < mn[0]:
min_n_for_s[s] = [n, [site]]
if s > ms[0]:
max_s_for_n[n] = [s, [site]]
cutoff = max([k for (k,v) in saturated_s.items() if (v>1)])
log_base = 2
for site in sites.keys():
(n,s,s_fwd, s_bck) = sites[site]
if (isExtremal(s, n, cutoff, min_n_for_s, max_s_for_n)):
print site, 'alt_alleles='+str(n), 'parsimony_score='+str(s),\
'parsimony_score_forward='+str(s_fwd),\
'parsimony_score_backward='+str(s_bck)
x.append(math.log(n,log_base))
y.append(s)
m,b = np.polyfit(np.array(x), np.array(y), 1)
print '\nPhylogenetic instability (log-' + str(log_base)+' slope) =', m
| StarcoderdataPython |
3582930 | #!/usr/bin/env python2.7
# Permutations of bits to figure out the proper bit order from a fully
# undocumented SoC implementation whose name if being kept secret :-)
from array import array
from binascii import unhexlify
from neo.bits import BitSequence
from neo.util import crc16
def l2a(seq):
    """Pack a byte sequence into an unsigned-char array."""
    return array('B', seq)

def flatten(nested):
    """Concatenate a list of iterables into one flat list."""
    out = []
    for chunk in nested:
        out.extend(chunk)
    return out

def wordendian(seq):
    """Byte-swap each 32-bit word of a 16-byte sequence (little-endian view)."""
    swapped = []
    for start in range(0, 16, 4):
        swapped.append(list(reversed(seq[start:start + 4])))
    return flatten(swapped)

def reverse(seq):
    """Return the sequence reversed, as a new list."""
    out = list(seq)
    out.reverse()
    return out

def niot(seq):
    """Append two zero bytes (CRC placeholder) to the message."""
    return seq + [0, 0]
def niotcrc(msg):
    """CRC over the message bytes followed by two zero bytes.

    Data bits are shifted into the register MSB-first; whenever a set bit
    leaves the 16-bit window the CCITT polynomial 0x1021 is applied.  The
    register is only masked to 16 bits at the end, matching the device's
    behavior this reimplements.
    """
    polynomial = 0x1021
    crc = 0xffff
    for byte in msg + [0, 0]:
        for shift in range(7, -1, -1):
            carry = crc & 0x8000
            crc = (crc << 1) | ((byte >> shift) & 1)
            if carry:
                crc ^= polynomial
    return crc & 0xffff
def show(m,l):
    # Print label, the byte sequence in hex, and its 16-bit niot CRC.
    print "%-24s:" % m, ' '.join(["%02x" % x for x in l]), "= %04x" % niotcrc(l)
# Try every combination of the candidate bit/byte orderings on a known
# 16-byte counter sequence and print each permutation with its CRC, to
# match against the CRC produced by the undocumented SoC.
seq = range(16)
wseq = wordendian(seq)
rseq = reverse(seq)
rwseq = reverse(wseq)
# bseq: each byte with its bits reversed (MSB-first reinterpretation).
bseq = [BitSequence(i, msb=True, length=8).tobyte() for i in seq]
bwseq = wordendian(bseq)
brseq = reverse(bseq)
bwrseq = reverse(bwseq)
show('natural', seq)
show('endian', wseq)
show('reverse', rseq)
show('reverse+endian', rwseq)
show('bitrev', bseq)
show('bitrev+endian', bwseq)
show('bitrev+reverse', brseq)
show('bitrev+endian+reverse', bwrseq)
1806852 | from django.db.models.signals import post_save
from django.db import models
from django.contrib.auth.models import User
from videos.models import Video, Tag
# Create your models here.
class Profile(models.Model):
    """Per-user profile: avatar, preferences, social graph, and billing IDs.

    Created automatically for each new User via the post_save receiver
    below (one-to-one with the auth User model).
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')
    bio = models.TextField(max_length=500, blank=True)
    location = models.CharField(max_length=30, blank=True)
    # Content preferences: favorites plus tags to boost or exclude in feeds.
    favorite_videos = models.ManyToManyField(Video, blank=True)
    favorite_tags = models.ManyToManyField(
        Tag, blank=True, related_name='favorite_tags')
    exclude_tags = models.ManyToManyField(
        Tag, blank=True, related_name='exclude_tags')
    gender = models.CharField(max_length=35, null=True)
    race = models.CharField(max_length=35, null=True)
    dob = models.DateField(null=True, blank=True)
    orientation = models.CharField(max_length=35, null=True)
    # Asymmetric social relations: following is one-way, friendship is
    # stored one-way too (symmetrical=False).
    follows = models.ManyToManyField(
        'self', related_name='followers', symmetrical=False, blank=True)
    friends = models.ManyToManyField(
        'self', related_name='friends_with', symmetrical=False, blank=True)
    # Playback preferences.
    autoplay = models.BooleanField(default=True)
    random = models.BooleanField(default=True)
    is_active = models.BooleanField(default=True)
    is_uploader = models.BooleanField(default=False)
    # Stripe billing integration.
    one_click_purchasing = models.BooleanField(default=False)
    stripe_customer_id = models.CharField(max_length=50, blank=True, null=True)
    def __str__(self):
        return f'{self.user.username} Profile'
def userprofile_receiver(sender, instance, created, *args, **kwargs):
    """post_save hook: create a Profile for every newly created User.

    Fix: the original called ``UserProfile.objects.create(user=instance)``,
    but no ``UserProfile`` exists in this module — the model defined above
    is ``Profile`` — so the receiver raised NameError on every user signup.
    """
    if created:
        Profile.objects.create(user=instance)


# Create the profile automatically whenever a User row is first saved.
post_save.connect(userprofile_receiver, sender=User)
| StarcoderdataPython |
8037273 | <filename>posts/urls.py<gh_stars>1-10
from django.urls import path
from . import views
urlpatterns = [
    path('home/', views.home, name="home"),
    path('friends-home/', views.friends_home, name="friends-home"),
    # NOTE(review): the next two routes share the name "profile", so
    # reverse("profile") resolves to the last one registered — confirm
    # this overloading is intended.
    path('profile/', views.profile_without_user, name="profile"),
    path('profile/<str:username>/', views.profile, name="profile"),
    path('posts/', views.post, name="post"),
    path('posts/<str:post_id>/like', views.like, name="like"),
    path('post/<str:post_id>/', views.single_post, name="single_post"),
]
| StarcoderdataPython |
4947369 | <reponame>Karmantez/MachineLearning_Practice
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
def get_new_feature_name_df(old_feature_name_df):
    """Return a copy of *old_feature_name_df* whose duplicated
    'column_name' values are made unique by appending a per-duplicate
    counter suffix (e.g. 'foo', 'foo_1', 'foo_2')."""
    # Per-row count of how often each column_name has appeared so far.
    dup_counts = pd.DataFrame(
        data=old_feature_name_df.groupby('column_name').cumcount(),
        columns=['dup_cnt'],
    ).reset_index()
    # Join the duplicate counter back onto the original rows via the
    # 'index' column produced by reset_index().
    merged = pd.merge(old_feature_name_df.reset_index(), dup_counts, how='outer')
    merged['column_name'] = merged[['column_name', 'dup_cnt']].apply(
        lambda row: row['column_name'] + '_' + str(row['dup_cnt'])
        if row['dup_cnt'] > 0 else row['column_name'],
        axis=1,
    )
    return merged.drop(['index'], axis=1)
# In[3]:
def get_human_dataset( ):
    """Load the UCI HAR (human activity) train/test feature and label
    sets, de-duplicating feature names via get_new_feature_name_df()."""
    # Each data file is whitespace-separated, so pass a whitespace sep to read_csv.
    feature_name_df = pd.read_csv('./human_activity/features.txt',sep='\s+',
                        header=None,names=['column_index','column_name'])
    # Build a DataFrame in which duplicated feature names are made unique.
    new_feature_name_df = get_new_feature_name_df(feature_name_df)
    # Convert back to a plain list so it can be used as DataFrame column names.
    feature_name = new_feature_name_df.iloc[:, 1].values.tolist()
    # Load the train/test feature sets, applying feature_name as the columns.
    X_train = pd.read_csv('./human_activity/train/X_train.txt',sep='\s+', names=feature_name )
    X_test = pd.read_csv('./human_activity/test/X_test.txt',sep='\s+', names=feature_name)
    # Load the train/test labels into single-column 'action' DataFrames.
    y_train = pd.read_csv('./human_activity/train/y_train.txt',sep='\s+',header=None,names=['action'])
    y_test = pd.read_csv('./human_activity/test/y_test.txt',sep='\s+',header=None,names=['action'])
    # Return all four train/test DataFrames.
    return X_train, X_test, y_train, y_test
| StarcoderdataPython |
1797387 | <gh_stars>1-10
"""
Steve has a string, , consisting of lowercase English alphabetic
letters. In one operation, he can delete any pair of adjacent
letters with same value. For example, string "aabcc" would become
either "aab" or "bcc" after operation.
Steve wants to reduce as much as possible. To do this, he will
repeat the above operation as many times as it can be performed.
Help Steve out by finding and printing 's non-reducible form!
Note: If the final string is empty, print Empty String.
Sample Input: aaabccddd
Sample Output: abd
"""
def reduce_string(s):
    """Repeatedly delete adjacent equal-character pairs from *s*.

    Returns the fully reduced string, or the literal 'Empty String' when
    the input is None/empty or reduces to nothing.

    >>> assert(reduce_string('aaabccddd') == 'abd')
    >>> assert(reduce_string('aabbcc') == 'Empty String')
    """
    if s is None or len(s) == 0:
        return 'Empty String'
    # Classic stack reduction: a character cancels the previous one when
    # they match, otherwise it is kept for later cancellation.
    remaining = []
    for ch in s:
        if remaining and remaining[-1] == ch:
            remaining.pop()
        else:
            remaining.append(ch)
    return ''.join(remaining) if remaining else 'Empty String'
| StarcoderdataPython |
3273717 | # app/context_processors.py
def blogcategories(request):
    """Template context processor: expose all blog categories, sorted by
    name descending, as the ``blogcategories`` context variable."""
    # Imported lazily to avoid app-registry issues at module import time.
    from olympicvaxinfo.models import Category
    return {'blogcategories': Category.objects.all().order_by('-name')}
9766483 | <filename>keyboards/__init__.py
from .inlinekb import select_storage_kb
from .inlinekb import what_to_store_kb
from .inlinekb import season_things_kb
from .inlinekb import weeks_or_months_kb
from .inlinekb import pay_kb
from .inlinekb import back_kb
from .replykb import get_location_kb
# BUG FIX: ``__all__`` must contain *strings*; listing the keyboard
# objects themselves makes ``from keyboards import *`` raise
# "TypeError: Item in __all__ must be str".
__all__ = [
    "select_storage_kb",
    "what_to_store_kb",
    "season_things_kb",
    "weeks_or_months_kb",
    "pay_kb",
    "back_kb",
    "get_location_kb",
]
| StarcoderdataPython |
1777515 | <gh_stars>0
from __future__ import division
import numpy as np
def make_load_func(plan):
    """Wrap a training *plan* (sequence of daily loads) in a callable
    indexed from 1, as the fitness-fatigue model expects."""
    # ff model plans start with index 1, Python lists with 0.
    return lambda t: plan[t - 1]
def g(t, tau_1, w):
    '''time continuous version of g'''
    # Fitness impulse: load w(t) decayed exponentially with time constant tau_1.
    return w(t) * np.exp(-t/tau_1)
def discrete_g(n, tau_1, w):
    """Discrete fitness term: exponentially decayed sum of the loads on
    days 1..n-1, with time constant *tau_1* (0.0 when n <= 1)."""
    decayed = [w(i) * np.exp(-(n - i) / tau_1) for i in range(1, n)]
    return np.sum(np.asarray(decayed, dtype=np.double))
def h(t, tau_2, w):
    '''time continuous version of h'''
    # Fatigue impulse: load w(t) decayed exponentially with time constant tau_2.
    return w(t) * np.exp(-t/tau_2)
def discrete_h(n, tau_2, w):
    """Discrete fatigue term: exponentially decayed sum of the loads on
    days 1..n-1, with time constant tau_2 (0.0 when n <= 1)."""
    h_values = np.empty(n - 1, dtype=np.double)
    for i in range(1, n):
        h_values[i-1] = w(i) * np.exp(-(n-i)/tau_2)
    return np.sum(h_values)
def p(t, initial_p, tau_1, tau_2, k_1, k_2, w):
    '''time continuous performance output'''
    # Fitness-fatigue model: baseline + k_1 * fitness - k_2 * fatigue.
    return initial_p + k_1 * g(t, tau_1, w) - k_2 * h(t, tau_2, w)
def discrete_p(n, initial_p, tau_1, tau_2, k_1, k_2, w):
    """Discrete performance on day n: baseline + k_1*fitness - k_2*fatigue."""
    # NOTE: the locals g/h shadow the module-level functions of the same name.
    g = discrete_g(n, tau_1, w)
    h = discrete_h(n, tau_2, w)
    return initial_p + k_1 * g - k_2 * h
def discrete_p_curve_fit(plans, initial_p, k_1, tau_1, k_2, tau_2):
    '''takes a list of plans. to be used with scipy.optimize.curve_fit'''
    # Evaluate the model at the *end* of each plan; one result per plan.
    results = [0.0] * len(plans)
    for i, plan in enumerate(plans):
        w = make_load_func(plan)
        n = len(plan)
        g = discrete_g(n, tau_1, w)
        h = discrete_h(n, tau_2, w)
        results[i] = initial_p + k_1 * g - k_2 * h
    return results
def leistungs_entwicklung(n, initial_p, tau_1, tau_2, k_1, k_2, w):
    """Performance trajectory ("Leistungsentwicklung") over days 1..n,
    returned as a list of length n."""
    p = []
    for i in range(1, n+1):
        g = discrete_g(i, tau_1, w)
        h = discrete_h(i, tau_2, w)
        p.append(initial_p + k_1 * g - k_2 * h)
    return p
def performance_over_time(plan, initial_p, k_1, tau_1, k_2, tau_2):
    """Performance trajectory over the whole *plan* as a numpy array.

    NOTE: the parameter order here is (k_1, tau_1, k_2, tau_2), unlike
    leistungs_entwicklung's (tau_1, tau_2, k_1, k_2) — easy to mix up.
    """
    n = len(plan)
    p_values = np.empty(n, dtype=np.double)
    w = make_load_func(plan)
    for i in range(1, n+1):
        # Recomputes the decayed sums from scratch for every day (O(n^2)).
        g = discrete_g(i, tau_1, w)
        h = discrete_h(i, tau_2, w)
        p_values[i-1] = (initial_p + k_1 * g - k_2 * h)
    return p_values
def performance_over_time2(plan, parms):
    """Vector-parameter wrapper: parms = [initial_p, k_1, tau_1, k_2, tau_2]."""
    return performance_over_time(plan,
                                 parms[0],
                                 parms[1],
                                 parms[2],
                                 parms[3],
                                 parms[4])
def after_plan(plan, **kwargs):
    """Performance at the end of *plan*.

    kwargs must supply initial_p, tau_1, tau_2, k_1 and k_2 (KeyError
    otherwise)."""
    w = make_load_func(plan)
    return discrete_p(len(plan),
                      kwargs['initial_p'],
                      kwargs['tau_1'],
                      kwargs['tau_2'],
                      kwargs['k_1'],
                      kwargs['k_2'],
                      w)
def examplenew():
    """Demo: print the performance trajectory for a 28-day plan."""
    plan1 = [0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.9, 0.0]
    initial_p = 0.2
    tau_1 = 10
    tau_2 = 6
    k_1 = 0.15
    k_2 = 0.13
    # BUG FIX: performance_over_time expects (plan, initial_p, k_1, tau_1,
    # k_2, tau_2); the arguments were previously passed in the
    # (tau_1, tau_2, k_1, k_2) order used elsewhere in this module,
    # silently swapping the gain factors with the time constants.
    e = performance_over_time(plan1, initial_p, k_1, tau_1, k_2, tau_2)
    for v in e:
        print(v)
def example():
    """Demo: print the 28-day trajectory for plan3 (one day-1 session).

    NOTE(review): plan1 and plan2 are defined but never used — only plan3
    is passed to the model; confirm whether they were meant to be run too.
    """
    plan1 = [0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    plan2 = [0.9, 0.9, 0.9, 0.0, 0.9, 0.9, 0.0,
             0.9, 0.9, 0.9, 0.0, 0.9, 0.9, 0.0,
             0.9, 0.9, 0.9, 0.0, 0.9, 0.9, 0.0,
             0.9, 0.9, 0.9, 0.0, 0.9, 0.9, 0.0]
    plan3 = [0.9, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    w = make_load_func(plan3)
    n = len(plan3)
    initial_p = 0.2
    tau_1 = 10
    tau_2 = 6
    k_1 = 0.15
    k_2 = 0.13
    e = leistungs_entwicklung(n, initial_p, tau_1, tau_2, k_1, k_2, w)
    for v in e:
        print(v)
def example2():
    """Demo: print a 14-day trajectory with an alternative parameter set."""
    plan = [0.05, 0.05, 0.05, 0.0, 0.05, 0.05, 0.0,
            0.05, 0.05, 0.05, 0.0, 0.05, 0.00, 0.0]
    w = make_load_func(plan)
    n = len(plan)
    initial_p = 0.1
    k_1 = 0.242
    tau_1 = 45.2
    k_2 = 0.372
    tau_2 = 11.3
    e = leistungs_entwicklung(n, initial_p, tau_1, tau_2, k_1, k_2, w)
    for v in e:
        print(v)
if __name__ == '__main__':
examplenew()
| StarcoderdataPython |
36106 | <gh_stars>1-10
import os
import cv2
from ReceiptGenerator.draw_receipt import create_crnn_sample
NUM_OF_TRAINING_IMAGES = 3000
NUM_OF_TEST_IMAGES = 1000
# Text categories that a CRNN sample can be generated for.
TEXT_TYPES = ['word', 'word_column', 'word_bracket', 'int', 'float', 'price_left', 'price_right', 'percentage']
# TEXT_TYPES = ['word']
# Generate the training set: one sub-directory per text type plus a
# sample.txt index mapping "<type>/<i>.jpg" to its label.
# NOTE(review): the loop variable `type` shadows the builtin of the same name.
with open('./ReceiptProcessor/training_images/Train/sample.txt', 'w') as input_file:
    for type in TEXT_TYPES:
        if not os.path.exists('./ReceiptProcessor/training_images/Train/{}'.format(type)):
            os.mkdir('./ReceiptProcessor/training_images/Train/{}'.format(type))
        for i in range(0, NUM_OF_TRAINING_IMAGES):
            img, label = create_crnn_sample(type)
            cv2.imwrite('./ReceiptProcessor/training_images/Train/{}/{}.jpg'.format(type, i), img)
            input_file.write('{}/{}.jpg {}\n'.format(type, i, label))
# Same procedure for the (smaller) test set.
with open('./ReceiptProcessor/training_images/Test/sample.txt', 'w') as input_file:
    for type in TEXT_TYPES:
        if not os.path.exists('./ReceiptProcessor/training_images/Test/{}'.format(type)):
            os.mkdir('./ReceiptProcessor/training_images/Test/{}'.format(type))
        for i in range(0, NUM_OF_TEST_IMAGES):
            img, label = create_crnn_sample(type)
            cv2.imwrite('./ReceiptProcessor/training_images/Test/{}/{}.jpg'.format(type, i), img)
            input_file.write('{}/{}.jpg {}\n'.format(type, i, label))
| StarcoderdataPython |
1700464 | <reponame>Zylphrex/drakma
from django.conf import settings
from django.db import models
from api.models import Account
class CurrentAccount(models.Model):
    """Tracks which Account a user currently has selected (one per user)."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, unique=True, on_delete=models.CASCADE)
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    def __str__(self):
        return f'{self.user} - {self.account}'
| StarcoderdataPython |
11264454 | from js9 import j
from zerorobot.template.base import TemplateBase
class IpmiClient(TemplateBase):
    """zero-robot template wrapping a JumpScale IPMI client instance.

    The service data must provide 'bmc', 'user' and 'password';
    'port' is optional.
    """

    version = '0.0.1'
    template_name = "ipmi_client"

    def __init__(self, name=None, guid=None, data=None):
        super().__init__(name=name, guid=guid, data=data)

    def validate(self):
        """Create the underlying IPMI client config if it does not exist.

        Raises:
            ValueError: if a required field is missing from the service data.
        """
        # client instance already exists
        # NOTE(review): this checks the *zboot* client list, not the ipmi
        # one — looks like a copy/paste from the zboot template; confirm.
        if self.name in j.clients.zboot.list():
            return

        # Validate all required fields in one loop instead of three
        # copy-pasted if/raise blocks (messages unchanged).
        for field in ('bmc', 'user', 'password'):
            if not self.data.get(field):
                raise ValueError("no {} specified in service data".format(field))

        # create the client instance
        data = {
            'bmc': self.data['bmc'],
            'user': self.data['user'],
            'password_': self.data['password'],
            'port': self.data.get('port'),
        }
        j.clients.ipmi.get(instance=self.name, data=data, interactive=False)

    def delete(self):
        """Delete the client configuration, then the service itself."""
        j.clients.ipmi.delete(self.name)
        # call the delete of the base class
        super().delete()
| StarcoderdataPython |
3329720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import subprocess
import sys
from string import Template
# Validation patterns for user-supplied Docker identifiers.
DOCKER_IMAGE_NAME_RE = re.compile(r"^([a-zA-Z0-9_.]+/)?[a-zA-Z0-9_.]+$")
DOCKER_IMAGE_TAG_RE = re.compile(r"^[a-zA-Z0-9_.]+$")
ARCHIVE_NAME_VALID_CHAR_RE = re.compile(r"^[a-zA-Z0-9_]")


def docker_image_str(v):
    """argparse type check: return *v* if it is a valid Docker image name."""
    if not DOCKER_IMAGE_NAME_RE.match(v):
        raise argparse.ArgumentTypeError("'{}' is not a valid Docker image name".format(v))
    return v


def docker_tag_str(v):
    """argparse type check: return *v* if it is a valid Docker tag name."""
    if not DOCKER_IMAGE_TAG_RE.match(v):
        raise argparse.ArgumentTypeError("'{}' is not a valid Docker tag name".format(v))
    return v
def run(args=[], wd=os.getcwd(), verbose=False):
    """Execute *args* as a subprocess in directory *wd*; return True/False.

    With verbose=True output streams straight to stdout; otherwise combined
    stdout/stderr is captured and only printed when the command fails.

    NOTE(review): Python 2 syntax (print statements, old except clause).
    NOTE(review): both defaults are evaluated once at import time — the
    mutable ``args=[]`` and ``wd=os.getcwd()`` frozen to the import-time
    cwd; confirm callers always pass these explicitly.
    """
    args_str = " ".join(args)
    if verbose:
        print "--- Running '{}'...".format(args_str)
        returncode = subprocess.call(args, cwd=wd)
        sys.stdout.flush()
        if returncode != 0:
            print "--- Error while running '{}'! See above for details".format(args_str)
            return False
        else:
            return True
    else:
        try:
            # Capture output solely to suppress it on success.
            output = subprocess.check_output(
                args,
                stderr=subprocess.STDOUT,
                cwd=wd)
            return True
        except subprocess.CalledProcessError, e:
            print "--- Error while running '{}'! See below for details".format(args_str)
            print e.output
            print "---"
            return False
def templated_run(templated_args=None, cfg_dict=None, wd=None, verbose=False):
    """Substitute ``cfg_dict`` values into each ``string.Template`` argument
    and execute the resulting command via ``run``.

    BUG FIX: defaults are now resolved at *call* time — the old signature
    used mutable defaults ([] / {}) and ``wd=os.getcwd()``, which froze
    the working directory to whatever it was at import time.
    """
    if templated_args is None:
        templated_args = []
    if cfg_dict is None:
        cfg_dict = {}
    if wd is None:
        wd = os.getcwd()
    args = [Template(templated_arg).substitute(**cfg_dict)
            for templated_arg in templated_args]
    return run(args=args, wd=wd, verbose=verbose)
def load_configuration(cfg_file="package.json"):
    """Load the JSON build configuration and add derived path keys.

    Adds: 'file_dir' (directory of cfg_file), 'docker_file_dir' (the same
    path in Docker-friendly form, e.g. C:\\x -> /c/x on Windows) and
    'file' (the cfg_file path itself).
    """
    # Loading the configuration file
    cfg_dict = dict()
    with open(cfg_file) as cfg_file_data:
        cfg_dict = json.load(cfg_file_data)
    # Setupping the 'file_dir' variable
    file_dir = os.path.dirname(os.path.abspath(cfg_file))
    docker_file_dir = file_dir
    if sys.platform == "win32":
        # Convert a Windows drive path into the /<drive>/<path> form that
        # Docker command lines expect.
        drive, path = os.path.splitdrive(file_dir)
        drive_letter = drive.replace(":","").lower()
        path_to = path.replace("\\","/")
        docker_file_dir = "/" + drive_letter + path_to
    cfg_dict["file_dir"] = file_dir
    cfg_dict["docker_file_dir"] = docker_file_dir
    cfg_dict["file"] = cfg_file
    return cfg_dict
def create_args_parser(cfg_dict):
# Parse command line arguments
args_parser = argparse.ArgumentParser(
description="Build '{}' Docker image".format(cfg_dict["name"]),
epilog="Configuration (incl. default parameters value) are loaded from '{}'".format(cfg_dict["file"]))
args_parser.add_argument(
"--name",
dest="image_name",
type=docker_image_str,
help="Docker image name (default is '%(default)s')")
args_parser.add_argument(
"--tag",
dest="image_tag",
type=docker_tag_str,
help="Docker image tag (default is '%(default)s')")
args_parser.add_argument(
"--version",
help="Version (default is '%(default)s')")
args_parser.add_argument(
"--build",
help="Build identifier (default is '%(default)s')",
default="internal")
args_parser.add_argument(
"-v", "--verbose",
action="store_true",
help="Increase verbosity")
args_parser.add_argument(
"--skip_build",
dest="do_build",
action="store_false",
help="Skip the image build (make sure the image has been built before)")
if "image_test" in cfg_dict:
args_parser.add_argument(
"--test",
dest="test",
action="store_true",
help="Test the image")
args_parser.add_argument(
"--save",
dest="save",
action="store_true",
help="Save the image as a '.tar'")
args_parser.add_argument(
"--push",
dest="push",
action="store_true",
help="Push the image to Docker Hub")
return args_parser
def parse_args(args_parser, cfg_dict):
    """Parse CLI arguments on top of cfg_dict defaults; derive
    'full_image_name' (name:tag) and a filesystem-safe 'image_out_file'
    archive path under out/."""
    args_parser.set_defaults(**cfg_dict)
    cfg_dict.update(vars(args_parser.parse_args()))
    cfg_dict["full_image_name"] = cfg_dict["image_name"] + ":" + cfg_dict["image_tag"]
    # Sanitize name/tag (invalid chars -> '_') and build id (invalid chars dropped).
    cfg_dict["image_out_file"] = os.path.join(
        "out",
        "{}_{}_{}.tar".format(
            "".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "_" for c in cfg_dict["image_name"]),
            "".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "_" for c in cfg_dict["image_tag"]),
            "".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "" for c in cfg_dict["build"])))
    return cfg_dict
def build(cfg_file="package.json"):
    """Drive the image pipeline: load config, parse CLI arguments, then
    (as requested) build, test, save and push the Docker image.

    Exits with a distinct non-zero status per failing stage:
    1=build, 2=test, 3=save, 4=push.
    """
    # Load the configuration
    cfg_dict = load_configuration(cfg_file)
    # Create the cli argument parser
    args_parser = create_args_parser(cfg_dict)
    ## Parse the cli arguments
    cfg_dict = parse_args(args_parser, cfg_dict)
    print ("Building docker image for '{}', version '{}' ({})...".format(
        cfg_dict["name"],
        cfg_dict["version"],
        cfg_dict["build"]))
    if cfg_dict["do_build"]:
        if not templated_run(
                templated_args=["docker", "build", "-t", "${full_image_name}", "."],
                cfg_dict=cfg_dict,
                wd=cfg_dict["file_dir"],
                verbose=cfg_dict["verbose"]):
            exit(1)
        print ("-- Docker image '{}' built successfully".format(cfg_dict["full_image_name"]))
    if "image_test" in cfg_dict and cfg_dict["test"]:
        success = True
        for docker_test_raw_args in cfg_dict["image_test"]:
            if not templated_run(
                    templated_args=docker_test_raw_args,
                    cfg_dict=cfg_dict,
                    wd=cfg_dict["file_dir"],
                    verbose=cfg_dict["verbose"]):
                success = False
        if not success:
            exit(2)
        # BUG FIX: this print referenced the undefined name `image_name`,
        # raising NameError right after a successful test run.
        print ("-- Docker image '{}' tested successfully".format(cfg_dict["full_image_name"]))
    if cfg_dict["save"]:
        image_package_path = os.path.join(cfg_dict["file_dir"], cfg_dict["image_out_file"])
        if not os.path.exists(os.path.dirname(image_package_path)):
            os.makedirs(os.path.dirname(image_package_path))
        if not templated_run(
                templated_args=["docker", "save", "-o", "${docker_file_dir}/${image_out_file}", "${full_image_name}"],
                cfg_dict=cfg_dict,
                wd=cfg_dict["file_dir"],
                verbose=cfg_dict["verbose"]):
            exit(3)
        print ("-- Docker image successfully saved to '{}'".format(image_package_path))
    if cfg_dict["push"]:
        if not templated_run(
                templated_args=["docker", "push", "${full_image_name}"],
                cfg_dict=cfg_dict,
                wd=cfg_dict["file_dir"],
                verbose=cfg_dict["verbose"]):
            exit(4)
        print ("-- Docker image successfully pushed to Docker Hub")
| StarcoderdataPython |
6706344 | <reponame>ulope/raiden-contracts
import pytest
from eth_tester.exceptions import TransactionFailed
from raiden_contracts.constants import EVENT_TOKEN_NETWORK_CREATED
from .fixtures.config import (
raiden_contracts_version,
empty_address,
fake_address,
)
from raiden_contracts.utils.events import check_token_network_created
from web3.exceptions import ValidationError
def test_version(token_network_registry_contract):
    """Deployed contract reports the same major.minor version as the package."""
    assert (token_network_registry_contract.functions.contract_version().call()[:2]
            == raiden_contracts_version[:2])
def test_constructor_call(
web3,
get_token_network_registry,
secret_registry_contract,
get_accounts
):
A = get_accounts(1)[0]
chain_id = int(web3.version.network)
with pytest.raises(TypeError):
get_token_network_registry([])
with pytest.raises(TypeError):
get_token_network_registry([3, chain_id])
with pytest.raises(TypeError):
get_token_network_registry([0, chain_id])
with pytest.raises(TypeError):
get_token_network_registry(['', chain_id])
with pytest.raises(TypeError):
get_token_network_registry([fake_address, chain_id])
with pytest.raises(TypeError):
get_token_network_registry([secret_registry_contract.address, ''])
with pytest.raises(TypeError):
get_token_network_registry([secret_registry_contract.address, '1'])
with pytest.raises(TypeError):
get_token_network_registry([secret_registry_contract.address, -3])
with pytest.raises(TransactionFailed):
get_token_network_registry([empty_address, chain_id])
with pytest.raises(TransactionFailed):
get_token_network_registry([A, chain_id])
with pytest.raises(TransactionFailed):
get_token_network_registry([secret_registry_contract.address, 0])
get_token_network_registry([secret_registry_contract.address, chain_id])
def test_constructor_call_state(web3, get_token_network_registry, secret_registry_contract):
    """Constructor arguments are persisted in the registry's state."""
    chain_id = int(web3.version.network)
    registry = get_token_network_registry([secret_registry_contract.address, chain_id])
    assert secret_registry_contract.address == registry.functions.secret_registry_address().call()
    assert chain_id == registry.functions.chain_id().call()
def test_create_erc20_token_network_call(
token_network_registry_contract,
custom_token,
get_accounts
):
A = get_accounts(1)[0]
fake_token_contract = token_network_registry_contract.address
with pytest.raises(ValidationError):
token_network_registry_contract.functions.createERC20TokenNetwork().transact()
with pytest.raises(ValidationError):
token_network_registry_contract.functions.createERC20TokenNetwork(3).transact()
with pytest.raises(ValidationError):
token_network_registry_contract.functions.createERC20TokenNetwork(0).transact()
with pytest.raises(ValidationError):
token_network_registry_contract.functions.createERC20TokenNetwork('').transact()
with pytest.raises(ValidationError):
token_network_registry_contract.functions.createERC20TokenNetwork(fake_address).transact()
with pytest.raises(TransactionFailed):
token_network_registry_contract.functions.createERC20TokenNetwork(empty_address).transact()
with pytest.raises(TransactionFailed):
token_network_registry_contract.functions.createERC20TokenNetwork(A).transact()
with pytest.raises(TransactionFailed):
token_network_registry_contract.functions.createERC20TokenNetwork(
fake_token_contract
).transact()
token_network_registry_contract.functions.createERC20TokenNetwork(
custom_token.address
).transact()
def test_create_erc20_token_network(
    register_token_network,
    token_network_registry_contract,
    custom_token,
    get_accounts
):
    """Registering a token deploys a TokenNetwork wired to the same token,
    secret registry and chain id as the registry."""
    # No network registered for the token yet.
    assert token_network_registry_contract.functions.token_to_token_networks(
        custom_token.address).call() == empty_address
    token_network = register_token_network(custom_token.address)
    assert token_network.functions.token().call() == custom_token.address
    secret_registry_address = token_network_registry_contract.functions.secret_registry_address().call()  # noqa
    assert token_network.functions.secret_registry().call() == secret_registry_address
    assert (token_network.functions.chain_id().call()
            == token_network_registry_contract.functions.chain_id().call())
def test_events(
    register_token_network,
    token_network_registry_contract,
    custom_token,
    event_handler
):
    """A TokenNetworkCreated event is emitted for the new token network."""
    ev_handler = event_handler(token_network_registry_contract)
    new_token_network = register_token_network(custom_token.address)
    ev_handler.add(
        None,
        EVENT_TOKEN_NETWORK_CREATED,
        check_token_network_created(custom_token.address, new_token_network.address)
    )
    ev_handler.check()
| StarcoderdataPython |
3316359 | """
独立出来的标尺编辑窗口类。架空主程序中相关部分。
2018.12.14修改,将main设为可选,保证可以从数据库独立调用。
"""
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt
from .rulerTabWidget import RulerTabWidget
from .data.line import Line, Ruler
class RulerWidget(QtWidgets.QTabWidget):
    """Tab widget hosting one RulerTabWidget per ruler of a Line, plus a
    trailing tab for creating a new ruler.  (Comments translated from the
    original Chinese; UI strings are left untouched.)"""
    okClicked = QtCore.pyqtSignal()
    showStatus = QtCore.pyqtSignal(str)
    def __init__(self, line: Line, main=None):
        super().__init__()
        self.line = line
        self.main = main
        self.updating = False
        self.initUI()
    def initUI(self):
        """
        Initialise the data: one tab per existing ruler plus a trailing
        tab holding a fresh, unnamed ruler.
        """
        self.updating = True
        self.clear()
        line = self.line
        for ruler in line.rulers:
            self._addRulerTab(ruler)
        new_ruler = Ruler(line=line)
        self._addRulerTab(new_ruler)
        self.updating = False
    def setData(self):
        """
        Added 2019-07-19.
        The number of rulers may have changed; refresh the tab contents.
        Called by the refresh action.
        """
        self.updating = True
        cnt = len(self.line.rulers)
        while self.count() > cnt + 1:
            self.removeTab(self.count() - 1)
        # Update the already-existing widgets first.
        for i, ruler in enumerate(self.line.rulers):
            if i < self.count():
                widget = self.widget(i)
                widget.ruler = ruler
                self._updateRulerTabWidget(widget)
                self.setTabText(i, ruler.name())
            else:
                self._addRulerTab(ruler)
        if self.count() != len(self.line.rulers) + 1:
            # The trailing "new ruler" tab is missing; add it back.
            self._addRulerTab(Ruler(line=self.line, name='新建'))
        else:
            widget = self.widget(self.count() - 1)
            widget.ruler = Ruler(name='新建', line=self.line)
            self.setTabText(self.count() - 1, '新建')
            self._updateRulerTabWidget(widget)
        self.updating = False
    def updateRulerTabs(self):
        """
        Ruler count is known to be unchanged, only contents changed;
        refresh every ruler's tab page.
        """
        for i in range(self.count()):
            widget = self.widget(i)
            self._updateRulerTabWidget(widget)
    def _addRulerTab(self, ruler):
        """
        Main logic moved into RulerTabWidget.initUI(); here we only
        instantiate the widget and connect its signals.
        """
        widget = RulerTabWidget(ruler, self.main)
        tabname = ruler.name()
        if not tabname:
            tabname = "新建"
        self.addTab(widget, tabname)
        widget.newRulerAdded.connect(self._addRulerTab)
        widget.tabNameChanged.connect(self._current_ruler_name_changed)
        widget.rulerDeleted.connect(self._del_ruler)
    def _updateRulerTabWidget(self, widget: RulerTabWidget):
        """
        Delegated to the new updateData().
        """
        widget.updateData()
    @staticmethod
    def _tableRowInterval(tableWidget: QtWidgets.QTableWidget, row: int):
        """
        Return the interval associated with the given table row
        (None when the cell or its data is missing).
        """
        try:
            return tableWidget.item(row, 0).data(-1)
        except:
            return None
    # slots
    def _current_ruler_name_changed(self, name: str):
        self.setTabText(self.currentIndex(), name)
    def _del_ruler(self):
        """
        Refactored 2020-06-08: locate the ruler via the currently shown
        tab instead of a ruler argument.
        """
        tab = self.currentWidget()
        ruler: Ruler = tab.ruler
        line: Line = ruler.line()
        new = line.isNewRuler(ruler)
        if not self.qustion("是否确认删除当前标尺?"):
            return
        if self.main is not None and ruler is self.main.graph.ordinateRuler():
            # If this is the ordinate (layout) ruler, unset it first.
            self.main.changeOrdinateRuler(None)
        self.removeTab(self.currentIndex())
        line.delRuler(ruler)
        if new:
            # If the deleted ruler was the "new" placeholder, recreate one.
            new_ruler = Ruler(line=line)
            self._addRulerTab(new_ruler)
    def _derr(self, note: str):
        # Show an error message box.
        QtWidgets.QMessageBox.warning(self, "错误", note)
    def _dout(self, note: str):
        # Show an informational message box.
        QtWidgets.QMessageBox.information(self, "提示", note)
    def qustion(self, note: str, default=True):
        # NOTE(review): method name is a typo of "question" but is part of
        # the public interface; renaming would break callers.
        flag = QtWidgets.QMessageBox.question(self, '标尺编辑', note,
                                              QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if flag == QtWidgets.QMessageBox.Yes:
            return True
        elif flag == QtWidgets.QMessageBox.No:
            return False
        else:
            return default
| StarcoderdataPython |
5107033 | <filename>organice/bin/__init__.py
"""
Scripts for managing the django Organice project.
"""
| StarcoderdataPython |
325790 | # BSD 2-Clause License
#
# Copyright (c) 2020, <NAME> (<EMAIL>)
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pylivemaker string extraction tool"""
import shutil
import sys
import csv
from pathlib import Path
import click
from livemaker.lsb import LMScript
from livemaker.lsb.command import CommandType
from livemaker.exceptions import LiveMakerException
@click.command()
# BUG FIX: dir_okay was the *string* "False" (truthy), which effectively
# left directories allowed; it is now the boolean False.
@click.argument("lsb_file", required=True, type=click.Path(exists=True, dir_okay=False))
@click.argument("csv_file", required=True, type=click.Path(exists=True, dir_okay=False))
@click.option(
    "-e",
    "--encoding",
    type=click.Choice(["cp932", "utf-8", "utf-8-sig"]),
    default="utf-8",
    help="Input text encoding (defaults to utf-8).",
)
@click.option("--no-backup", is_flag=True, default=False, help="Do not generate backup of original lsb file.")
def insert_strings(lsb_file, csv_file, encoding, no_backup):
    """Insert strings from the given CSV file to a given LSB file.

    CSV_FILE should be a file previously created by the extractstrings command, with added translations.
    --encoding option must match the values were used for extractstrings.

    The original LSB file will be backed up to <lsb_file>.bak unless the --no-backup option is specified.

    NOTE: be very careful with translating strings. Changing the wrong text can break game functionality!
    Lines you would not translate have to be left blank.
    """
    lsb_file = Path(lsb_file)
    print("Patching {} ...".format(lsb_file))
    with open(lsb_file, "rb") as f:
        try:
            lsb: LMScript = LMScript.from_file(f)
        except LiveMakerException as e:
            sys.exit("Could not open LSB file: {}".format(e))
    # Read the whole translation CSV into memory.
    csv_data = []
    with open(csv_file, newline="\n", encoding=encoding) as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=",", quotechar='"')
        for row in csv_reader:
            csv_data.append(row)
    translated = 0
    for c in lsb.commands:
        calc = c.get("Calc")
        if calc:
            for s in calc["entries"]:
                op = s["operands"][0]
                if op["type"] == "Str":
                    for line in csv_data:
                        # BUG FIX: columns 3 and 4 are read below, so rows
                        # need at least 5 columns; the old guard (< 4)
                        # raised IndexError on 4-column rows at line[4].
                        if len(line) < 5:
                            continue
                        if line[3] == "":
                            continue
                        if line[4] == "":
                            continue
                        if line[0] == "pylm:string:{}:{}:{}".format(lsb_file, c.LineNo, s["name"]):
                            op.value = line[4]
                            translated += 1
    if not translated:
        # Nothing matched; leave the file (and any backup) untouched.
        return
    if not no_backup:
        print("Backing up original LSB.")
        shutil.copyfile(str(lsb_file), "{}.bak".format(str(lsb_file)))
    try:
        new_lsb_data = lsb.to_lsb()
        with open(lsb_file, "wb") as f:
            f.write(new_lsb_data)
        print("Wrote new LSB.")
    except LiveMakerException as e:
        sys.exit("Could not generate new LSB file: {}".format(e))
if __name__ == "__main__":
insert_strings()
| StarcoderdataPython |
3458074 | import os, re, yaml
from mdfile import MdFile
RE_MDLINK = r'(?<=\[{2})(.*?)(?=\]{2})' # [[link]]
RE_MDFM = r'(?<=(\-{3}))(.*?)(?=(\-{3}))' # front matter yaml
class MdParser():
    """Walks a directory of markdown files, assigning each page a numeric
    uid and resolving [[wiki-style]] links to those uids."""
    def __init__(self, target_dir):
        self.pages = []
        self.target_dir = target_dir
    # parse markdown front matter (yaml)
    def parse_frontmatter(self, content):
        # NOTE(review): when no front matter is present, re.search returns
        # None and .group raises AttributeError — relied upon by parse_md.
        flags = re.MULTILINE + re.IGNORECASE + re.DOTALL
        fm = re.search(RE_MDFM, content, flags=flags).group(0)
        return yaml.safe_load(fm)
    # grab all the information needed from the markdown file
    def parse_md(self, file_name):
        base_name = os.path.splitext(os.path.basename(file_name))[0]
        with open(file_name, 'r') as f:
            content = f.read()
        try:
            title = self.parse_frontmatter(content)['title']
        except AttributeError:
            # Missing front matter: fall back to the file's base name.
            title = base_name
        links = re.findall(RE_MDLINK, content, flags=re.MULTILINE)
        return MdFile(file_name, base_name, title, links)
    # parse all markdown files in directory
    def parse(self):
        uid = 1
        # parse each markdown file
        for subdir, dirs, files in os.walk(self.target_dir):
            for f in files:
                if f.endswith('md'):
                    path = os.path.join(subdir, f)
                    # Skip files already parsed on a previous call.
                    if not any(x for x in self.pages if x.file_path == path):
                        md = self.parse_md(path)
                        md.uid = uid
                        uid += 1
                        self.pages.append(md)
        # replace mdlinks with uids for future lookup
        # (links whose target page was not found are silently dropped)
        for page in self.pages:
            uids = []
            for link in page.mdlinks:
                uid = list(filter(lambda x: x.base_name == link, self.pages))
                if len(uid) > 0:
                    uids.append(uid[0].uid)
            page.mdlinks = uids
        return self.pages
| StarcoderdataPython |
3235228 | from spinnman.messages.eieio.data_messages.eieio_data_message\
import EIEIODataMessage
from spinnman.exceptions import SpinnmanInvalidParameterException
from spinnman.messages.eieio.data_messages.eieio_key_payload_data_element \
import EIEIOKeyPayloadDataElement
class EIEIOWithPayloadDataMessage(EIEIODataMessage):
    """ An EIEIO message with a payload
    """
    def __init__(self, eieio_header, data=None, offset=0):
        EIEIODataMessage.__init__(self, eieio_header, data, offset)
        # A payload-less header type contradicts this message class.
        if eieio_header.eieio_type.payload_bytes == 0:
            raise SpinnmanInvalidParameterException(
                "eieio_header", eieio_header,
                "This message should have a payload, but the header indicates"
                " that it doesn't")
    def add_key_and_payload(self, key, payload):
        """ Adds a key and payload to the packet

        :param key: The key to add
        :type key: int
        :param payload: The payload to add
        :type payload: int
        :raise SpinnmanInvalidParameterException: If the key or payload is too\
                    big for the format
        """
        # Both key and payload must fit the element width of the header type.
        if key > self._eieio_header.eieio_type.max_value:
            raise SpinnmanInvalidParameterException(
                "key", key,
                "Larger than the maximum allowed of {}".format(
                    self._eieio_header.eieio_type.max_value))
        if payload > self._eieio_header.eieio_type.max_value:
            raise SpinnmanInvalidParameterException(
                "payload", payload,
                "Larger than the maximum allowed of {}".format(
                    self._eieio_header.eieio_type.max_value))
        EIEIODataMessage.add_element(
            self, EIEIOKeyPayloadDataElement(key, payload,
                                             self._eieio_header.is_time))
| StarcoderdataPython |
3486044 | import time
import unittest
from tests.integration.programs import validations as validations_module
class TestGooeyIntegration(unittest.TestCase):
    """
    A few quick integration tests that exercise Gooey's various run modes

    WX Python needs to control the main thread. So, in order to simulate a user
    running through the system, we have to execute the actual assertions in a
    different thread
    """
    def test__gooeyValidation(self):
        """Verifies that custom validation routines supplied via gooey_options prevents
        the user from advancing past the configuration page when they fail"""
        from gooey.tests.integration import runner
        runner.run_integration(validations_module, self.verifyValidators)
    def verifyValidators(self, app, buildSpec):
        # Give the UI a moment to settle before driving it.
        time.sleep(1)
        try:
            app.TopWindow.onStart()
            # After a failed validation the header still shows the config
            # page labels rather than the program name/description.
            title = app.TopWindow.header._header.GetLabel()
            subtitle = app.TopWindow.header._subheader.GetLabel()
            self.assertNotEqual(title, buildSpec['program_name'])
            self.assertNotEqual(subtitle, buildSpec['program_description'])
        except:
            # Tear down the window on any failure so the app does not hang.
            app.TopWindow.Destroy()
            raise
        else:
            import wx
            wx.CallAfter(app.TopWindow.Destroy)
            return None
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
5070524 | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField, SubmitField, SelectMultipleField
from wtforms.validators import DataRequired, Email
from ..models import User
class ProfileUpdate(FlaskForm):
    """Form for editing a user's profile bio."""
    profile_bio=TextAreaField('Tell us about yourself.', validators=[DataRequired()])
    submit=SubmitField('Save')
class CommentForm(FlaskForm):
    """Form for posting a comment on a blog post."""
    comment = TextAreaField('Leave a comment',validators=[DataRequired()])
    submit = SubmitField('Comment')
class BlogForm(FlaskForm):
    """Form for creating a blog post: title, one or more categories, and body."""
    title=TextAreaField('Title', validators=[DataRequired()])
    category=SelectMultipleField('Select Category', choices=[('Politics', 'Politics'), ('Sports', 'Sports'), ('Celebrity-gossip', 'Celebrity-gossip')], validators=[DataRequired()])
    post= TextAreaField('Enter your blog', validators=[DataRequired()])
submit=SubmitField('Create') | StarcoderdataPython |
4843639 | from libs.generate_fingerprint import fingerprint
from libs.constants import *
from itertools import zip_longest
from libs.db import get_conn
import math
def find_matches(channel, sampling_rate=DEFAULT_SAMPLING_RATE, args='remote'):
    """Matches audio fingerprints.

    Fingerprints of an audio channel are matched against the stored
    fingerprints in the database.

    Args:
        channel:
            An audio channel. Array of bytes.
        sampling_rate:
            Number of samples per second taken to construct a discrete signal.
        args:
            Either 'localhost' to connect to localhost server or 'remote'.

    Yields:
        song_id: Song id of a matched fingerprint, wrapped in a one-element list.
    """
    hashes = fingerprint(channel, sampling_rate)
    # Upper-case each hash once so the SQL comparison is case-insensitive.
    mapper = {hash_val.upper(): offset for hash_val, offset in hashes}
    values = mapper.keys()
    # BUG FIX: dict.keys() is never None, so the original `values is None`
    # check was dead code; test for emptiness instead.
    if not values:
        print("no values")
    else:
        conn, cur = get_conn(args)
        counter = 0
        # Hoisted loop invariant: total number of query batches.
        total_steps = math.ceil(len(values) / MATCH_STEP_LENGTH)
        print("\nTaking step length of {}\n".format(MATCH_STEP_LENGTH))
        for split_values in grouper(values, MATCH_STEP_LENGTH):
            counter += 1
            query = '''
            SELECT upper(hash), song_id
            FROM fingerprints
            WHERE upper(hash) IN (%s)
            '''
            split_values = list(split_values)
            # One %s placeholder per hash value in this batch.
            lis = ['%s'] * len(split_values)
            query = query % ', '.join(lis)
            # NOTE(review): assumes a DB-API driver whose execute() returns the
            # selected row count (MySQLdb-style) -- confirm for other drivers.
            x = cur.execute(query, split_values)
            val = ()
            if x > 0:
                val = cur.fetchall()
            matches_found = len(val)
            if matches_found > 0:
                msg = "\tFound {a} hash matches at step {b}/{c}"
                print(msg.format(a=matches_found, b=counter, c=total_steps))
            else:
                msg = "\tNo hash matches found at step {b}/{c}"
                print(msg.format(b=counter, c=total_steps))
            for hashs, song_id in val:
                yield [song_id]
        cur.close()
def grouper(iterable, n, fill_value=None):
    """Yield the elements of *iterable* in chunks of *n*.

    A single shared iterator feeds *n* zip slots, so zip_longest pulls
    consecutive elements into each chunk. The final, possibly short, chunk is
    padded with *fill_value*; the padding (along with any other falsy values,
    by design of filter(None, ...)) is stripped from every yielded chunk.

    Args:
        iterable: Source of elements to group.
        n: Chunk size.
        fill_value: Placeholder used to pad the last chunk.

    Returns:
        A generator of iterators, one per chunk.
    """
    shared = iter(iterable)
    slots = (shared,) * n
    for chunk in zip_longest(*slots, fillvalue=fill_value):
        yield filter(None, chunk)
| StarcoderdataPython |
1619079 | <reponame>odoochain/addons_oca<filename>addons14/knowledge_attachment_category/__manifest__.py
# Odoo module manifest: metadata for the glue module that links the
# `knowledge` and `attachment_category` addons (auto-installed when both
# dependencies are present).
{
    "name": "Knowledge Attachment Category",
    "summary": "Glue module between knowledge and attachment_category",
    "version": "14.0.1.0.0",
    "category": "Knowledge",
    "website": "https://github.com/OCA/knowledge",
    "author": " Akretion, Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "auto_install": True,
    "depends": [
        "knowledge",
        "attachment_category",
    ],
    "data": ["views/knowledge.xml", "views/menu.xml"],
}
| StarcoderdataPython |
5137246 | <filename>code.py
import time
import analogio
import digitalio
import board
# Tuning constants for the traction-control loop.
sample_speed = .01
# (time, retard) calibration tuples consumed by tc_retard_calc(); index 0 is
# the lower bound of band 1, indexes 1-3 are the band upper bounds.
tcslip_time = (0.0, 6.7, 13.3, 20.0)
tcslip_retard = (0.0, 3.3, 6.7, 10.0)
slip_percent = .10
# Wheel-speed threshold below which percentage_calculator() reports 0.
tc_active_above = 50
# Bounds and starting value for the rolling slip window used in the main loop.
slip_window_min = .01
slip_window_max = .05
slip_window = 0.01
# Numerator of the ADC-to-MPH scaling applied in the main loop
# (offset = magic_number / 65532).
magic_number = 200
my_slip = 0
my_retard = 0
# Enable button on pin A2, wired active-low with the internal pull-up.
button = digitalio.DigitalInOut(board.A2)
button.switch_to_input(pull=digitalio.Pull.UP)
def percentage_calculator(num1, num2):
    """Return the percent difference between two wheel speeds.

    Returns 0 whenever traction control should stay inactive: num2 below the
    module-level tc_active_above threshold, or num2 faster than num1.
    """
    inactive = num2 < tc_active_above or num2 > num1
    if inactive:
        return 0
    mean_speed = (num1 + num2) / 2
    return abs(num1 - num2) / mean_speed * 100
def tc_retard_calc(current_slip_time):
    """Map an accumulated slip value onto an ignition-retard amount.

    Walks the three calibration bands defined by the module-level
    tcslip_time / tcslip_retard tuples and scales the input linearly within
    whichever band it falls into (exclusive at both band edges). Returns 0
    when the value is at or below slip_percent or outside every band.
    """
    level_names = ("one", "two", "three")
    for level in (1, 2, 3):
        in_band = (current_slip_time > slip_percent
                   and tcslip_time[level - 1] < current_slip_time < tcslip_time[level])
        if in_band:
            print("level " + level_names[level - 1] + " retard "
                  + str(tcslip_retard[level]) + " " + str(tcslip_time[level]))
            return current_slip_time * (tcslip_retard[level] / tcslip_time[level])
    return 0
# Main control loop: while the enable button (active-low) is held, sample the
# two wheel-speed sensors, compute slip, and derive an ignition-retard value.
# Releasing the button resets the rolling slip window.
while True:
    if button.value:
        slip_window = .01
    if not button.value:
        # NOTE(review): maps the 16-bit ADC reading to MPH; 65532 looks like
        # it should be 65535 (ADC full scale) -- confirm the calibration.
        offset = magic_number / 65532
        vss1 = analogio.AnalogIn(board.A0)
        vss2 = analogio.AnalogIn(board.A1)
        vss1_mph = vss1.value * offset
        vss2_mph = vss2.value * offset
        if(vss1_mph > 0 and vss2_mph > 0):
            my_slip = ((percentage_calculator(vss1_mph, vss2_mph)) * slip_window)
            # calculate the rolling slip window
            if(my_slip > slip_percent and slip_window < slip_window_max):
                slip_window = slip_window + .01
            elif(my_slip < slip_percent and slip_window > slip_window_min):
                slip_window = slip_window - .01
            if(my_slip > slip_percent):
                my_retard = tc_retard_calc(my_slip*100)
            else:
                my_retard = 0
            if(my_retard > 0):
                print("vss1:[%0.2f] vss2:[%0.2f] slip:[%0.2f] retard:[%0.2f] window:[%0.2f]" %
                      (vss1_mph, vss2_mph, my_slip, my_retard, slip_window))
            # print("(%0.2f,%0.2f,%0.2f,%0.2f)" %
            #      (vss1_mph, vss2_mph, my_slip, my_retard))
        # Release the analog pins so they can be re-created next iteration.
        vss1.deinit()
        vss2.deinit()
    time.sleep(sample_speed)
time.sleep(sample_speed) | StarcoderdataPython |
11318584 | """
* Assignment: <NAME>
* Complexity: medium
* Lines of code: 20 lines
* Time: 21 min
English:
    1. Download the data from the URL above
    2. For each species
    3. Visualize the ratio of `sepal_length` to `sepal_width` as a `scatter` plot using `matplotlib`
    4. Each species should have a different color
    5. Run doctests - all must succeed
Polish:
1. Z podanego powyżej adresu URL pobierz dane
2. Dla każdego gatunku
3. Dane stosunku `sepal_length` do `sepal_width` zwizualizuj w formie `scatter` za pomocą `matplotlib`
4. Każdy gatunek powinien mieć inny kolor
5. Uruchom doctesty - wszystkie muszą się powieść
Hints:
* `pd.groupby()`
"""
import matplotlib.pyplot as plt
import pandas as pd
DATA = 'https://python.astrotech.io/_static/iris.csv'
| StarcoderdataPython |
11347924 | import Question
from Question_Class import Question_Class
# We are building questions to answers
# We are building questions to answers
# Pair each question prompt (from the Question module) with its expected
# single-letter answer.
questionsandAnswers = [Question_Class(Question.questionsList[0], "a"),
                       Question_Class(Question.questionsList[1], "b"),
                       Question_Class(Question.questionsList[2], "b")
                       ]
# print(questionsandAnswers[0])
def take_test(questionsandAnswers):
    """Prompt the user with each question and print the final score out of 3."""
    correct = 0
    for item in questionsandAnswers:
        given = input(item.questionActual)
        if given == item.response:
            correct += 1
    print("Total score is "+str(correct)+"/3")
# Run the quiz immediately when the module is executed.
take_test(questionsandAnswers)
| StarcoderdataPython |
3243537 | from flask import Flask, render_template, request, flash, redirect, url_for
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user
from flask_sqlalchemy import SQLAlchemy
from models import *
import os
ALLOWED_EXTENSIONS = set(['jpg'])
app = Flask(__name__, static_url_path='/static')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.abspath('cf-challenge.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
login_manager = LoginManager(app)
db = SQLAlchemy(app)
| StarcoderdataPython |
5193758 | <gh_stars>1-10
from datetime import datetime
import numpy as np
from pytz import timezone
from thermophysical import p_atm, \
T_env, \
d_from_p_t, \
d_from_p_sl, \
h_from_p_sl, \
h_from_p_sv, \
r_from_p_sl
# liquefier data
v_linde_dewar_L_hr = 54.4 # production rate [L/hr]
p_linde_dewar_gauge_psi = 3.2 # pressure in the dewar [psi gauge]
p_linde_dewar = 101325 + p_linde_dewar_gauge_psi * 6894.76 # pressure in the dewar [Pa]
t_rampup_linde_cold = 1 * 3600 # time required to ramp linde production from cold state [s]
t_rampup_linde_warm = 3 * 3600 # time required to ramp linde production from warm state [s]
t_linde_warmup_threshold = 2 * 24 * 3600 # max off time after which full warmup needed
t_linde_warmup = 2 * 24 * 3600 # full warmup time, in addition to the threshold
V_linde_dewar_min_safe_L = 100 # minimal safe level in the dewar (stop fills when drops below) [L]
V_linde_dewar_min_okay_L = 110 # minimal level in the dewar at which fills can be started [L]
V_linde_dewar_max_L = 900 # maximal level in the dewar [L]
V_linde_dewar_start_L = 500 # threshold of dewar level for NOT starting linde if it's not running already [L]
x_linde_dewar_loss_day = 0.5 / 100 # dewar liquid helium loss per day
x_linde_production_transfer = 47.0 / 100 # max reduction of production at max withdrawal rate
x_linde_dewar_fill_loss = 0.0 / 100 # losses when filling dewars as a fraction of what lands into dewar
v_dewar_pull_run_L_hr = 79.2 # max dewar withdrawal rate while linde is running [L/hr]
v_dewar_pull_off_L_hr = 120 # max dewar withdrawal rate rate while linde is off [L/hr]
m_linde_loss_g_s = 0.01 # rate of helium loss when liquefier is running [g/s]
# liquefier calcs
d_linde_dewar = d_from_p_sl(p_linde_dewar) # density of liquid in the dewar [kg/m^3]
m_linde_dewar = v_linde_dewar_L_hr * 1e-3 / 3600 * d_linde_dewar # production rate [kg/s]
M_linde_dewar_min_safe = V_linde_dewar_min_safe_L * 1e-3 * d_linde_dewar # min amount in the dewar [kg]
M_linde_dewar_min_okay = V_linde_dewar_min_okay_L * 1e-3 * d_linde_dewar # min amount in the dewar [kg]
M_linde_dewar_max = V_linde_dewar_max_L * 1e-3 * d_linde_dewar # max amount in the dewar [kg]
# M_linde_dewar_start: threshold of dewar level for NOT starting linde if it's not running [kg]
M_linde_dewar_start = V_linde_dewar_start_L * 1e-3 * d_linde_dewar
m_linde_dewar_loss = M_linde_dewar_max * x_linde_dewar_loss_day / 24 / 3600 # dewar evap rate [kg/s]
m_dewar_pull_run = v_dewar_pull_run_L_hr * 1e-3 * d_linde_dewar / 3600 # max dewar fill flow rate [L/hr]
m_dewar_pull_off = v_dewar_pull_off_L_hr * 1e-3 * d_linde_dewar / 3600 # dewar fill flow rate [L/hr]
m_dewar_pull_loss_run = x_linde_dewar_fill_loss * m_dewar_pull_run # dewar fill losses while linde is running [kg/s]
m_dewar_fill_loss_off = x_linde_dewar_fill_loss * m_dewar_pull_off # dewar fill losses while linde is off [kg/s]
m_linde_loss = m_linde_loss_g_s * 1e-3 # rate of helium loss when liquefier is running [kg/s]
# hp & lp storage data
p_hp_storage_max_psi = 3600 # max allowed pressure in hp storage [psi]
p_hp_storage_min_psi = 500 # min allowed pressure in hp storage [psi]
m_hp_compressor = 4.08e-3 / 3 # total flow of recovery compressors per each compressor [kg/s]
V_hp_storage_cu_ft = 45.9 * 9 # total volume of impure hp storage [ft^3]
V_bag_max_cu_ft = 1500 # max volume of the bag [ft^3]
x_bag_setpoint_high_1 = 0.70 # high setpoint of bag volume for hp compressor 1
x_bag_setpoint_high_2 = 0.75 # high setpoint of bag volume for hp compressor 2
x_bag_setpoint_high_3 = 0.80 # high setpoint of bag volume for hp compressor 3
x_bag_setpoint_low_1 = 0.25 # low setpoint of bag volume for hp compressor 1
x_bag_setpoint_low_2 = 0.30 # low setpoint of bag volume for hp compressor 2
x_bag_setpoint_low_3 = 0.35 # low setpoint of bag volume for hp compressor 3
# hp & lp storage calcs
p_hp_storage_max = p_hp_storage_max_psi * 6894.76 # max allowed pressure in hp storage [Pa]
p_hp_storage_min = p_hp_storage_min_psi * 6894.76 # min allowed pressure in hp storage [Pa]
V_hp_storage = V_hp_storage_cu_ft * 0.0283168 # total volume of impure hp storage [m^3]
V_bag_max = V_bag_max_cu_ft * 0.0283168 # max volume of the bag [m^3]
M_hp_storage_max = V_hp_storage * d_from_p_t(p_hp_storage_max, T_env) # max amount of gas in hp storage [kg]
M_hp_storage_min = V_hp_storage * d_from_p_t(p_hp_storage_min, T_env) # min amount of gas in hp storage [kg]
d_bag = d_from_p_t(p_atm, T_env) # density of helium in helium bag [kg/m^3]
M_bag_max = V_bag_max * d_bag # max amount of gas in the bag [kg]
# portable dewars data
N_dewars_purchased_max = 100 # max number of dewars to be purchased
N_dewars = 9 # total number of dewars
p_portable_dewar = 101325 # pressure in portable dewars [Pa]
x_portable_dewar_loss_day = 1.0 / 100 # liquid helium loss per day [-]
V_portable_dewar_full_L = 330 # max level [L]
V_portable_dewar_min_L = 20 # min level [L]
V_portable_dewar_topup_L = 150 # threshold for top up [L]
V_portable_dewar_cooldown_L = 100 # amount of helium required to cool down dewar from warm state [L]
# portable dewars calcs
d_portable_dewar = d_from_p_sl(p_portable_dewar) # density of liquid in portable dewars [kg/m^3]
M_portable_dewar_full = V_portable_dewar_full_L * 1e-3 * d_portable_dewar # full amount in portable dewars [kg]
M_portable_dewar_min = V_portable_dewar_min_L * 1e-3 * d_portable_dewar # full amount in portable dewars [kg]
M_portable_dewar_topup = V_portable_dewar_topup_L * 1e-3 * d_portable_dewar # threshold for top up [kg]
# m_portable_dewar_loss: loss rate from portable dewars [kg/s]
m_portable_dewar_loss = x_portable_dewar_loss_day * M_portable_dewar_full / 24 / 3600
# M_portable_dewar_cooldown: amount of helium required to cool down dewar from warm state [kg]
M_portable_dewar_cooldown = V_portable_dewar_cooldown_L * 1e-3 * d_portable_dewar # cool down amount [kg]
# M_linde_dewar_fill_ok: minimal required level in main dewar to start a fill
M_linde_dewar_fill_ok = M_linde_dewar_min_safe + M_portable_dewar_topup
# ucn source cryostat data
v_ucn_static_L_hr = 12 # flow rate from only static heat load (4K + 1K pots) [L/hr]
v_ucn_beam_L_hr = 19 # flow rate from only beam loading (4K + 1K pots), adjusted to make the run possible [L/hr]
v_transfer_line_L_hr = 79.2 # flow rate through the transfer line as seen from linde dewar [L/hr]
P_transfer_line = 0.06 * 40 # heat load to the transfer line [W/m * m = W]
P_transfer_misc = 0.5 + 1 # heat load from valves, field joints, etc. [W]
p_ucn_4K = 101325 # pressure in the 4K pot of ucn source
V_ucn_4K_min_L = 100 # min level in UCN cryostat [L]
V_ucn_4K_max_L = 180 # max level in UCN cryostat [L]
v_ucn_cooldown_L_hr = 19.0 # flow rate during cooldown loads [L/hr]
t_ucn_cooldown_hrs = 72 # ucn cryostat cooldown period [hr]
# ucn source cryostat calcs
d_ucn_4K = d_from_p_sl(p_ucn_4K) # density of liquid in ucn 4K pot [kg/m^3]
M_ucn_4K_min = V_ucn_4K_min_L * 1e-3 * d_ucn_4K # min level in UCN cryostat [kg]
M_ucn_4K_max = V_ucn_4K_max_L * 1e-3 * d_ucn_4K # max level in UCN cryostat [kg]
m_ucn_static = v_ucn_static_L_hr * 1e-3 * d_ucn_4K / 3600 # flow rate at static heat load (4K + 1K pots) [kg/s]
m_ucn_beam = v_ucn_beam_L_hr * 1e-3 * d_ucn_4K / 3600 # flow rate at heat load with beam (4K + 1K pots) [kg/s]
m_ucn_cooldown = v_ucn_cooldown_L_hr * 1e-3 * d_ucn_4K / 3600 # flow rate at cooldown loads [kg/s]
# m_transfer_line: flow rate through the transfer line as seen from linde dewar [kg/s]
m_transfer_line = v_transfer_line_L_hr * 1e-3 * d_linde_dewar / 3600
P_transfer_total = P_transfer_line + P_transfer_misc # total heat load to transfer line [W]
# enthalpy balance: h(p_linde_dewar, SL) = h(p_ucn_4K, SV) * x_vapor_ucn_4K + h(p_ucn_4K, SL) * (1 - x_vapor_ucn_4K)
# x_vapor_ucn_4K_JT: fraction of helium vapor generated from JT process in transfer line
x_vapor_ucn_4K_JT = (h_from_p_sl(p_linde_dewar) - h_from_p_sl(p_ucn_4K))/(h_from_p_sv(p_ucn_4K) - h_from_p_sl(p_ucn_4K))
# q_latent_transfer_line: latent heat of liquid helium in transfer line [J/kg]
q_latent_transfer_line = 0.5 * (r_from_p_sl(p_linde_dewar) + r_from_p_sl(p_ucn_4K)) # when in doubt, use average
m_vapor_ucn_4K_Q = P_transfer_total / q_latent_transfer_line # helium vapor generated by heat load [kg/s]
m_transfer_line_trickle = m_vapor_ucn_4K_Q + x_vapor_ucn_4K_JT * m_transfer_line # minimal flow to keep transfer line cold [kg/s]
t_ucn_cooldown = t_ucn_cooldown_hrs * 3600 # ucn cryostat cooldown period [s]
# timezone and conversions
triumf_tz = timezone('America/Vancouver')


def parse_time(timestamp_text):
    """Parse 'YYYY-MM-DD HH:MM:SS' (America/Vancouver local time) to a Unix timestamp.

    Replaces the original lambda assignment (PEP 8 discourages binding a
    lambda to a name); behavior is unchanged.
    """
    naive = datetime.strptime(timestamp_text, '%Y-%m-%d %H:%M:%S')
    return triumf_tz.localize(naive).timestamp()
# iteration data
timestep = 60 # timestep for iteration [s]
start_time = parse_time('2027-04-01 00:00:00') # starting time in YYYY-MM-DD HH:MM:SS format
end_time = parse_time('2027-12-31 23:59:59') # end time in YYYY-MM-DD HH:MM:SS format
prediction_window = 5 * 24 * 3600 # period for predicting future use and making operational decisions [s]
# schedule tuples: [(start, stop), (start, stop), ... ]
# dewar 1 is 0, dewar 2 is 1, etc.
schedule = {}
# make sure beam doesn't start for at least t_ucn_cooldown period after ucn starts running
schedule['ucn_source'] = [(parse_time('2027-04-01 08:00:00'), parse_time('2027-05-01 20:00:00')),
(parse_time('2027-06-01 08:00:00'), parse_time('2027-07-01 20:00:00')),
(parse_time('2027-08-01 08:00:00'), parse_time('2027-09-01 20:00:00')),]
schedule['ucn_beam'] = [(parse_time('2027-04-14 08:00:00'), parse_time('2027-04-30 20:00:00')),
(parse_time('2027-06-14 08:00:00'), parse_time('2027-06-30 20:00:00')),
(parse_time('2027-08-14 08:00:00'), parse_time('2027-08-30 20:00:00')),]
schedule[0] = [(parse_time('2027-04-08 08:00:00'), parse_time('2027-08-22 20:00:00'))]
schedule[1] = [(parse_time('2027-08-26 08:00:00'), parse_time('2027-09-27 20:00:00'))]
schedule[2] = schedule[1] # 2 and 3 operate together
schedule[3] = [(parse_time('2027-10-14 08:00:00'), parse_time('2027-11-22 20:00:00'))]
schedule[4] = schedule[3] # 4 and 5 operate together
schedule[5] = [(parse_time('2027-04-09 08:00:00'), parse_time('2027-09-17 20:00:00'))]
schedule[6] = [(parse_time('2027-06-25 08:00:00'), parse_time('2027-12-13 20:00:00'))]
schedule[7] = [(parse_time('2027-04-13 08:00:00'), parse_time('2027-12-01 20:00:00'))]
schedule[8] = [(parse_time('2027-04-13 08:00:00'), parse_time('2027-12-15 20:00:00'))]
schedule[9] = [(parse_time('2027-04-18 08:00:00'), parse_time('2027-05-10 20:00:00'))]
schedule[10] = [(parse_time('2027-04-19 08:00:00'), parse_time('2027-07-26 20:00:00'))]
schedule[11] = [(parse_time('2027-04-19 08:00:00'), parse_time('2027-07-19 20:00:00'))]
# consumption data: L/wk to kg/s
# Weekly liquid-helium consumption per experiment [L/wk]; index i is experiment i.
_CMMS_WEEKLY_LITRES = (420, 154, 330, 140, 495, 330, 330, 140, 330, 33.3, 330, 165)
# Each entry converts litres/week -> kg/s using the same expression (and the
# same left-to-right float evaluation order) as the original per-line code:
# litres * 1e-3 [m^3/L] * density [kg/m^3] / seconds per week.
cmms_consumption = np.array(
    [litres * 1e-3 * d_portable_dewar / 7 / 24 / 3600 for litres in _CMMS_WEEKLY_LITRES],
    dtype=float)
| StarcoderdataPython |
347127 | #!/usr/bin/env python
# coding:utf-8
#code by:YasserBDJ96
#email:<EMAIL>
#START{
from setuptools import setup,find_packages
# Package metadata for the `timeloading` animated loading-bar library.
setup(
    name="timeloading",
    version="0.0.1",
    author="YasserBDJ96",
    author_email="<EMAIL>",
    description='''Animated loading bar. This package is a loading bar that appears when a specific function is run an animation and text. This bar is stoped to run after the function has finished working. You can control the shape and the waiting message, even the animation and its colors.''',
    long_description_content_type="text/markdown",
    long_description=open('README.md','r').read(),
    license='''MIT License''',
    packages=find_packages(),
    url="https://yasserbdj96.github.io/",
    project_urls={
        'Source Code': "https://github.com/yasserbdj96/timeloading",
        'Instagram': "https://www.instagram.com/yasserbdj96/",
    },
    install_requires=[],
    keywords=['yasserbdj96','python','loading','bar'],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries :: Python Modules",
        'Topic :: Communications :: Email'
    ],
    # BUG FIX: ">=3.x.x" is not a valid PEP 440 version specifier and makes
    # pip reject the package metadata; require any Python 3 instead.
    python_requires=">=3"
)
#}END. | StarcoderdataPython |
9616229 | <reponame>plegulluche/OPC-p7
import json
from grandpy.apigoogle import Apigoogle
def mock_requestget(*args, **kwargs):
    """Stand-in for requests.get that returns a canned Google Geocoding payload.

    Accepts (and ignores) any arguments so it can be patched over requests.get.
    The returned object mimics the subset of the requests.Response interface
    used by the code under test: .data, .status_code, .status(), .json().
    """
    payload = {
        "results": [
            {
                "adress_components": ["some irrelevants components"],
                "formatted_address": "Champ de Mars, 5 Av. Anatole France, 75007 Paris, France",
                "geometry": {
                    "location": {"lat": 48.85837009999999, "lng": 2.2944813}
                },
            }
        ]
    }

    class mock_response:
        def __init__(self):
            # Serialised body, mirroring what a real HTTP response would carry.
            self.data = json.dumps(payload)
            self.status_code = self.status()

        def status(self):
            return 200

        def json(self):
            return json.loads(self.data)

    return mock_response()
def mock_parser(*arg, **kwargs):
    """Fake location parser: always resolves to the Eiffel Tower query string."""
    canned_location = "tour eiffel paris"
    return canned_location
def test_response_status_is_200(mocker):
    """The API client should succeed when the (mocked) HTTP call returns 200."""
    # Patch the network call and the parser so the client runs fully offline.
    mocker.patch("requests.get", mock_requestget)
    mocker.patch("grandpy.customparse.Customparser.get_loc_as_string", mock_parser)
    api = Apigoogle("astringtest")
    result = api.extract_google_data_from_response()
    assert result != "request failed"
def test_request_google_gets_coordinates_and_adress(mocker):
    """The client should extract lat/lng, status, and address from the payload."""
    mocker.patch("requests.get", mock_requestget)
    mocker.patch("grandpy.customparse.Customparser.get_loc_as_string", mock_parser)
    api = Apigoogle("astringtest")
    result = api.extract_google_data_from_response()
    # Values mirror the canned payload built in mock_requestget above.
    assert result == {
        "lat": 48.85837009999999,
        "lng": 2.2944813,
        "status_google": 200,
        "address": "Champ de Mars, 5 Av. Anatole France, 75007 Paris, France",
    }
| StarcoderdataPython |
9676978 | <reponame>michaelwang123/PaddleRec
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from cluster import Cluster
import time
import argparse
from tree_search_util import tree_search_main
# Command-line arguments for the tree-building pipeline.
parser = argparse.ArgumentParser()
parser.add_argument("--emd_path", default='', type=str, help=".")
parser.add_argument("--emb_size", default=64, type=int, help=".")
parser.add_argument("--threads", default=1, type=int, help=".")
parser.add_argument("--n_clusters", default=3, type=int, help=".")
parser.add_argument("--output_dir", default='', type=str, help='.')
args = parser.parse_args()
def main():
    """Build the TDM tree: cluster item embeddings, then derive search tables.

    Outputs (under args.output_dir): tree.pkl and id2item.json from the
    clustering step, plus tree_info / travel_list / layer_list files used by
    the training process.
    """
    cur_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # FIX: use os.makedirs instead of shelling out to `mkdir -p`: portable,
    # no shell injection via output_dir, and raises on real failures.
    os.makedirs(args.output_dir, exist_ok=True)
    print('%s start build tree' % cur_time)
    # 1. Tree clustering, generating two files in output_dir: tree.pkl, id2item.json
    cluster = Cluster(
        args.emd_path,
        args.emb_size,
        parall=args.threads,
        output_dir=args.output_dir,
        _n_clusters=args.n_clusters)
    cluster.train()
    # 2. Tree searching, generating tree_info, travel_list, layer_list for train process.
    tree_search_main(
        os.path.join(args.output_dir, "tree.pkl"),
        os.path.join(args.output_dir, "id2item.json"), args.output_dir,
        args.n_clusters)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4898497 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-08-12 05:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Coupon.number_of_usage and an
    # optional Ticket.coupon_usage FK back to the redeemed coupon.
    dependencies = [
        ('tickets', '0008_ticket_created_tickets'),
    ]
    operations = [
        migrations.AddField(
            model_name='coupon',
            name='number_of_usage',
            field=models.IntegerField(default=1),
        ),
        migrations.AddField(
            model_name='ticket',
            name='coupon_usage',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='usages', to='tickets.Coupon'),
        ),
    ]
| StarcoderdataPython |
12811756 | <gh_stars>0
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from move_robot import MoveKobuki
class LineFollower(object):
    """Follows a yellow line seen by the robot's RGB camera.

    Subscribes to the raw camera image, isolates yellow pixels in HSV space,
    picks the right-most blob centroid, and steers a Kobuki base toward it
    with a proportional controller on the horizontal error.
    """
    def __init__(self):
        self.bridge_object = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
        self.movekobuki_object = MoveKobuki()
    def camera_callback(self,data):
        try:
            # We select bgr8 because it's the OpenCV encoding by default
            cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
        except CvBridgeError as e:
            # NOTE(review): execution continues after this print, so a failed
            # conversion leaves cv_image undefined and the next line raises.
            print(e)
        # We get image dimensions and crop the parts of the image we dont need
        # Bear in mind that because its image matrix first value is start and second value is down limit.
        # Select the limits so that it gets the line not too close, not too far and the minimum portion possible
        # To make process faster.
        height, width, channels = cv_image.shape
        descentre = 160
        rows_to_watch = 100
        # NOTE(review): the trailing [1:width] slices ROWS again, not columns
        # (that would be [:, 1:width]); also (height)/2 is a float under
        # Python 3, which breaks slicing -- this appears to assume Python 2.
        crop_img = cv_image[(height)/2+descentre:(height)/2+(descentre+rows_to_watch)][1:width]
        # Convert from RGB to HSV
        hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([20,100,100])
        upper_yellow = np.array([50,255,255])
        # Threshold the HSV image to get only yellow colors
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(crop_img,crop_img, mask= mask)
        contours, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
        rospy.loginfo("Number of centroids==>"+str(len(contours)))
        centres = []
        for i in range(len(contours)):
            moments = cv2.moments(contours[i])
            try:
                centres.append((int(moments['m10']/moments['m00']), int(moments['m01']/moments['m00'])))
                cv2.circle(res, centres[-1], 10, (0, 255, 0), -1)
            except ZeroDivisionError:
                # Degenerate contour with zero area; skip it.
                pass
        rospy.loginfo(str(centres))
        #Select the right centroid
        # [(542, 39), (136, 46)], (x, y)
        most_right_centroid_index = 0
        index = 0
        max_x_value = 0
        for candidate in centres:
            # Retrieve the cx value
            cx = candidate[0]
            # Get the Cx more to the right
            if cx >= max_x_value:
                max_x_value = cx
                most_right_centroid_index = index
            index += 1
        # NOTE(review): raises IndexError when no centroid was found (centres
        # empty) -- confirm upstream guarantees at least one yellow blob.
        cx = centres[most_right_centroid_index][0]
        cy = centres[most_right_centroid_index][1]
        rospy.logwarn("Winner =="+str(cx)+","+str(cy)+"")
        # Draw the centroid in the result image
        # cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]])
        cv2.circle(res,(int(cx), int(cy)), 5,(0,0,255),-1)
        cv2.imshow("Original", cv_image)
        #cv2.imshow("HSV", hsv)
        #cv2.imshow("MASK", mask)
        cv2.imshow("RES", res)
        cv2.waitKey(1)
        # Proportional steering: horizontal offset from image centre sets the
        # angular velocity; forward speed is constant.
        error_x = cx - width / 2;
        twist_object = Twist();
        twist_object.linear.x = 0.2;
        twist_object.angular.z = -error_x / 100;
        rospy.loginfo("ANGULAR VALUE SENT===>"+str(twist_object.angular.z))
        # Make it start turning
        self.movekobuki_object.move_robot(twist_object)
    def clean_up(self):
        # Stop the robot and close any OpenCV debug windows.
        self.movekobuki_object.clean_class()
        cv2.destroyAllWindows()
def main():
    """Spin up the line-follower node and keep it alive until ROS shutdown."""
    rospy.init_node('line_following_node', anonymous=True)
    line_follower_object = LineFollower()
    rate = rospy.Rate(5)
    # BUG FIX: the original assigned `ctrl_c = True` inside shutdownhook,
    # which only created a local variable in the hook and never updated the
    # flag read by the loop below. A mutable container makes the write
    # visible to the enclosing scope.
    state = {'ctrl_c': False}
    def shutdownhook():
        # works better than the rospy.is_shut_down()
        line_follower_object.clean_up()
        rospy.loginfo("shutdown time!")
        state['ctrl_c'] = True
    rospy.on_shutdown(shutdownhook)
    while not state['ctrl_c']:
        rate.sleep()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8109281 | import asyncio
import datetime
import freezegun
import itertools
import operator
import pytest
import pytest_mock
import pydantic
import re
import servo
import servo.pubsub
import servo.utilities.pydantic
import weakref
from typing import Callable, List, Optional
class TestMessage:
    """Unit tests for servo.pubsub.Message construction and content helpers."""
    def test_text_message(self) -> None:
        message = servo.pubsub.Message(text='A great and insightful message.')
        assert message.text == 'A great and insightful message.'
        assert message.content_type == 'text/plain'
        assert message.content == b'A great and insightful message.'
    def test_text_message_override_content_type(self) -> None:
        message = servo.pubsub.Message(text='A great and insightful message.', content_type="funky/text")
        assert message.text == 'A great and insightful message.'
        assert message.content_type == 'funky/text'
        assert message.content == b'A great and insightful message.'
    def test_text_message_raises_if_not_string(self) -> None:
        with pytest.raises(ValueError, match="Text Messages can only be created with `str` content: got 'int'"):
            servo.pubsub.Message(text=1234)
    def test_json_message(self) -> None:
        message = servo.pubsub.Message(json={"key": "value"})
        assert message.text == '{"key": "value"}'
        assert message.content_type == 'application/json'
        assert message.content == b'{"key": "value"}'
    # Time is frozen so the serialized created_at field is deterministic.
    @freezegun.freeze_time("2021-01-01 12:00:01")
    def test_json_message_via_protocol(self) -> None:
        # NOTE: Use Pydantic's json() method support
        channel = servo.pubsub.Channel.construct(name="whatever", created_at=datetime.datetime.now())
        message = servo.pubsub.Message(json=channel)
        assert message.text == '{"name": "whatever", "description": null, "created_at": "2021-01-01T12:00:01"}'
        assert message.content_type == 'application/json'
        assert message.content == b'{"name": "whatever", "description": null, "created_at": "2021-01-01T12:00:01"}'
    def test_yaml_message(self) -> None:
        message = servo.pubsub.Message(yaml={"key": "value"})
        assert message.text == 'key: value\n'
        assert message.content_type == 'application/x-yaml'
        assert message.content == b'key: value\n'
    def test_content_message(self) -> None:
        message = servo.pubsub.Message(content=b"This is the message", content_type="foo/bar")
        assert message.text == 'This is the message'
        assert message.content_type == 'foo/bar'
        assert message.content == b'This is the message'
    def test_created_at(self) -> None:
        message = servo.pubsub.Message(content=b"This is the message", content_type="foo/bar")
        assert message.created_at is not None
    class TestValidations:
        """Messages must carry both content and a content type."""
        def test_content_required(self) -> None:
            with pytest.raises(pydantic.ValidationError) as excinfo:
                servo.pubsub.Message()
            assert {
                "loc": ("content",),
                "msg": "none is not an allowed value",
                "type": "type_error.none.not_allowed",
            } in excinfo.value.errors()
        def test_content_type_required(self) -> None:
            with pytest.raises(pydantic.ValidationError) as excinfo:
                servo.pubsub.Message(content=b'foo')
            assert {
                "loc": ("content_type",),
                "msg": "none is not an allowed value",
                "type": "type_error.none.not_allowed",
            } in excinfo.value.errors()
@pytest.fixture
async def exchange() -> servo.pubsub.Exchange:
    """Provide a fresh Exchange, shutting it down (or clearing it) afterwards."""
    exchange = servo.pubsub.Exchange()
    yield exchange
    # Shut down the exchange if it was left running
    if exchange.running:
        await exchange.shutdown()
    else:
        exchange.clear()
@pytest.fixture
def channel(exchange: servo.pubsub.Exchange) -> servo.pubsub.Channel:
    """Provide a 'metrics' Channel bound to the test Exchange."""
    return servo.pubsub.Channel(name="metrics", exchange=exchange)
class TestChannel:
    """Unit tests for servo.pubsub.Channel behavior and validation."""
    class TestValidations:
        def test_name_required(self, exchange: servo.pubsub.Exchange) -> None:
            with pytest.raises(pydantic.ValidationError) as excinfo:
                servo.pubsub.Channel(exchange=exchange)
            assert {
                "loc": ("name",),
                "msg": "field required",
                "type": "value_error.missing",
            } in excinfo.value.errors()
        def test_name_constraints(self, exchange: servo.pubsub.Exchange) -> None:
            with pytest.raises(pydantic.ValidationError) as excinfo:
                servo.pubsub.Channel(exchange=exchange, name="THIS_IS_INVALID***")
            assert {
                "loc": ("name",),
                "msg": 'string does not match regex "^[0-9a-zA-Z]([0-9a-zA-Z\\.\\-_])*[0-9A-Za-z]$"',
                "type": "value_error.str.regex",
                'ctx': {
                    'pattern': '^[0-9a-zA-Z]([0-9a-zA-Z\\.\\-_])*[0-9A-Za-z]$',
                }
            } in excinfo.value.errors()
        def test_exchange_required(self) -> None:
            with pytest.raises(TypeError, match=re.escape("__init__() missing 1 required keyword-only argument: 'exchange'")):
                servo.pubsub.Channel()
    def test_hashing(self, channel: servo.pubsub.Channel) -> None:
        # Channels hash by name: renaming a copy changes its set membership.
        channels = {channel,}
        copy_of_channel = channel.copy()
        assert copy_of_channel in channels
        copy_of_channel.name = "another_name"
        assert copy_of_channel not in channels
        channels.add(copy_of_channel)
        assert copy_of_channel in channels
        assert channel in channels
    def test_comparable_to_str(self, channel: servo.pubsub.Channel) -> None:
        assert channel != 'foo'
        assert channel == 'metrics'
    async def test_publish(self, channel: servo.pubsub.Channel, mocker: pytest_mock.MockFixture) -> None:
        # Channel.publish must delegate to the Exchange with itself attached.
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(channel.exchange):
            spy = mocker.spy(channel.exchange, "publish")
            await channel.publish(message)
            spy.assert_called_once_with(message, channel)
    async def test_publish_fails_if_channel_is_closed(self, channel: servo.pubsub.Channel, mocker: pytest_mock.MockFixture) -> None:
        await channel.close()
        assert channel.closed
        with pytest.raises(RuntimeError, match='Cannot publish messages to a closed Channel'):
            await channel.publish(servo.pubsub.Message(text="foo"))
    async def test_closing_channel_cancels_exclusive_subscribers(self, exchange: servo.pubsub.Exchange, channel: servo.pubsub.Channel, mocker: pytest_mock.MockFixture) -> None:
        # Only subscribers bound exactly to this channel's name are cancelled;
        # glob and regex subscribers survive the close.
        exclusive_subscriber = exchange.create_subscriber(channel.name)
        glob_subscriber = exchange.create_subscriber(channel.name + '*')
        regex_subscriber = exchange.create_subscriber(f'/{channel.name}.*/')
        await channel.close()
        assert channel.closed
        assert exclusive_subscriber.cancelled
        assert not glob_subscriber.cancelled
        assert not regex_subscriber.cancelled
    async def test_iteration(self, channel: servo.pubsub.Channel, mocker: pytest_mock.MockFixture) -> None:
        messages = []
        async def _subscriber() -> None:
            async for message in channel:
                messages.append(message)
                if len(messages) == 3:
                    channel.stop()
        async def _publisher() -> None:
            for i in range(3):
                await channel.publish(servo.pubsub.Message(text=f"Message: {i}"))
        channel.exchange.start()
        # task_graph is a test helper defined elsewhere in the suite; it runs
        # the publisher and subscriber concurrently with a timeout.
        await task_graph(
            _publisher(),
            _subscriber(),
            timeout=5.0
        )
        assert messages
class TestSubscription:
    """Tests for `servo.pubsub.Subscription` selector parsing and matching."""

    def test_string_selector(self) -> None:
        """A plain string selector is stored verbatim."""
        subscription = servo.pubsub.Subscription(selector="metrics")
        assert subscription.selector == "metrics"
        assert isinstance(subscription.selector, str)

    def test_regex_selector(self) -> None:
        """A precompiled pattern selector is stored as-is."""
        subscription = servo.pubsub.Subscription(selector=re.compile("metrics"))
        assert subscription.selector == re.compile("metrics")
        assert isinstance(subscription.selector, re.Pattern)

    def test_regex_selector_expansion(self) -> None:
        """A slash-delimited string ('/pattern/') is compiled into a regex."""
        subscription = servo.pubsub.Subscription(selector="/metrics/")
        assert subscription.selector == re.compile("metrics")
        assert isinstance(subscription.selector, re.Pattern)

    def test_match_by_string(self, exchange: servo.pubsub.Exchange) -> None:
        """String selectors match channels by exact name."""
        metrics_channel = servo.pubsub.Channel(name="metrics", exchange=exchange)
        other_channel = servo.pubsub.Channel(name="other", exchange=exchange)
        message = servo.pubsub.Message(text="foo")
        subscription = servo.pubsub.Subscription(selector="metrics")
        assert subscription.matches(metrics_channel, message)
        assert not subscription.matches(other_channel, message)

    def test_match_no_message(self, exchange: servo.pubsub.Exchange) -> None:
        """Matching works without a message argument."""
        metrics_channel = servo.pubsub.Channel(name="metrics", exchange=exchange)
        other_channel = servo.pubsub.Channel(name="other", exchange=exchange)
        subscription = servo.pubsub.Subscription(selector="metrics")
        assert subscription.matches(metrics_channel)
        assert not subscription.matches(other_channel)

    @pytest.mark.parametrize(
        ("selector", "matches"),
        [
            ("metrics", False),
            ("metrics.prometheus.http", True),
            ("metrics.*", True),
            ("metrics.*.http", True),
            ("metrics.*.https", False),
            ("metrics.*.[abc]ttp", False),
            ("metrics.*.[hef]ttp", True),
        ]
    )
    def test_match_by_glob(self, exchange: servo.pubsub.Exchange, selector: str, matches: bool) -> None:
        """Glob-style selectors support '*' wildcards and character classes."""
        metrics_channel = servo.pubsub.Channel(name="metrics.prometheus.http", exchange=exchange)
        message = servo.pubsub.Message(text="foo")
        subscription = servo.pubsub.Subscription(selector=selector)
        assert subscription.matches(metrics_channel, message) == matches

    @pytest.mark.parametrize(
        ("selector", "matches"),
        [
            ("/metrics/", False),
            ("/metrics.prometheus.http/", True),
            ("/metrics.*/", True),
            ("/metrics.*.http/", True),
            ("/metrics.*.https/", False),
            ("/metrics.*.[abc]ttp/", False),
            ("/metrics.*.[hef]ttp/", True),
            ("/metrics.(prometheus|datadog|newrelic).https?/", True),
        ]
    )
    def test_match_by_regex(self, exchange: servo.pubsub.Exchange, selector: str, matches: bool) -> None:
        """Slash-delimited selectors are evaluated as full regular expressions."""
        channel = servo.pubsub.Channel(name="metrics.prometheus.http", exchange=exchange)
        message = servo.pubsub.Message(text="foo")
        subscription = servo.pubsub.Subscription(selector=selector)
        assert subscription.matches(channel, message) == matches, f"expected regex pattern '{selector}' match of '{channel.name}' to == {matches}"
@pytest.fixture
def subscriber(exchange: servo.pubsub.Exchange, subscription: servo.pubsub.Subscription) -> servo.pubsub.Subscriber:
    """Return a Subscriber bound to the test exchange and 'metrics*' subscription."""
    return servo.pubsub.Subscriber(exchange=exchange, subscription=subscription)
@pytest.fixture
def subscription(exchange: servo.pubsub.Exchange) -> servo.pubsub.Subscription:
    """Return a glob Subscription matching any channel whose name starts with 'metrics'."""
    return servo.pubsub.Subscription(selector="metrics*")
class TestSubscriber:
    """Tests for `servo.pubsub.Subscriber`: callbacks, async iteration, and cancellation."""

    def test_not_cancelled_on_create(self, subscriber: servo.pubsub.Subscriber) -> None:
        """A freshly created subscriber is active."""
        assert not subscriber.cancelled

    async def test_sync_callback_is_invoked(self, subscriber: servo.pubsub.Subscriber, mocker: pytest_mock.MockerFixture) -> None:
        """Synchronous callbacks receive (message, channel) when the subscriber fires."""
        callback = mocker.Mock()
        subscriber.callback = callback
        message = servo.pubsub.Message(text="foo")
        channel = servo.pubsub.Channel(name="metrics", exchange=subscriber.exchange)
        await subscriber(message, channel)
        callback.assert_called_once_with(message, channel)

    async def test_async_callback_is_invoked(self, subscriber: servo.pubsub.Subscriber, mocker: pytest_mock.MockerFixture) -> None:
        """Coroutine callbacks are awaited with (message, channel)."""
        callback = mocker.AsyncMock()
        subscriber.callback = callback
        message = servo.pubsub.Message(text="foo")
        channel = servo.pubsub.Channel(name="metrics", exchange=subscriber.exchange)
        await subscriber(message, channel)
        callback.assert_called_once_with(message, channel)

    async def test_async_iteration(self, subscriber: servo.pubsub.Subscriber) -> None:
        """Async iteration yields each delivered message until the subscriber is cancelled."""
        message = servo.pubsub.Message(text="foo")
        channel = servo.pubsub.Channel(name="metrics", exchange=subscriber.exchange)
        messages = []
        # The event synchronizes: iteration must be attached before messages are sent
        event = asyncio.Event()

        async def _processor() -> None:
            event.set()
            async for message_, channel_ in subscriber:
                assert message_ == message
                assert channel_ == channel
                messages.append(message_)
                if len(messages) == 3:
                    subscriber.cancel()

        task = asyncio.create_task(_processor())
        await event.wait()
        for _ in range(3):
            await subscriber(message, channel)
        await task
        assert len(messages) == 3

    @pytest.fixture
    async def iterator_task_factory(self) -> Callable[[], asyncio.Task]:
        """Build tasks that iterate a subscriber, collecting 3 messages then stopping."""
        # TODO: This should accept a callback for customization
        async def _iterator_task_factory(subscriber: servo.pubsub.Subscriber) -> asyncio.Task:
            event = asyncio.Event()

            async def _iterate() -> None:
                messages = []
                event.set()
                async for message_, channel_ in subscriber:
                    messages.append(message_)
                    if len(messages) == 3:
                        subscriber.stop()
                return messages

            task = asyncio.create_task(_iterate())
            # Ensure iteration has begun before handing the task back
            await event.wait()
            return task

        return _iterator_task_factory

    async def test_multiple_iterators(self, subscriber: servo.pubsub.Subscriber, iterator_task_factory: Callable[[], asyncio.Task]) -> None:
        """Several concurrent iterators each receive every message."""
        message = servo.pubsub.Message(text="foo")
        channel = servo.pubsub.Channel(name="metrics", exchange=subscriber.exchange)
        tasks = await asyncio.gather(
            iterator_task_factory(subscriber),
            iterator_task_factory(subscriber),
            iterator_task_factory(subscriber)
        )
        for _ in range(3):
            await subscriber(message, channel)
        results = await asyncio.gather(*tasks)
        messages = list(itertools.chain(*results))
        # 3 iterators x 3 messages each
        assert len(messages) == 9

    async def test_iterator_context(self, channel: servo.pubsub.Channel, subscriber: servo.pubsub.Subscriber) -> None:
        """_current_iterator() reflects the iterator active in the current task context."""
        other_subscriber = servo.pubsub.Subscriber(exchange=subscriber.exchange, subscription=subscriber.subscription)

        async def _create_iterator(subscriber_, current):
            assert servo.pubsub._current_iterator() == current
            async for message_, channel_ in subscriber_:
                iterator = servo.pubsub._current_iterator()
                assert iterator
                assert iterator is not None, "Iterator context should not be None"
                assert iterator.subscriber == subscriber_
                subscriber_.stop()

        task = asyncio.gather(*[
            _create_iterator(subscriber, None),
            _create_iterator(other_subscriber, None),
        ])
        await asyncio.sleep(0.1)
        # NOTE(review): rebinds the `subscriber` parameter; harmless after this point
        for subscriber in [subscriber, other_subscriber]:
            await subscriber(servo.pubsub.Message(text="foo"), channel)
        await task

    async def test_waiting(self, channel: servo.pubsub.Channel, subscriber: servo.pubsub.Subscriber) -> None:
        """Subscriber.wait() returns once the subscriber is cancelled."""
        async def _iterator() -> None:
            async for message, channel in subscriber:
                subscriber.cancel()

        await asyncio.wait_for(
            task_graph(
                subscriber(servo.pubsub.Message(text="foo"), channel),
                _iterator(),
                subscriber.wait()
            ),
            timeout=1.0
        )
        assert subscriber.cancelled

    async def test_cannot_stop_inactive_iterator(self, channel: servo.pubsub.Channel, subscriber: servo.pubsub.Subscriber) -> None:
        """Calling stop() on a subscriber not iterating in this context raises."""
        other_subscriber = servo.pubsub.Subscriber(exchange=subscriber.exchange, subscription=subscriber.subscription)
        event = asyncio.Event()

        async def _test() -> None:
            with pytest.raises(RuntimeError, match="Attempted to stop an inactive iterator"):
                async for message_, channel_ in subscriber:
                    iterator = servo.pubsub._current_iterator()
                    assert iterator
                    assert iterator.subscriber == subscriber
                    # Stopping a subscriber that is not iterating here must fail
                    other_subscriber.stop()
            event.set()

        await asyncio.gather(
            _test(),
            subscriber(servo.pubsub.Message(text="foo"), channel),
            event.wait()
        )

    async def test_cannot_stop_without_an_iterator(self, subscriber: servo.pubsub.Subscriber) -> None:
        """stop() outside any iterator context raises."""
        with pytest.raises(RuntimeError, match="Attempted to stop outside of an iterator"):
            subscriber.stop()

    async def test_cancellation_stops_all_iterators(self, channel: servo.pubsub.Channel, subscriber: servo.pubsub.Subscriber) -> None:
        """cancel() unblocks every iterator waiting on the subscriber."""
        async def _create_iterator():
            # will block waiting for messages
            async for message_, channel_ in subscriber:
                ...

        async def _cancel():
            subscriber.cancel()

        await asyncio.wait_for(
            task_graph(
                _cancel(),
                _create_iterator(), _create_iterator(), _create_iterator()
            ),
            timeout=1.0
        )

    async def test_cannot_stop_nested_iterator(self, subscriber: servo.pubsub.Subscriber, iterator_task_factory: Callable[[], asyncio.Task]) -> None:
        """From a nested iterator, stopping the outer subscriber raises."""
        channel = servo.pubsub.Channel(name="metrics", exchange=subscriber.exchange)
        assert channel
        other_subscriber = subscriber.exchange.create_subscriber('metrics')
        assert other_subscriber

        async def _boom() -> None:
            async for message_, channel_ in subscriber:
                async for message_, channel_ in other_subscriber:
                    # Try to cancel the outer subscriber to trigger the exception
                    subscriber.stop()

        async def _emit_messages(*subscribers, channel) -> None:
            while True:
                for subscriber in subscribers:
                    await subscriber(servo.pubsub.Message(text="foo"), channel)
                await asyncio.sleep(0.0001)

        with pytest.raises(RuntimeError, match='Attempted to stop an inactive iterator'):
            await asyncio.wait_for(
                asyncio.gather(
                    _emit_messages(subscriber, other_subscriber, channel=channel),
                    _boom()
                ),
                timeout=1.0
            )
@pytest.fixture
def publisher(exchange: servo.pubsub.Exchange, channel: servo.pubsub.Channel) -> servo.pubsub.Publisher:
    """Return a Publisher bound to the single 'metrics' channel."""
    return servo.pubsub.Publisher(exchange=exchange, channels=[channel])
@pytest.fixture
def multipublisher(exchange: servo.pubsub.Exchange) -> servo.pubsub.Publisher:
    """Return a Publisher bound to two channels: 'metrics.raw' and 'metrics.normalized'."""
    raw_metrics = servo.pubsub.Channel(name="metrics.raw", exchange=exchange)
    normalized_metrics = servo.pubsub.Channel(name="metrics.normalized", exchange=exchange)
    return servo.pubsub.Publisher(exchange=exchange, channels=[raw_metrics, normalized_metrics])
class TestPublisher:
    """Tests for `servo.pubsub.Publisher` channel routing when called."""

    async def test_calling_publishes_to_exchange(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """With no channel argument, the message is published to every bound channel."""
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        await multipublisher(message)
        spy.assert_has_calls([
            mocker.call(message, multipublisher.channels[0]),
            mocker.call(message, multipublisher.channels[1])
        ])
        assert spy.call_count == 2

    async def test_calling_with_one_channel(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """An explicit channel argument restricts delivery to that channel."""
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        await multipublisher(message, multipublisher.channels[0])
        spy.assert_called_once_with(message, multipublisher.channels[0])
        assert spy.call_count == 1

    async def test_calling_with_two_channels(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """Multiple explicit channels each receive the message."""
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        await multipublisher(message, *multipublisher.channels)
        spy.assert_has_calls([
            mocker.call(message, multipublisher.channels[0]),
            mocker.call(message, multipublisher.channels[1])
        ])
        assert spy.call_count == 2

    async def test_calling_with_one_channel_name(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """Channels may be referenced by name string as well as by object."""
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        await multipublisher(message, 'metrics.raw')
        spy.assert_called_once_with(message, multipublisher.channels[0])
        assert spy.call_count == 1

    async def test_calling_with_invalid_channel(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """Publishing to a channel the publisher is not bound to raises ValueError."""
        message = servo.pubsub.Message(text="foo")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        with pytest.raises(ValueError, match="Publisher is not bound to Channel: 'invalid'"):
            await multipublisher(message, 'invalid')

    async def test_switching_message_by_channel(self, multipublisher: servo.pubsub.Publisher, mocker: pytest_mock.MockFixture) -> None:
        """Different messages can be routed per channel using name comparison."""
        foo = servo.pubsub.Message(text="foo")
        bar = servo.pubsub.Message(text="bar")
        with servo.utilities.pydantic.extra(multipublisher.exchange):
            spy = mocker.spy(multipublisher.exchange, "publish")
        # Channels compare equal to their name strings
        for channel in multipublisher.channels:
            if channel == 'metrics.raw':
                await multipublisher(foo, channel)
            elif channel == 'metrics.normalized':
                await multipublisher(bar, channel)
        spy.assert_has_calls([
            mocker.call(foo, multipublisher.channels[0]),
            mocker.call(bar, multipublisher.channels[1])
        ])
        assert spy.call_count == 2
class TestExchange:
    """Tests for `servo.pubsub.Exchange`: lifecycle, channel/publisher/subscriber
    management, message routing, and the `current_message()` context helpers.

    Fix: the class previously defined `test_remove_publisher` twice; the first
    definition (which actually exercised channel removal) was shadowed by the
    second at class-creation time and never collected by pytest. It has been
    renamed to `test_remove_channel`, its `exchange.channel` typo corrected to
    `exchange.channels`, and its vacuous reassignment of `channel` from the
    `remove_channel` return value removed.
    """

    def test_starts_not_running(self, exchange: servo.pubsub.Exchange) -> None:
        """A new exchange is idle until explicitly started."""
        assert not exchange.running

    async def test_start(self, exchange: servo.pubsub.Exchange) -> None:
        """start() transitions the exchange into the running state."""
        assert not exchange.running
        exchange.start()
        assert exchange.running
        await exchange.shutdown()

    def test_clear(self, exchange: servo.pubsub.Exchange) -> None:
        """clear() drops all channels, publishers, and subscribers."""
        for i in range(3):
            name = f"channel-{i}"
            exchange.create_channel(name)
            exchange.create_publisher(name)
            exchange.create_subscriber(name)
        assert len(exchange.channels) == 3
        assert len(exchange._publishers) == 3
        assert len(exchange._subscribers) == 3
        exchange.clear()
        assert len(exchange.channels) == 0
        assert len(exchange._publishers) == 0
        assert len(exchange._subscribers) == 0

    async def test_shutdown(self, exchange: servo.pubsub.Exchange) -> None:
        """shutdown() stops the exchange and clears all registered objects."""
        for i in range(3):
            name = f"channel-{i}"
            exchange.create_channel(name)
            exchange.create_publisher(name)
            exchange.create_subscriber(name)
        exchange.start()
        assert exchange.running
        assert len(exchange.channels) == 3
        assert len(exchange._publishers) == 3
        assert len(exchange._subscribers) == 3
        await exchange.shutdown()
        assert not exchange.running
        assert len(exchange.channels) == 0
        assert len(exchange._publishers) == 0
        assert len(exchange._subscribers) == 0

    async def test_get_channel(self, exchange: servo.pubsub.Exchange) -> None:
        """get_channel() returns None for unknown names and the channel once created."""
        assert exchange.get_channel('whatever') is None
        channel = exchange.create_channel("whatever")
        assert channel is not None
        assert exchange.get_channel('whatever') == channel

    async def test_create_channel(self, exchange: servo.pubsub.Exchange) -> None:
        """create_channel() registers a named channel bound to this exchange."""
        channel = exchange.create_channel("whatever")
        assert channel is not None
        assert channel.name == 'whatever'
        assert channel.exchange == exchange
        assert len(exchange.channels) == 1

    async def test_create_channel_names_must_be_unique(self, exchange: servo.pubsub.Exchange) -> None:
        """Creating a second channel with an existing name raises ValueError."""
        exchange.create_channel("whatever")
        with pytest.raises(ValueError, match="A Channel named 'whatever' already exists"):
            exchange.create_channel("whatever")

    async def test_remove_channel(self, exchange: servo.pubsub.Exchange) -> None:
        """remove_channel() unregisters the channel from the exchange."""
        channel = exchange.create_channel("whatever")
        assert channel in exchange.channels
        exchange.remove_channel(channel)
        assert channel not in exchange.channels

    async def test_publish(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """A published message is delivered to a matching subscriber's callback."""
        exchange.start()
        channel = exchange.create_channel("metrics")
        message = servo.pubsub.Message(text='Testing')
        event = asyncio.Event()
        callback = mocker.AsyncMock(side_effect=lambda m, c: event.set())
        subscriber = exchange.create_subscriber(channel.name)
        subscriber.callback = callback
        await exchange.publish(message, channel)
        await event.wait()
        callback.assert_awaited_once_with(message, channel)

    async def test_publish_to_channel_by_name(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """publish() accepts a channel name in place of a Channel object."""
        exchange.start()
        channel = exchange.create_channel("metrics")
        message = servo.pubsub.Message(text='Testing')
        event = asyncio.Event()
        callback = mocker.AsyncMock(side_effect=lambda m, c: event.set())
        subscriber = exchange.create_subscriber(channel.name)
        subscriber.callback = callback
        await exchange.publish(message, channel.name)
        await event.wait()
        callback.assert_awaited_once_with(message, channel)

    async def test_publish_to_unknown_channel_fails(self, exchange: servo.pubsub.Exchange) -> None:
        """Publishing to a name with no registered channel raises ValueError."""
        message = servo.pubsub.Message(text='Testing')
        with pytest.raises(ValueError, match="no such Channel: invalid"):
            await exchange.publish(message, "invalid")

    async def test_publish_when_not_running_enqueues(self, exchange: servo.pubsub.Exchange) -> None:
        """Messages published before start() are queued for later delivery."""
        channel = exchange.create_channel("metrics")
        message = servo.pubsub.Message(text='Testing')
        await exchange.publish(message, channel.name)
        assert exchange._queue.qsize() == 1

    async def test_create_publisher(self, exchange: servo.pubsub.Exchange) -> None:
        """create_publisher() registers a publisher bound to an existing channel."""
        channel = exchange.create_channel("metrics")
        publisher = exchange.create_publisher(channel)
        assert publisher
        assert publisher.exchange == exchange
        assert publisher in exchange._publishers

    async def test_create_publisher_by_channel_name(self, exchange: servo.pubsub.Exchange) -> None:
        """create_publisher() accepts a channel name string."""
        channel = exchange.create_channel("metrics")
        publisher = exchange.create_publisher(channel.name)
        assert publisher
        assert publisher in exchange._publishers

    async def test_create_publisher_creates_channels(self, exchange: servo.pubsub.Exchange) -> None:
        """create_publisher() implicitly creates the channel if it does not exist."""
        channel = exchange.get_channel("metrics")
        assert channel is None
        publisher = exchange.create_publisher('metrics')
        assert publisher
        assert publisher in exchange._publishers
        assert exchange.get_channel("metrics") is not None

    async def test_remove_publisher(self, exchange: servo.pubsub.Exchange) -> None:
        """remove_publisher() unregisters the publisher from the exchange."""
        publisher = exchange.create_publisher('whatever')
        assert publisher
        assert publisher in exchange._publishers
        exchange.remove_publisher(publisher)
        assert publisher not in exchange._publishers

    async def test_create_subscriber(self, exchange: servo.pubsub.Exchange) -> None:
        """create_subscriber() registers a subscriber bound to this exchange."""
        subscriber = exchange.create_subscriber('whatever')
        assert subscriber
        assert subscriber.exchange == exchange
        assert subscriber in exchange._subscribers

    async def test_create_subscriber_with_dependency(self, exchange: servo.pubsub.Exchange) -> None:
        """A subscriber created with `until_done` ends iteration when the dependency completes."""
        async def _dependency() -> None:
            ...

        exchange.start()
        subscriber = exchange.create_subscriber('whatever', until_done=_dependency())
        async for event in subscriber:
            # block forever unless the dependency intervenes
            ...

    async def test_create_subscriber_with_timeout(self, exchange: servo.pubsub.Exchange) -> None:
        """A subscriber created with `timeout` ends iteration when the timeout elapses."""
        exchange.start()
        subscriber = exchange.create_subscriber('whatever', timeout=0.01)
        async for event in subscriber:
            # block forever unless the timeout intervenes
            ...

    async def test_remove_subscriber(self, exchange: servo.pubsub.Exchange) -> None:
        """remove_subscriber() unregisters the subscriber from the exchange."""
        subscriber = exchange.create_subscriber('whatever')
        assert subscriber
        assert subscriber in exchange._subscribers
        exchange.remove_subscriber(subscriber)
        assert subscriber not in exchange._subscribers

    async def test_publisher_to_subscriber(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """Messages flow end-to-end from a publisher through the exchange to a glob subscriber."""
        exchange.start()
        message = servo.pubsub.Message(text='Testing')
        event = asyncio.Event()
        callback = mocker.AsyncMock(side_effect=lambda m, c: event.set())
        subscriber = exchange.create_subscriber('metrics*')
        subscriber.callback = callback
        publisher = exchange.create_publisher("metrics.http.production")
        await publisher(message)
        await event.wait()
        callback.assert_awaited_once_with(message, publisher.channels[0])

    async def test_repr(self, exchange: servo.pubsub.Exchange) -> None:
        """repr() summarizes state, channels, and registered object counts."""
        exchange.create_publisher('whatever')
        exchange.create_subscriber('whatever')
        assert repr(exchange) == "Exchange(running=False, channel_names=['whatever'], publisher_count=1, subscriber_count=1, queue_size=0)"

    async def test_subscribe_context_manager(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """subscribe() as a context manager receives all messages published after entry."""
        # This is a little bit tricky. To ensure that the Subscriber is attached before the Publisher begins firing Messages
        # we use an Event to synchronize them and then gather them and compare the return values
        exchange.start()
        publisher = exchange.create_publisher("metrics.http.production")
        event = asyncio.Event()

        async def _publisher_func() -> List[servo.pubsub.Message]:
            # Wait for subscriber registration
            await event.wait()
            messages = []
            for i in range(10):
                message = servo.pubsub.Message(text=f'Test Message #{i}')
                await publisher(message)
                messages.append(message)
            return messages

        async def _subscriber_func() -> List[servo.pubsub.Message]:
            messages = []
            async with exchange.subscribe('metrics*') as subscription:
                # Trigger the Publisher to begin sending messages
                event.set()
                async for message, channel in subscription:
                    messages.append(message)
                    if len(messages) == 10:
                        subscription.cancel()
            return messages

        results = await asyncio.wait_for(
            asyncio.gather(_publisher_func(), _subscriber_func()),
            timeout=3.0
        )
        assert len(results) == 2
        assert len(results[0]) == 10
        assert len(results[1]) == 10
        assert list(map(operator.attrgetter("text"), results[0])) == [
            "Test Message #0",
            "Test Message #1",
            "Test Message #2",
            "Test Message #3",
            "Test Message #4",
            "Test Message #5",
            "Test Message #6",
            "Test Message #7",
            "Test Message #8",
            "Test Message #9",
        ]
        assert results[0] == results[1]

    async def test_current_message_in_callback(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """current_message() exposes the (message, channel) pair inside a callback only."""
        exchange.start()
        assert servo.pubsub.current_message() is None
        message = servo.pubsub.Message(text='Testing')
        event = asyncio.Event()
        current_message = None

        async def _callback(message: servo.pubsub.Message, channel: servo.pubsub.Channel) -> None:
            nonlocal current_message
            current_message = servo.pubsub.current_message()
            event.set()

        subscriber = exchange.create_subscriber('metrics*')
        subscriber.callback = _callback
        publisher = exchange.create_publisher("metrics.http.production")
        await publisher(message)
        await event.wait()
        assert current_message is not None
        assert current_message == (message, publisher.channels[0])
        assert current_message[1].exchange == exchange
        # Outside the delivery context the helper returns None again
        assert servo.pubsub.current_message() is None

    async def test_current_message_in_iterator(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockerFixture) -> None:
        """current_message() is also populated while iterating a subscription."""
        exchange.start()
        publisher = exchange.create_publisher("metrics.http.production")
        message = servo.pubsub.Message(text='Some Message')
        event = asyncio.Event()
        current_message = None

        async def _publisher_func() -> None:
            # Wait for subscriber registration
            await event.wait()
            await publisher(message)

        async def _subscriber_func() -> None:
            nonlocal current_message
            async with exchange.subscribe('metrics*') as subscription:
                # Trigger the Publisher to begin sending messages
                event.set()
                async for message, channel in subscription:
                    current_message = servo.pubsub.current_message()
                    subscription.cancel()

        await asyncio.wait_for(
            asyncio.gather(_publisher_func(), _subscriber_func()),
            timeout=3.0
        )
        assert current_message is not None
        assert current_message == (message, publisher.channels[0])
        assert current_message[1].exchange == exchange
        assert servo.pubsub.current_message() is None

    async def test_iteration(self, exchange: servo.pubsub.Exchange, mocker: pytest_mock.MockFixture) -> None:
        """Async-iterating the exchange yields messages from all channels until stopped."""
        channel = exchange.create_channel('whatever')
        messages = []

        async def _subscriber() -> None:
            async for message in exchange:
                messages.append(message)
                if len(messages) == 3:
                    exchange.stop()

        async def _publisher() -> None:
            for i in range(3):
                await exchange.publish(servo.pubsub.Message(text=f"Message: {i}"), channel)

        exchange.start()
        await task_graph(
            _publisher(),
            _subscriber(),
            timeout=5.0
        )
        assert messages
class HostObject(servo.pubsub.Mixin):
    """Test double hosting the pubsub decorators provided by `servo.pubsub.Mixin`."""

    async def _test_publisher_decorator(self, *, name: Optional[str] = None) -> None:
        """Register a one-shot publisher that emits a single message then idles."""
        @self.publish("metrics", name=name)
        async def _manual_publisher(publisher: servo.pubsub.Publisher) -> None:
            await publisher(servo.pubsub.Message(json={"throughput": "31337rps"}))
            # Idle so the publisher task stays alive for the duration of the test
            await asyncio.sleep(30)

    async def _test_repeating_publisher_decorator(self) -> None:
        """Register a publisher that fires every 10ms."""
        @self.publish("metrics", every="10ms")
        async def _repeating_publisher(publisher: servo.pubsub.Publisher) -> None:
            await publisher(servo.pubsub.Message(json={"throughput": "31337rps"}))

    async def _test_subscriber_decorator(self, callback, *, name: Optional[str] = None) -> None:
        """Register a subscriber on 'metrics' that forwards deliveries to `callback`."""
        @self.subscribe("metrics", name=name)
        async def _message_received(message: servo.pubsub.Message, channel: servo.pubsub.Channel) -> None:
            callback(message, channel)
class TestMixin:
@pytest.fixture
async def host_object(self) -> HostObject:
    """Yield a HostObject, then tear down its exchange and cancel stray tasks."""
    host_object = HostObject()
    yield host_object
    if host_object.pubsub_exchange.running:
        await host_object.pubsub_exchange.shutdown()
    else:
        host_object.pubsub_exchange.clear()
    # Cancel any tasks spawned by publishers/subscribers during the test
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    [task.cancel() for task in tasks]
    await asyncio.gather(*tasks, return_exceptions=True)
async def test_init_with_pubsub_exchange(self) -> None:
    """An exchange passed at construction time is adopted by the host."""
    custom_exchange = servo.pubsub.Exchange()
    host = HostObject(pubsub_exchange=custom_exchange)
    assert host.pubsub_exchange == custom_exchange
async def test_exchange_property(self, host_object: HostObject) -> None:
    """The mixin provides a default exchange via the `pubsub_exchange` property."""
    assert host_object.pubsub_exchange
async def test_exchange_property_setter(self, host_object: HostObject, exchange: servo.pubsub.Exchange) -> None:
    """Assigning to `pubsub_exchange` replaces the default exchange."""
    original = host_object.pubsub_exchange
    assert original
    assert original != exchange
    host_object.pubsub_exchange = exchange
    assert host_object.pubsub_exchange == exchange
async def test_publisher_decorator_repeating(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """A publisher registered with `every='10ms'` enqueues messages repeatedly."""
    assert len(host_object.pubsub_exchange._publishers) == 0
    await host_object._test_repeating_publisher_decorator()
    assert len(host_object.pubsub_exchange._publishers) == 1
    assert host_object.pubsub_exchange._queue.qsize() == 0
    # 200ms at a 10ms interval should yield at least 10 enqueued messages
    await asyncio.sleep(0.2)
    assert host_object.pubsub_exchange._queue.qsize() >= 10
async def test_publisher_decorator_manual(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """A publisher registered without `every` publishes exactly once."""
    assert len(host_object.pubsub_exchange._publishers) == 0
    await host_object._test_publisher_decorator()
    assert len(host_object.pubsub_exchange._publishers) == 1
    assert host_object.pubsub_exchange._queue.qsize() == 0
    await asyncio.sleep(0.2)
    assert host_object.pubsub_exchange._queue.qsize() == 1
async def test_publisher_context_manager(self, host_object: HostObject) -> None:
    """`publish` as a context manager yields a publisher that is removed on exit."""
    assert len(host_object.pubsub_exchange._publishers) == 0
    async with host_object.publish('metrics') as publisher:
        assert publisher
        assert len(host_object.pubsub_exchange._publishers) == 1
        assert host_object.pubsub_exchange._queue.qsize() == 0
        await publisher(servo.pubsub.Message(text="context manager FTW!"))
        assert host_object.pubsub_exchange._queue.qsize() == 1
    # Publisher is unregistered on exit, but the enqueued message remains
    assert len(host_object.pubsub_exchange._publishers) == 0
    assert host_object.pubsub_exchange._queue.qsize() == 1
async def test_publisher_context_manager_rejects_every_arg(self, host_object: HostObject) -> None:
    """Using `publish` as a context manager with `every` set is a TypeError."""
    expected = 'Cannot create repeating publisher when used as a context manager: `every` must be None'
    with pytest.raises(TypeError, match=expected):
        async with host_object.publish('metrics', every="10s") as publisher:
            ...
async def test_publisher_callable_await(self, host_object: HostObject) -> None:
    """Awaiting `publish(message, channel)` enqueues without registering a publisher."""
    await host_object.publish(
        servo.pubsub.Message(json={ 'whatever': ['you', 'want', 'if', 'json', 'serializable'] }),
        'metrics'
    )
    assert len(host_object.pubsub_exchange._publishers) == 0
    assert host_object.pubsub_exchange._queue.qsize() == 1
async def test_publisher_callable_direct(self, host_object: HostObject) -> None:
    """Calling `publish` without awaiting returns the lazy publisher-method wrapper."""
    message = servo.pubsub.Message(json={ 'whatever': ['you', 'want', 'if', 'json', 'serializable'] })
    result = host_object.publish(message, 'metrics')
    assert isinstance(result, servo.pubsub._PublisherMethod)
async def test_subscriber_decorator(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """A decorated subscriber receives messages published on its channel."""
    event = asyncio.Event()
    stub = mocker.stub()
    stub.side_effect=lambda x,y: event.set()
    await host_object._test_subscriber_decorator(stub)
    host_object.pubsub_exchange.start()
    channel = host_object.pubsub_exchange.create_channel("metrics")
    message = servo.pubsub.Message(json={"throughput": "31337rps"})
    await host_object.pubsub_exchange.publish(message, channel)
    await event.wait()
    stub.assert_called_once_with(message, channel)
async def test_subscriber_context_manager(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """`subscribe` as a context manager yields an iterable subscriber."""
    stub = mocker.stub()
    host_object.pubsub_exchange.start()
    message = servo.pubsub.Message(json={"throughput": "31337rps"})
    channel = host_object.pubsub_exchange.create_channel("metrics")
    # Event ensures subscription is attached before the message is published
    event = asyncio.Event()

    async def _publisher() -> None:
        await event.wait()
        await host_object.pubsub_exchange.publish(message, channel)

    async def _subscriber() -> None:
        async with host_object.subscribe('metrics') as subscriber:
            event.set()
            async for message, channel in subscriber:
                stub(message, channel)
                subscriber.cancel()

    await asyncio.wait_for(
        asyncio.gather(_publisher(), _subscriber()),
        timeout=3.0
    )
    stub.assert_called_once_with(message, channel)
async def test_pubsub_between_decorators(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """A repeating decorated publisher feeds a decorated subscriber end-to-end."""
    notifications = []

    def _callback(message, channel) -> None:
        notification = f"Message #{len(notifications)} '{message.text}' (channel: '{channel.name}')"
        notifications.append(notification)

    await host_object._test_subscriber_decorator(_callback)
    await host_object._test_repeating_publisher_decorator()
    host_object.pubsub_exchange.start()
    # 200ms at a 10ms publish interval should produce more than 10 deliveries
    await asyncio.sleep(0.2)
    assert len(notifications) > 10
    assert notifications[0:5] == [
        "Message #0 \'{\"throughput\": \"31337rps\"}\' (channel: 'metrics')",
        "Message #1 \'{\"throughput\": \"31337rps\"}\' (channel: 'metrics')",
        "Message #2 \'{\"throughput\": \"31337rps\"}\' (channel: 'metrics')",
        "Message #3 \'{\"throughput\": \"31337rps\"}\' (channel: 'metrics')",
        "Message #4 \'{\"throughput\": \"31337rps\"}\' (channel: 'metrics')",
    ]
async def test_cancel_subscribers(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """Cancelling subscribers by name removes only the matching subscriber."""
    stub = mocker.stub()
    await host_object._test_subscriber_decorator(stub)
    await host_object._test_subscriber_decorator(stub, name="another_subscriber")
    with servo.utilities.pydantic.extra(host_object.pubsub_exchange):
        spy = mocker.spy(host_object.pubsub_exchange, "remove_subscriber")
        host_object.cancel_subscribers('_message_received')
        spy.assert_called_once()
        subscriber = spy.call_args.args[0]
        assert subscriber.cancelled
        assert subscriber not in host_object.pubsub_exchange._subscribers
    # The unmatched subscriber remains registered
    assert len(host_object.pubsub_exchange._subscribers) == 1
    assert len(host_object._subscribers_map) == 1
    assert host_object._subscribers_map['another_subscriber']
async def test_cancel_all_subscribers(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """Cancelling with no name removes every registered subscriber."""
    stub = mocker.stub()
    await host_object._test_subscriber_decorator(stub, name="one_subscriber")
    await host_object._test_subscriber_decorator(stub, name="two_subscriber")
    await host_object._test_subscriber_decorator(stub, name="three_subscriber")
    assert len(host_object.pubsub_exchange._subscribers) == 3
    # `extra` temporarily relaxes the pydantic model so mocker.spy can patch it.
    with servo.utilities.pydantic.extra(host_object.pubsub_exchange):
        spy = mocker.spy(host_object.pubsub_exchange, "remove_subscriber")
        host_object.cancel_subscribers()
        spy.assert_called()
        assert spy.call_count == 3
        for args in spy.call_args_list:
            subscriber, = args[0]
            assert subscriber.cancelled
            assert subscriber not in host_object.pubsub_exchange._subscribers
        assert len(host_object.pubsub_exchange._subscribers) == 0
        assert len(host_object._subscribers_map) == 0
async def test_cancel_publishers(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """Cancelling a single named publisher removes it from the exchange and the map."""
    await host_object._test_publisher_decorator()
    await host_object._test_publisher_decorator(name="another_publisher")
    assert len(host_object._publishers_map) == 2
    assert host_object._publishers_map['_manual_publisher']
    assert host_object._publishers_map['another_publisher']
    # `extra` temporarily relaxes the pydantic model so mocker.spy can patch it.
    with servo.utilities.pydantic.extra(host_object.pubsub_exchange):
        spy = mocker.spy(host_object.pubsub_exchange, "remove_publisher")
        host_object.cancel_publishers('_manual_publisher')
        spy.assert_called_once()
        publisher = spy.call_args.args[0]
        # Bug fix: the original asserted on `subscriber`, a name undefined in
        # this test (copy/paste from the subscriber variant) that would raise
        # NameError. The removed *publisher* is what must be gone.
        assert publisher not in host_object.pubsub_exchange._publishers
        assert len(host_object.pubsub_exchange._publishers) == 1
        assert len(host_object._publishers_map) == 1
        assert host_object._publishers_map['another_publisher']
async def test_cancel_all_publishers(self, host_object: HostObject, mocker: pytest_mock.MockFixture) -> None:
    """Cancelling with no name removes every registered publisher."""
    await host_object._test_publisher_decorator(name="one_publisher")
    await host_object._test_publisher_decorator(name="two_publisher")
    await host_object._test_publisher_decorator(name="three_publisher")
    # `extra` temporarily relaxes the pydantic model so mocker.spy can patch it.
    with servo.utilities.pydantic.extra(host_object.pubsub_exchange):
        spy = mocker.spy(host_object.pubsub_exchange, "remove_publisher")
        host_object.cancel_publishers()
        spy.assert_called()
        assert spy.call_count == 3
        for args in spy.call_args_list:
            publisher, = args[0]
            assert publisher not in host_object.pubsub_exchange._publishers
        assert len(host_object.pubsub_exchange._publishers) == 0
        assert len(host_object._publishers_map) == 0
async def test_channel_async_context_manager_temporary(self, host_object: HostObject) -> None:
    """`channel()` creates a transient channel that is destroyed on exit."""
    assert host_object.pubsub_exchange.get_channel('metrics') is None
    async with host_object.channel('metrics', 'the description') as channel:
        assert channel
        assert channel.name == 'metrics'
        assert channel.description == 'the description'
        assert host_object.pubsub_exchange.get_channel('metrics') == channel
    # The temporary channel is gone once the context exits.
    assert host_object.pubsub_exchange.get_channel('metrics') is None
async def test_channel_async_context_manager_existing(self, host_object: HostObject) -> None:
    """`channel()` reuses a pre-existing channel and leaves it open on exit."""
    existing_channel = host_object.pubsub_exchange.create_channel("metrics")
    async with host_object.channel('metrics') as channel:
        assert channel
        assert channel == existing_channel
    assert host_object.pubsub_exchange.get_channel('metrics') == existing_channel
    assert not existing_channel.closed
async def test_channel_async_context_manager_temporary_random_name(self, host_object: HostObject) -> None:
    """Without a name, a temporary channel gets a generated name and is closed on exit."""
    name = None
    channel_ref = None
    async with host_object.channel() as channel:
        assert channel
        channel_ref = channel
        name = channel.name
    assert host_object.pubsub_exchange.get_channel(name) is None
    assert channel_ref.closed
async def test_exception_handling_in_publisher(self, host_object: HostObject) -> None:
    """A publisher callback that raises must not take down the exchange."""
    @host_object.publish("metrics", every='100ms')
    async def _failio(publisher: servo.pubsub.Publisher) -> None:
        raise RuntimeError("boom.")

    # Yield control so the publisher gets a chance to run (and raise).
    await asyncio.sleep(0.001)
class CountDownLatch:
    """An asyncio latch that releases waiters once `count` decrements have occurred."""

    def __init__(self, count=1):
        self._count = count
        self._condition = asyncio.Condition()

    @property
    def count(self) -> int:
        """Number of outstanding decrements before the latch opens."""
        return self._count

    async def decrement(self):
        """Decrement the counter, waking all waiters when it reaches zero."""
        async with self._condition:
            self._count -= 1
            if self._count <= 0:
                self._condition.notify_all()

    async def wait(self):
        """Block until the counter has reached zero.

        Bug fix: the original awaited ``Condition.wait()`` unconditionally, so a
        waiter arriving after the count had already hit zero (or a latch created
        with count <= 0) would block forever because the notification had
        already been sent. ``wait_for`` re-checks the predicate first and
        returns immediately in that case.
        """
        async with self._condition:
            await self._condition.wait_for(lambda: self._count <= 0)
async def task_graph(task, *dependencies, timeout: Optional[servo.Duration] = None):
    """Run *task* after every item in *dependencies* has checked in.

    Each dependency decrements a CountDownLatch before running; the main task
    waits on the latch and then executes. Everything runs concurrently under
    an overall *timeout* (None waits forever).
    """
    async def _main_task():
        # NOTE(review): if every dependency decrements before this coroutine
        # reaches wait(), CountDownLatch.wait() may block forever — verify.
        await latch.wait()
        await _run_task(task)

    async def _run_task(task):
        # Accept coroutine functions, coroutine objects, and plain callables.
        if asyncio.iscoroutinefunction(task):
            await task()
        elif asyncio.iscoroutine(task):
            await task
        else:
            task()

    async def _dependent_task(task):
        await latch.decrement()
        await _run_task(task)

    # `latch` is captured by the closures above; it is bound before they run.
    latch = CountDownLatch(len(dependencies))
    timeout_ = timeout and servo.Duration(timeout).total_seconds()
    await asyncio.wait_for(
        asyncio.gather(
            _main_task(),
            *list(map(_dependent_task, dependencies))
        ),
        timeout=timeout_
    )
def test_weakref_to_exchange() -> None:
exchange = servo.pubsub.Exchange()
assert weakref.proxy(exchange)
def test_garbage_collection_of_exchange() -> None:
exchange = servo.pubsub.Exchange()
channel = exchange.create_channel('foo')
publisher = exchange.create_publisher(channel)
subscriber = exchange.create_subscriber('foo')
assert channel.exchange == exchange
assert publisher.exchange == exchange
assert subscriber.exchange == exchange
del exchange
assert channel.exchange is None
assert publisher.exchange is None
assert subscriber.exchange is None
| StarcoderdataPython |
1884366 | <gh_stars>0
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# URL routes for the housing app: landing page, user profile views,
# house listings/details, and free-text search.
urlpatterns = [
    path('', views.index, name='index'),
    path('profile/', views.profile, name='profile'),
    path('update/', views.update_profile, name='update'),
    path('house/details/<int:id>/', views.details, name='details'),
    path('houses/', views.houses, name='houses'),
    path('search/', views.search_results, name='search_results'),
]

# Serve user-uploaded media through Django itself during development only;
# in production the web server is expected to serve MEDIA_URL.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
6530478 | import sys, ast
import sage
class FileInfo():
    """Source-location record: a file name plus an AST node's line/column."""

    def __init__(self, filename, node):
        self.filename = filename
        # Copy the position attributes off the AST node.
        self.lineno, self.col_offset = node.lineno, node.col_offset
class stack():
    """Minimal LIFO stack used to track nested scopes during translation."""

    def __init__(self):
        self._stack = []

    def push(self, obj):
        """Place *obj* on top of the stack."""
        self._stack.append(obj)

    def pop(self, expected=None):
        """Discard the top element.

        When *expected* is supplied, verify it really is the current top
        and raise otherwise. Nothing is returned.
        """
        if expected is not None and expected != self.peek():
            raise Exception("Popped unexpected value from stack.")
        del self._stack[-1]

    def peek(self):
        """Return (without removing) the element on top of the stack."""
        return self._stack[-1]
class SageTranslator(ast.NodeVisitor):
    """Translate a CPython AST into ROSE/Sage IR nodes via the `sage` builder API.

    Python 2 module: `map` returns lists and is used to translate child-node
    sequences eagerly. A `stack` of scopes tracks the lexical scope that new
    names and definitions are built against.
    """

    def __init__(self, filename):
        self.filename = filename
        self.scopeStack = stack()

    def __call__(self, syntax_tree):
        # Allow the translator instance itself to be used as a function.
        return self.visit(syntax_tree)

    def file_info(self, node):
        # Bundle the current file name with the node's source position.
        return FileInfo(self.filename, node)

    def generic_visit(self, node):
        # Fallback: translate all children and return the resulting list.
        #print "generic_visit for class: ", node.__class__.__name__
        return map(self.visit, ast.iter_child_nodes(node))

    def visit_alias(self, node):
        # import aliases: `import a as b` becomes an assignment b = <a>.
        scope = self.scopeStack.peek()
        id = node.asname or node.name
        init = node.asname and self.visit(node.name)
        name = sage.buildName(id, scope)
        return (init and sage.buildAssign(name, init)) or name

    def visit_arguments(self, node):
        args = map(lambda arg: sage.buildInitializedName(arg.id), node.args)
        kwargs = map(self.visit, node.defaults)
        return sage.buildFunctionParameterList(args, kwargs)

    def visit_Assert(self, node):
        test = self.visit(node.test)
        return sage.buildAssert(test)

    def visit_Assign(self, node):
        targets = map(self.visit, node.targets)
        value = self.visit(node.value)
        # Chained targets (a = b = expr) are not handled yet.
        assert len(targets) == 1, "target assignment lists are yet to be supported"
        return sage.buildAssign(targets[0], value)

    def visit_Attribute(self, node):
        value = self.visit(node.value)
        scope = self.scopeStack.peek()
        attr = sage.buildName(node.attr, scope)
        return sage.buildAttr(value, attr)

    def visit_AugAssign(self, node):
        target = self.visit(node.target)
        value = self.visit(node.value)
        # The operator is passed as its AST class, not an instance.
        op = node.op.__class__
        return sage.buildAugAssign(target, value, op)

    def visit_BinOp(self, node):
        lhs = self.visit(node.left)
        rhs = self.visit(node.right)
        op_str = node.op.__class__
        return sage.buildBinOp(lhs, rhs, op_str)

    def visit_BoolOp(self, node):
        operands = map(self.visit, node.values)
        operator = node.op.__class__
        return sage.buildBoolOp(operator, operands)

    def visit_Break(self, node):
        return sage.buildBreak()

    def visit_Call(self, node):
        name = self.visit(node.func)
        args = map(self.visit, node.args)
        keywords = map(self.visit, node.keywords)
        # *args / **kwargs parts are optional on Call nodes.
        starargs = node.starargs and self.visit(node.starargs)
        kwargs = node.kwargs and self.visit(node.kwargs)
        return sage.buildCall(name, args, keywords, starargs, kwargs)

    def visit_ClassDef(self, node):
        scope = self.scopeStack.peek()
        bases = node.bases and map(self.visit, node.bases)
        decorators = node.decorator_list and sage.buildExprListExp(map(self.visit, node.decorator_list))
        # buildClassDef returns the declaration plus the class's own scope,
        # which becomes the current scope while the body is translated.
        class_decl, scope = \
            sage.buildClassDef(node.name, bases, decorators, scope)
        self.scopeStack.push(scope)
        body = map(self.visit, node.body)
        sage.appendStatements(scope, body)
        self.scopeStack.pop(scope)
        return class_decl

    def visit_Compare(self, node):
        # A chained comparison a < b < c yields operands [a, b, c].
        operands = map(self.visit, [node.left] + node.comparators)
        operators = map(lambda op: op.__class__, node.ops)
        return sage.buildCompare(operators, operands)

    def visit_complex(self, n):
        return sage.buildComplexVal(n)

    def visit_comprehension(self, node):
        ifs = node.ifs and sage.buildExprListExp(map(self.visit, node.ifs))
        target = self.visit(node.target)
        iter = self.visit(node.iter)
        return sage.buildComprehension(target, iter, ifs)

    def visit_Continue(self, node):
        return sage.buildContinue()

    def visit_Delete(self, node):
        target = sage.buildExprListExp(map(self.visit, node.targets))
        return sage.buildDelete(target)

    def visit_Dict(self, node):
        keys = map(self.visit, node.keys)
        values = map(self.visit, node.values)
        # Pair each translated key with its translated value.
        pairs = [sage.buildKeyDatumPair(keys[i], values[i]) for i in range(len(keys))]
        return sage.buildDict(pairs)

    def visit_DictComp(self, node):
        key = self.visit(node.key)
        value = self.visit(node.value)
        elt = sage.buildKeyDatumPair(key, value)
        gens = sage.buildExprListExp(map(self.visit, node.generators))
        return sage.buildDictComp(elt, gens)

    def visit_Ellipsis(self, node):
        # Ellipsis is modeled as the name "..." in the current scope.
        scope = self.scopeStack.peek()
        return sage.buildName("...", scope)

    def visit_ExceptHandler(self, node):
        # NOTE(review): assumes the handler name/type are plain Name nodes;
        # a dotted exception type (e.g. socket.error) has no `.id` — verify.
        name = node.name and node.name.id
        type = node.type and node.type.id
        body = map(self.visit, node.body)
        scope = self.scopeStack.peek()
        return sage.buildExceptHandler(name, type, body, scope)

    def visit_Exec(self, node):
        body = self.visit(node.body)
        globals = node.globals and self.visit(node.globals)
        locals = node.locals and self.visit(node.locals)
        return sage.buildExec(body, globals, locals)

    def visit_Expr(self, node):
        value = self.visit(node.value)
        return sage.buildExpr(value)

    def visit_FunctionDef(self, node):
        scope = self.scopeStack.peek()
        decorators = node.decorator_list and sage.buildExprListExp(map(self.visit, node.decorator_list))
        # *args / **kwargs get dedicated initialized names, flagged as such.
        stararg_id = node.args.vararg and sage.buildInitializedName(node.args.vararg, starred=True)
        dstararg_id = node.args.kwarg and sage.buildInitializedName(node.args.kwarg, dstarred=True)
        params = self.visit(node.args)
        (capsule, scope) = \
            sage.buildFunctionDef(node.name, params, decorators, stararg_id, dstararg_id, scope)
        # The function's own scope is current while its body is translated.
        self.scopeStack.push(scope)
        body_forest = map(self.visit, node.body)
        sage.appendStatements(capsule, body_forest)
        self.scopeStack.pop(scope)
        return capsule

    def visit_Global(self, node):
        names = map(sage.buildInitializedName, node.names)
        return sage.buildGlobal(names)

    def visit_If(self, node):
        test = self.visit(node.test)
        body = map(self.visit, node.body)
        orelse = map(self.visit, node.orelse)
        return sage.buildIf(test, body, orelse)

    def visit_IfExp(self, node):
        test = self.visit(node.test)
        body = self.visit(node.body)
        orelse = self.visit(node.orelse)
        return sage.buildIfExp(test, body, orelse)

    def visit_Import(self, node):
        names = map(self.visit, node.names)
        return sage.buildImport(names)

    def visit_Index(self, node):
        # Simple subscripts unwrap directly to their value expression.
        return self.visit(node.value)

    def visit_int(self, n):
        return sage.buildLongIntVal(n)

    def visit_float(self, n):
        return sage.buildFloat(n)

    def visit_For(self, node):
        scope = self.scopeStack.peek()
        target = self.visit(node.target)
        iter = self.visit(node.iter)
        body = sage.buildSuite(map(self.visit, node.body))
        orelse = node.orelse and sage.buildSuite(map(self.visit, node.orelse))
        return sage.buildFor(target, iter, body, orelse)

    def visit_keyword(self, node):
        value = self.visit(node.value)
        return sage.buildKeyword(node.arg, value)

    def visit_Lambda(self, node):
        scope = self.scopeStack.peek()
        params = self.visit(node.args)
        (lambda_capsule, lambda_scope) = sage.buildLambda(params, scope)
        # NOTE(review): pushes the *outer* scope rather than lambda_scope,
        # unlike visit_FunctionDef/visit_ClassDef — looks like a bug; confirm
        # against the sage builder's expectations.
        self.scopeStack.push(scope)
        expr = self.visit(node.body)
        self.scopeStack.pop(scope)
        sage.appendStatements(lambda_capsule, [expr])
        return lambda_capsule

    def visit_List(self, node):
        return sage.buildListExp(map(self.visit, node.elts))

    def visit_ListComp(self, node):
        elt = self.visit(node.elt)
        gens = sage.buildExprListExp(map(self.visit, node.generators))
        return sage.buildListComp(elt, gens)

    def visit_long(self, n):
        return sage.buildLongIntVal(n)

    def visit_Module(self, node):
        # The module is the root: create the global scope and fill it.
        scope = sage.buildGlobalScope(self.filename)
        self.scopeStack.push(scope)
        subforest = self.generic_visit(node)
        self.scopeStack.pop(scope)
        sage.appendStatements(scope, subforest)
        return scope

    def visit_Name(self, node):
        scope = self.scopeStack.peek()
        return sage.buildName(node.id, scope)

    def visit_NoneType(self, node):
        scope = self.scopeStack.peek()
        return sage.buildName("None", scope)

    def visit_Num(self, node):
        # Dispatch on the literal's Python type (int/long/float/complex).
        return self.visit(node.n)

    def visit_Pass(self, node):
        return sage.buildPass()

    def visit_Print(self, node):
        dest = node.dest and self.visit(node.dest)
        values = sage.buildExprListExp(map(self.visit, node.values))
        return sage.buildPrintStmt(dest, values)

    def visit_Repr(self, node):
        value = self.visit(node.value)
        return sage.buildRepr(value)

    def visit_Return(self, node):
        value = self.visit(node.value)
        return sage.buildReturnStmt(value)

    def visit_SetComp(self, node):
        elt = self.visit(node.elt)
        gens = sage.buildExprListExp(map(self.visit, node.generators))
        return sage.buildSetComp(elt, gens)

    def visit_Slice(self, node):
        lower = node.lower and self.visit(node.lower)
        upper = node.upper and self.visit(node.upper)
        step = node.step and self.visit(node.step)
        return sage.buildSlice(lower, upper, step)

    def visit_Str(self, node):
        return sage.buildStringVal(node.s)

    def visit_str(self, str):
        # Raw strings should always arrive wrapped in a Str node.
        assert False, "unhandled raw string"

    def visit_Subscript(self, node):
        value = self.visit(node.value)
        slice = self.visit(node.slice)
        return sage.buildSubscript(value, slice)

    def visit_TryExcept(self, node):
        body = sage.buildSuite(map(self.visit, node.body))
        handlers = node.handlers and map(self.visit, node.handlers)
        orelse = node.orelse and sage.buildSuite(map(self.visit, node.orelse))
        return sage.buildTryExcept(body, handlers, orelse)

    def visit_TryFinally(self, node):
        body = sage.buildSuite(map(self.visit, node.body))
        finalbody = sage.buildSuite(map(self.visit, node.finalbody))
        return sage.buildTryFinally(body, finalbody)

    def visit_Tuple(self, node):
        return sage.buildTuple(map(self.visit, node.elts))

    def visit_UnaryOp(self, node):
        operand = self.visit(node.operand)
        op = node.op.__class__
        return sage.buildUnaryOp(op, operand)

    def visit_While(self, node):
        test = self.visit(node.test)
        body = sage.buildSuite(map(self.visit, node.body))
        orelse = node.orelse and sage.buildSuite(map(self.visit, node.orelse))
        return sage.buildWhile(test, body, orelse)

    def visit_With(self, node):
        exp = self.visit(node.context_expr)
        var = node.optional_vars and self.visit(node.optional_vars)
        body = sage.buildSuite(map(self.visit, node.body))
        return sage.buildWith(exp, var, body)

    def visit_Yield(self, node):
        value = self.visit(node.value)
        return sage.buildYield(value)
def translate(infilename):
    """Parse the Python source file *infilename* and translate it to a Sage AST.

    On I/O failure an error is printed to stderr and the process exits
    with status 1.
    """
    try:
        # 'with' guarantees the handle is closed even when read() raises,
        # unlike the original open/read/close sequence which leaked it.
        with open(infilename) as infile:
            contents = infile.read()
    except IOError:
        print >>sys.stderr, "IO error when reading file: %s" % infilename
        # sys.exit instead of the interactive-only exit() builtin.
        sys.exit(1)
    syntax_tree = ast.parse(contents)
    return SageTranslator(infilename).visit(syntax_tree)
def main(argv):
    """Translate every file named on the command line (argv[0] is the program name)."""
    for infilename in argv[1:]:
        translate(infilename)
# Script entry point: translate every file passed on the command line.
if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3555274 | <filename>web_api/news/outputs/fetch_result.py
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: fetch_result.py
@time: 2018-05-30 19:34
"""
from __future__ import unicode_literals
from flask_restful import fields
# flask-restful marshalling template for a single fetch-result record:
# maps response keys to field serializers (datetimes rendered as ISO 8601).
fields_item_fetch_result = {
    'id': fields.Integer,
    'task_id': fields.Integer,
    'platform_id': fields.Integer,
    'platform_name': fields.String,
    'channel_id': fields.Integer,
    'channel_name': fields.String,
    'article_id': fields.String,
    'article_url': fields.String,
    'article_title': fields.String,
    'article_author_id': fields.String,
    'article_author_name': fields.String,
    'article_tags': fields.String,
    'article_abstract': fields.String,
    'article_content': fields.String,
    'article_pub_time': fields.DateTime(dt_format=b'iso8601'),
    'create_time': fields.DateTime(dt_format=b'iso8601'),
    'update_time': fields.DateTime(dt_format=b'iso8601'),
}
| StarcoderdataPython |
8093365 | <reponame>ZhizhongPan/algo_trade<filename>algotrade/technical/abstract.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'phil.zhang'
import inspect
import talib.abstract as ab
import numpy as np
import ls_talib
_LS_FUNCTION_NAMES = set(ls_talib.__all__)
# TODO: 遇到问题:如果用jit修饰后,无法用inspect获得默认参数
# TODO: 遇到问题:如果用jit修饰后,无法用inspect获得默认参数
# (TODO: problem — after decorating with jit, inspect can no longer recover
#  the default parameters.)
class Function(ab.Function):
    """Uniform wrapper over TA-Lib abstract functions and the local ls_talib set.

    Indicators named in _LS_FUNCTION_NAMES are dispatched to ls_talib and get a
    locally managed parameter dict and a hand-maintained lookback table; all
    other names fall through to talib.abstract.Function. Python 2 module
    (iteritems, print statements elsewhere in the file).
    """

    def __init__(self, func_name, *args, **kwargs):
        """
        :type kwargs: object
        """
        # Double-underscore attributes are name-mangled to _Function__*,
        # deliberately separate from the base class's private state.
        self.__name = func_name.upper()
        self.__parameters = {}
        # self.__opt_inputs = OrderedDict()
        # self.__info = None
        if self.__name in _LS_FUNCTION_NAMES:
            # Borrow the ls_talib function's docstring for this wrapper.
            self.__doc__ = ls_talib.__getattribute__(self.__name).func_doc
            # self.parameters = {}
        else:
            super(Function, self).__init__(func_name, *args, **kwargs)
        if kwargs:
            self.parameters = kwargs
            # raise Exception('%s not supported by LS_TA-LIB.' % self.__name)
        # self.set_function_args(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Late-bind parameters from the call site when none were set yet.
        if not self.parameters:
            self.parameters.update(**kwargs)
        if self.__name in _LS_FUNCTION_NAMES:
            func = ls_talib.__getattribute__(self.__name)
            return func(*args, **kwargs)
        else:
            return super(Function, self).__call__(*args, **kwargs)

    @property
    def parameters(self):
        # ls_talib indicators use the local dict; others delegate to talib.
        if self.__name in _LS_FUNCTION_NAMES:
            return self.__parameters
        else:
            return super(Function, self).parameters

    @parameters.setter
    def parameters(self, parameters):
        if self.__name in _LS_FUNCTION_NAMES:
            self.__parameters.update(parameters)
        else:
            super(Function, self).set_parameters(parameters)

    @property
    def lookback(self):
        """Number of leading NaN rows the indicator produces."""
        if self.__name in _LS_FUNCTION_NAMES:
            # Fall back to the function's declared defaults when no
            # parameters have been set on this wrapper.
            kwargs = self.parameters if self.parameters else self.__get_default_args(self.__name)
            return self.__lookback(self.__name, **kwargs)
        else:
            return super(Function, self).lookback

    @staticmethod
    def __lookback(func_name, timeperiod=np.nan, timeperiod1=np.nan, timeperiod2=np.nan, timeperiod3=np.nan, timeperiod4=np.nan):
        # Hand-maintained lookback formulas, keyed by indicator name and
        # grouped by how many period parameters each indicator takes.
        tables = {
            # =====================================0 个周期
            'WC'   : 0,
            'EMV'  : 1,
            'PVT'  : 1,
            'TR'   : 1,
            'PVI'  : 0,
            'NVI'  : 0,
            # =====================================1 个周期
            'ACC'  : timeperiod * 2,
            'ACD'  : timeperiod,
            'ADTM' : timeperiod,
            'AR'   : timeperiod - 1,
            'ARC'  : timeperiod * 2,
            'ASI'  : timeperiod,
            'BIAS' : timeperiod - 1,
            'BR'   : timeperiod,
            'CMF'  : timeperiod - 1,
            'CR'   : timeperiod,
            'CVI'  : 2 * timeperiod - 1,
            'DDI'  : timeperiod,
            'DPO'  : timeperiod - 1,
            'IMI'  : timeperiod - 1,
            'MI'   : (timeperiod - 1) * 3,
            'MTM'  : timeperiod,
            'QST'  : timeperiod - 1,
            'TMA'  : timeperiod - 1,
            'TS'   : timeperiod - 1,
            'UI'   : timeperiod * 2 - 2,
            # 'UPN' : None,
            'VAMA' : timeperiod - 1,
            'VHF'  : timeperiod,
            'VIDYA': timeperiod,
            'VR'   : timeperiod - 1,
            'VROC' : timeperiod,
            'VRSI' : 1,
            'WAD'  : 0,
            # =============================================== 4 个周期
            'BBI'  : max(timeperiod1, timeperiod2, timeperiod3, timeperiod4) - 1,
            # =============================================== 3 个周期
            'SMI'  : timeperiod1 + timeperiod2 + timeperiod3 - 3,
            'VMACD': max(timeperiod1, timeperiod2) + timeperiod3 - 2,
            'DBCD' : timeperiod1 + timeperiod2 + timeperiod3 - 2,
            'KVO'  : max(timeperiod1, timeperiod2) + timeperiod3 - 2,
            # ============================================== 2 个周期
            'VOSC' : max(timeperiod1, timeperiod2) - 1,
            'RVI'  : timeperiod1 + timeperiod2,
            'TSI'  : timeperiod1 + timeperiod2 - 1,
            'SRSI' : timeperiod1 + timeperiod2 - 1,
            'DMI'  : timeperiod1 + timeperiod2 - 2,
            'RI'   : timeperiod1 + timeperiod2 - 1,
            'VO'   : max(timeperiod1, timeperiod2) - 1,
            'RMI'  : timeperiod1 + timeperiod2,
            'PFE'  : timeperiod1 + timeperiod2 - 1
        }
        # print len(tables)
        return tables[func_name]

    @property
    def default_args(self):
        return self.__get_default_args(self.__name)

    def __get_default_args(self, func_name):
        """
        returns a dictionary of arg_name:default_values for the input function
        """
        func = ls_talib.__getattribute__(func_name)
        args, varargs, keywords, defaults = inspect.getargspec(func)
        # print(func.__name__)
        # print('args={0},varargs={1},keywords={2},defaults={3}'.format(
        #     args, varargs, keywords, defaults))
        if defaults:
            ret = dict(zip(reversed(args), reversed(defaults)))
            # 本来函数的default应该是周期,是整型.
            # 比如ret={'timeperiod1': 14, timeperiod2: 20}
            # 但是有一些函数的缺省值是字符串。这些函数
            # 是为了方便,可以使用不同的price来计算.
            # 比如TMA(prices, timeperiod=14, price='high')
            # 我们要去掉这些字符型的字典项
            # (The defaults should be integer periods, e.g.
            #  ret={'timeperiod1': 14, 'timeperiod2': 20}, but some functions
            #  default a string `price` argument such as
            #  TMA(prices, timeperiod=14, price='high'); drop those entries.)
            numeric_value_dict = {
                key: val for key, val in ret.iteritems() if isinstance(val, int)}
            return numeric_value_dict
        else:
            # print func_name
            return {}
def test_ls_talib():
    """For every ls_talib indicator, check lookback == number of leading NaNs.

    Relies on the module-level price frame `p` that is loaded in the
    __main__ block below.
    """
    for func_name in _LS_FUNCTION_NAMES:
        # Random periods for every possible period parameter; each indicator
        # only consumes the ones its signature declares.
        dict_param = dict(
            timeperiod=np.random.randint(10, 100, 1)[0],
            timeperiod1=np.random.randint(10, 100, 1)[0],
            timeperiod2=np.random.randint(10, 100, 1)[0],
            timeperiod3=np.random.randint(10, 100, 1)[0],
            timeperiod4=np.random.randint(10, 100, 1)[0]
            # timeperiod1 = np.random.randint(10,100,1),
        )
        func = Function(func_name, **dict_param)
        lookback = func.lookback
        default_args = func.default_args
        # Restrict the random parameters to those the indicator actually takes.
        real_args = default_args.copy()
        for key, val in real_args.iteritems():
            real_args[key] = dict_param[key]
        NAs = func(p, **real_args).isnull().sum()
        print(func_name)
        print(dict_param)
        print('lookback={0}'.format(lookback))
        print('number of NA={0}'.format(NAs))
        assert lookback == NAs
def test_talib():
    """Smoke-test the talib.abstract pass-through path with ADOSC.

    Uses the module-level price frame `p` loaded in the __main__ block.
    """
    func_names = filter(str.isupper, dir(ab))
    func_names = filter(lambda x: not x.startswith('_'), func_names)
    print func_names
    ad = Function('ADOSC')
    param = {'fastperiod': 20}
    ad.parameters = param
    ret = ad(p)
    print(dir(ad))
    print(ret)
    print ad.lookback
# print(len(func_names))
# Materialize a module-level Function instance for every ls_talib indicator
# (e.g. ACC = Function('ACC')) so they can be imported by name.
for name in _LS_FUNCTION_NAMES:
    exec "%s = Function('%s')" % (name, name)

__all__ = ['Function'] + list(_LS_FUNCTION_NAMES)

if __name__ == '__main__':
    aroon = Function('AROON')
    import pandas as pd
    # Load the OHLCV fixture and normalize column names to lowercase floats,
    # the shape ls_talib/talib expect.
    p = pd.read_csv('../orcl-2000.csv', index_col=0, parse_dates=True)
    p.columns = [str.lower(col) for col in p.columns]
    p = p.astype(float)
    # print(rmi(p))
    test_ls_talib()
    # test_talib()
| StarcoderdataPython |
1784063 | <gh_stars>0
import yaml
import ocdsmerge
DEFAULT_EXTENSIONS = [
"https://raw.githubusercontent.com/open-contracting/api_extension/eeb2cb400c6f1d1352130bd65b314ab00a96d6ad/extension.json"
]
def prepare_record(releases, ocid):
    """Build an OCDS record for *ocid* from one release or a list of releases.

    Returns a dict holding the raw releases plus the compiled and versioned
    merges produced by `ocdsmerge`.
    """
    release_list = releases if isinstance(releases, list) else [releases]
    return {
        'releases': release_list,
        'compiledRelease': ocdsmerge.merge(release_list),
        'versionedRelease': ocdsmerge.merge_versioned(release_list),
        'ocid': ocid,
    }
def prepare_responce_doc(doc):
    """Strip CouchDB bookkeeping fields from *doc* in place and return it.

    Drops the '_rev' and '$schema' entries and renames '_id' to 'id'.
    A KeyError propagates if any of those keys is absent.
    """
    for internal_key in ('_rev', '$schema'):
        doc.pop(internal_key)
    doc['id'] = doc.pop('_id')
    return doc
def ids_only(doc):
    """Return just the identifiers of *doc*, removing them from it.

    Note the side effect: `pop` deletes 'id' and 'ocid' from the caller's
    dict, mirroring the original behaviour.
    """
    id_value = doc.pop('id')
    ocid_value = doc.pop('ocid')
    return {"id": id_value, "ocid": ocid_value}
def build_meta(options):
    """
    Prepare package metadata(license, publicationPolicy ...)
    """
    # Defaults used when a metainfo file is supplied; its contents are
    # layered on top of these.
    base = {
        'publisher': {
            'name': None,
            'scheme': None,
            'uri': None
        },
        'license': None,
        'publicationPolicy': None,
        'version': options.get('version', "1.1"),
        'extensions': DEFAULT_EXTENSIONS
    }
    if 'metainfo.file' in options:
        info = options['metainfo.file']
        with open(info) as _in:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary Python tags from an untrusted file — yaml.safe_load
            # is the safe choice if the metainfo file is user-supplied.
            metainfo = yaml.load(_in)
        base.update(metainfo)
        return base
    else:
        # NOTE(review): unlike the file branch, this result carries no
        # 'extensions' key — confirm whether that asymmetry is intended.
        return {
            'publisher': {
                'name': options.get('publisher.name'),
                'scheme': options.get('publisher.scheme'),
                'uri': options.get('publisher.uri')
            },
            'license': options.get('license'),
            'publicationPolicy': options.get('publicationPolicy'),
            'version': options.get('version', "1.1")
        }
def get_or_create_db(server, name):
    """Return the database *name* from *server*, creating it first if absent."""
    exists = name in server
    if not exists:
        server.create(name)
    return server[name]
| StarcoderdataPython |
5138383 | <reponame>vaibhav92/op-test-framework
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import unittest
import pexpect
import time
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed
class Console():
    # Defaults for the dd transfer: block size in bytes and block count.
    # Subclasses override these to vary the payload size.
    bs = 1024
    count = 8

    def setUp(self):
        conf = OpTestConfiguration.conf
        self.bmc = conf.bmc()
        self.system = conf.system()

    def runTest(self):
        """Stream bs*count zero bytes over the SOL console and verify nothing is lost."""
        self.system.goto_state(OpSystemState.PETITBOOT_SHELL)
        console = self.bmc.get_host_console()
        self.system.host_console_unique_prompt()
        bs = self.bs
        count = self.count
        # hexdump -C emits one line per 16 bytes, so the payload must divide evenly.
        self.assertTrue( (bs*count)%16 == 0, "Bug in test writer. Must be multiple of 16 bytes: bs %u count %u / 16 = %u" % (bs, count, (bs*count)%16))
        try:
            zeros = console.run_command("dd if=/dev/zero bs=%u count=%u|hexdump -C -v" % (bs, count), timeout=240)
        except CommandFailed as cf:
            # NOTE(review): if CommandFailed is raised with exitcode 0, `zeros`
            # is never bound and the length check below raises NameError — verify.
            self.assertEqual(cf.exitcode, 0)
        # Expected output: one hexdump line per 16 bytes plus 3 lines of dd status.
        expected = 3+(count*bs)/16
        self.assertTrue( len(zeros) == expected, "Unexpected length of zeros %u != %u" % (len(zeros), expected))
class Console8k(Console, unittest.TestCase):
    # 1 KiB x 8 blocks = 8 KiB of console output.
    bs = 1024
    count = 8
class Console16k(Console, unittest.TestCase):
    # 1 KiB x 16 blocks = 16 KiB of console output.
    bs = 1024
    count = 16
class Console32k(Console, unittest.TestCase):
    # 1 KiB x 32 blocks = 32 KiB of console output.
    bs = 1024
    count = 32
class ControlC(unittest.TestCase):
CONTROL = 'c'
def setUp(self):
conf = OpTestConfiguration.conf
self.bmc = conf.bmc()
self.system = conf.system()
def cleanup(self):
pass
def runTest(self):
self.system.goto_state(OpSystemState.PETITBOOT_SHELL)
console = self.bmc.get_host_console()
self.system.host_console_unique_prompt()
# I should really make this API less nasty...
raw_console = console.get_console()
#raw_console.sendline("hexdump -C -v /dev/zero")
raw_console.sendline("find /")
time.sleep(2)
raw_console.sendcontrol(self.CONTROL)
BMC_DISCONNECT = 'SOL session closed by BMC'
timeout = 15
try:
rc = raw_console.expect([BMC_DISCONNECT, "\[console-pexpect\]#$"], timeout)
if rc == 0:
raise BMCDisconnected(BMC_DISCONNECT)
self.assertEqual(rc, 1, "Failed to find expected prompt")
except pexpect.TIMEOUT as e:
print e
print "# TIMEOUT waiting for command to finish with ctrl-c."
print "# Everything is terrible. Fail the world, power cycle (if lucky)"
self.system.set_state(OpSystemState.UNKNOWN_BAD)
self.fail("Could not ctrl-c running command in reasonable time")
self.cleanup()
class ControlZ(ControlC):
    # Same scenario as ControlC, but suspends the job with ctrl-Z.
    CONTROL='z'

    def cleanup(self):
        # ctrl-Z leaves a stopped job behind: kill it, then reap it with fg.
        console = self.bmc.get_host_console()
        console.run_command("kill %1")
        console.run_command_ignore_fail("fg")
def suite():
    """Assemble the console test suite in its intended execution order."""
    tests = unittest.TestSuite()
    for case in (Console32k(), ControlZ(), ControlC()):
        tests.addTest(case)
    return tests
| StarcoderdataPython |
3249127 | <filename>folderlib/data/excluded.py
__all__ = [
    "binaries",
    "symlinks",
]

# File extensions treated as binary executables (excluded from processing).
binaries = [
    "exe",
    "out",
    "bin"
]

# Extensions treated as link files (Windows shortcuts).
symlinks = ["lnk"]
| StarcoderdataPython |
3595554 | <gh_stars>1-10
from ..models import District
# Historical (pre-redistricting) legislative districts, keyed by lowercase
# jurisdiction code. These districts have no current OCD division, hence
# division_id=None throughout.
legacy_districts = {
    "md": [
        District("12A", "lower", division_id=None),
        District("12B", "lower", division_id=None),
        District("2C", "lower", division_id=None),
        District("30", "lower", division_id=None),
        District("31", "lower", division_id=None),
        District("33A", "lower", division_id=None),
        District("33B", "lower", division_id=None),
        District("42", "lower", division_id=None),
        District("44", "lower", division_id=None),
        District("47", "lower", division_id=None),
        District("4A", "lower", division_id=None),
        District("4B", "lower", division_id=None),
        District("5A", "lower", division_id=None),
        District("5B", "lower", division_id=None),
    ],
    "nv": [
        District("Capital Senatorial District", "upper", division_id=None),
        District("Central Nevada Senatorial District", "upper", division_id=None),
        District("Clark County, No. 1", "upper", division_id=None),
        District("Clark County, No. 3", "upper", division_id=None),
        District("Clark County, No. 4", "upper", division_id=None),
        District("Clark County, No. 5", "upper", division_id=None),
        District("Clark County, No. 6", "upper", division_id=None),
        District("Clark County, No. 7", "upper", division_id=None),
        District("Clark County, No. 8", "upper", division_id=None),
        District("Clark County, No. 9", "upper", division_id=None),
        # NOTE(review): "No. 9" appears twice and there is no "No. 2" entry —
        # this looks like a typo; confirm against the historical district list.
        District("Clark County, No. 9", "upper", division_id=None),
        District("Clark County, No. 10", "upper", division_id=None),
        District("Clark County, No. 11", "upper", division_id=None),
        District("Clark County, No. 12", "upper", division_id=None),
        District("Rural Nevada Senatorial District", "upper", division_id=None),
        District("Washoe County, No. 1", "upper", division_id=None),
        District("Washoe County, No. 2", "upper", division_id=None),
        District("Washoe County, No. 3", "upper", division_id=None),
        District("Washoe County, No. 4", "upper", division_id=None),
    ],
    # Puerto Rico senate districts are simply numbered 1-8.
    "pr": [District(str(n), "upper", division_id=None) for n in range(1, 9)],
    "vt": [
        District("Addison-Rutland-1", "lower", division_id=None),
        District("Bennington-Rutland-1", "lower", division_id=None),
        District("Chittenden-1-2", "lower", division_id=None),
        District("Chittenden-3-1", "lower", division_id=None),
        District("Chittenden-3-2", "lower", division_id=None),
        District("Chittenden-3-3", "lower", division_id=None),
        District("Chittenden-3-4", "lower", division_id=None),
        District("Chittenden-3-5", "lower", division_id=None),
        District("Chittenden-3-6", "lower", division_id=None),
        District("Chittenden-3-7", "lower", division_id=None),
        District("Chittenden-3-9", "lower", division_id=None),
        District("Chittenden-4", "lower", division_id=None),
        District("Chittenden-8", "lower", division_id=None),
        District("Chittenden-9", "lower", division_id=None),
        District("Franklin-3", "lower", division_id=None),
        District("Grand Isle-Chittenden-1-1", "lower", division_id=None),
        District("Lamoille-4", "lower", division_id=None),
        District("Lamoille-Washington-1", "lower", division_id=None),
        District("Orange-Addison-1", "lower", division_id=None),
        District("Orleans-Caledonia-1", "lower", division_id=None),
        District("Rutland-1-1", "lower", division_id=None),
        District("Rutland-1-2", "lower", division_id=None),
        District("Rutland-7", "lower", division_id=None),
        District("Rutland-8", "lower", division_id=None),
        District("Washington-3-2", "lower", division_id=None),
        District("Washington-3-3", "lower", division_id=None),
        District("Washington-Chittenden-1", "lower", division_id=None),
        District("Windham-2", "lower", division_id=None),
        District("Windham-3-1", "lower", division_id=None),
        District("Windham-3-3", "lower", division_id=None),
        District("Windham-Bennington-1", "lower", division_id=None),
        District("Windham-Bennington-Windsor-1", "lower", division_id=None),
        District("Windsor-1-1", "lower", division_id=None),
        District("Windsor-1-2", "lower", division_id=None),
        District("Windsor-3", "lower", division_id=None),
        District("Windsor-4", "lower", division_id=None),
        District("Windsor-6-1", "lower", division_id=None),
        District("Windsor-6-2", "lower", division_id=None),
    ],
    # Obsolete US House seats lost to reapportionment.
    "us": [
        District("IL-20", "lower", division_id=None),
        District("MA-10", "lower", division_id=None),
        District("MO-9", "lower", division_id=None),
        District("NJ-13", "lower", division_id=None),
        District("NY-29", "lower", division_id=None),
        District("OH-17", "lower", division_id=None),
        District("OH-18", "lower", division_id=None),
        District("OK-6", "lower", division_id=None),
    ],
}
1924928 | <reponame>dirty-cat/categorical-encoding<filename>benchmarks/similarity_scores_time_benchmark.py
"""
Benchmark time consumption and scores for K-means and most frequent strategies.
We use the traffic_violations dataset to benchmark the different dimensionality
reduction strategies used in similarity encoding.
Parameters that are modified:
- Number of rows in datasets: 10k, 20k, 50k, 100k and nuniques.
- Hashing dimensions: 2 ** 14, 2 ** 16, 2 ** 18, 2 ** 20
- Ngram-range: (3, 3), (2, 4)
"""
# We filter out the warning asking us to specify the solver in the logistic_regression
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from time import time
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import pipeline, linear_model, model_selection
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from dirty_cat import SimilarityEncoder
from dirty_cat.datasets import fetch_traffic_violations
data = fetch_traffic_violations()
dfr = pd.read_csv(data['path'])
# Column transformers shared by every benchmark configuration: the listed
# categorical columns are one-hot encoded, 'Year' is passed through
# unchanged, and benchmark() appends a SimilarityEncoder for 'Description'.
transformers = [
    ('one_hot', OneHotEncoder(sparse=False, handle_unknown='ignore'),
     ['Alcohol',
      'Arrest Type',
      'Belts',
      'Commercial License',
      'Commercial Vehicle',
      'Fatal',
      'Gender',
      'HAZMAT',
      'Property Damage',
      'Race',
      'Work Zone']),
    ('pass', 'passthrough', ['Year']),
]
def benchmark(strat='k-means', limit=50000, n_proto=100, hash_dim=None, ngram_range=(3, 3)):
    """Vectorize a row subset and cross-validate a logistic regression on it.

    Parameters
    ----------
    strat : str
        Prototype-selection strategy: 'k-means' selects k-means prototypes,
        any other value falls back to the most-frequent categories.
    limit : int
        Number of rows taken from the head of the shared ``dfr`` frame.
    n_proto : int
        Number of prototypes used by the SimilarityEncoder.
    hash_dim : int or None
        Hashing dimension for the encoder; None uses plain counts.
    ngram_range : tuple of int
        Character n-gram range for the ngram similarity.

    Returns
    -------
    tuple
        (fit_transform time in seconds, array of 20 cross-validation
        scores, total cross-validation time in seconds).
    """
    df = dfr[:limit].copy()
    df = df.dropna(axis=0)
    df = df.reset_index()
    y = df['Violation Type']
    # Both strategies share every encoder parameter except `categories`,
    # so build the encoder once instead of duplicating the whole call.
    categories = 'k-means' if strat == 'k-means' else 'most_frequent'
    sim_enc = SimilarityEncoder(similarity='ngram', ngram_range=ngram_range,
                                categories=categories, hashing_dim=hash_dim,
                                n_prototypes=n_proto, random_state=3498)
    column_trans = ColumnTransformer(
        transformers=transformers + [('sim_enc', sim_enc, ['Description'])],
        remainder='drop'
    )
    t0 = time()
    X = column_trans.fit_transform(df)
    t_score_1 = time() - t0
    model = pipeline.Pipeline([('logistic', linear_model.LogisticRegression())])
    t0 = time()
    m_score = model_selection.cross_val_score(model, X, y, cv=20)
    t_score_2 = time() - t0
    return t_score_1, m_score, t_score_2
def plot(bench, title=''):
    """Save a score boxplot and a timing barplot for one benchmark batch.

    Parameters
    ----------
    bench : list of tuple
        One entry per vectorizer, each a pair
        (k-means result, most-frequent result) as returned by
        ``benchmark()``.
    title : str
        Plot title; also slugified into the output PNG file names.
    """
    sns.set(style='ticks', palette='muted')
    hash_dims = ['Count', '2 ** 14', '2 ** 16', '2 ** 18', '2 ** 20']
    scores = []
    vectorizer = []
    strategy = []
    for i, e in enumerate(bench):
        # e[0]/e[1] hold the k-means / most-frequent results; each carries
        # one score per CV fold, labelled with the vectorizer used.
        vectorizer.extend([hash_dims[i % 5]] * (2 * len(e[0][1])))
        strategy.extend(['k-means'] * len(e[0][1]))
        strategy.extend(['most-frequent'] * len(e[1][1]))
        scores.extend(e[0][1])
        scores.extend(e[1][1])
    df = pd.DataFrame(columns=['vectorizer', 'strategy', 'score'])
    df['vectorizer'] = vectorizer
    df['strategy'] = strategy
    df['score'] = scores
    first = plt.figure()
    ax = sns.boxplot(x='vectorizer', y='score', hue='strategy', data=df)
    # benchmark() runs cross_val_score with cv=20, so the label says 20
    # (the previous label wrongly claimed 10 cross validations).
    ax.set(title=title, xlabel='Vectorizer used',
           ylabel='Mean score on 20 cross validations')
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    first.tight_layout()
    vectorizer.clear()
    scores.clear()
    strategy.clear()
    times = []
    for i, e in enumerate(bench):
        vectorizer.extend([hash_dims[i % 5]] * 4)
        strategy.extend(['K-means vect', 'K-means X-val', 'MF vect', 'MF X-val'])
        # Divide the total cross-validation time by the 20 folds to show a
        # per-fold time comparable to the one-shot vectorization time.
        times.extend([e[0][0], e[0][2] / 20, e[1][0], e[1][2] / 20])
    df = pd.DataFrame(columns=['vectorizer', 'strategy/operation', 'time'])
    df['vectorizer'] = vectorizer
    df['strategy/operation'] = strategy
    df['time'] = times
    second = plt.figure()
    ax1 = sns.barplot(x='vectorizer', y='time', hue='strategy/operation', data=df)
    ax1.set(title=title, xlabel='Vectorizer used', ylabel='Time in seconds')
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    second.tight_layout()
    # Slugify the title into safe file names for the two output images.
    title = title.replace(' ', '_').replace(':', '-').replace(',', '_').lower()
    first.savefig(title + '_score.png')
    second.savefig(title + '_time.png')
def loop(proto):
    """Run the full benchmark grid and emit one pair of plots per setting.

    For every (row-limit, n-gram-range) combination, benchmark both
    prototype strategies across all five vectorizer dimensions, then hand
    the five result pairs to ``plot()`` under a descriptive title.

    Parameters
    ----------
    proto : int
        Number of prototypes passed through to ``benchmark()``.
    """
    row_limits = sorted([dfr['Description'].nunique(), 10000, 20000, 50000, 100000])
    vectorizer_dims = [None, 2 ** 14, 2 ** 16, 2 ** 18, 2 ** 20]
    ngram_ranges = [(3, 3), (2, 4)]
    for n_rows in row_limits:
        for ngram in ngram_ranges:
            # One (k-means, most-frequent) result pair per vectorizer.
            results = [
                (benchmark(strat='k-means', limit=n_rows, n_proto=proto,
                           hash_dim=dim, ngram_range=ngram),
                 benchmark(strat='most-frequent', limit=n_rows, n_proto=proto,
                           hash_dim=dim, ngram_range=ngram))
                for dim in vectorizer_dims
            ]
            plot(results,
                 'N-gram range: %s, Rows: %d, Prototypes: %d, 20 CV'
                 % (str(ngram), n_rows, proto))


if __name__ == '__main__':
    loop(100)
| StarcoderdataPython |
1722422 | <filename>custom_imports/sample_importers/__init__.py
"""Re-export the bundled sample importer implementations at package level."""
from custom_imports.sample_importers.config_importer import cfg_importer, ini_importer
from custom_imports.sample_importers.csv_importer import CSVImporter
from custom_imports.sample_importers.json_importer import json_importer

# Public API for `from custom_imports.sample_importers import *`.
__all__ = ["json_importer", "cfg_importer", "ini_importer", "CSVImporter"]
| StarcoderdataPython |
11219852 | <reponame>shivharis/pybind
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class extended_data(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-ssm-operational - based on the path /acl-state/vxlan-acl/extended-data. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__acl_name','__seq_num','__permit_deny','__dst_vtep_ip','__src_vtep_ip','__vni','__vni_mask','__native_tag','__dst_ip','__dst_ip_mask','__src_ip','__src_ip_mask','__dst_port','__src_port','__count','__byte_count','__transit_name','__sflow','__redir_interface','__mirror_interface',)
_yang_name = 'extended-data'
_rest_name = 'extended-data'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
self.__sflow = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='boolean', is_config=False)
self.__src_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-port", rest_name="src-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__native_tag = YANGDynClass(base=unicode, is_leaf=True, yang_name="native-tag", rest_name="native-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__acl_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__seq_num = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
self.__vni_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__redir_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="redir-interface", rest_name="redir-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__vni = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__byte_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="byte-count", rest_name="byte-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
self.__src_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-ip", rest_name="src-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__permit_deny = YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__dst_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-port", rest_name="dst-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__dst_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-ip-mask", rest_name="dst-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__dst_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-ip", rest_name="dst-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__src_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-ip-mask", rest_name="src-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__src_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__dst_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__mirror_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="mirror-interface", rest_name="mirror-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__transit_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="transit-name", rest_name="transit-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'acl-state', u'vxlan-acl', u'extended-data']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'acl-state', u'vxlan-acl', u'extended-data']
def _get_acl_name(self):
"""
Getter method for acl_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/acl_name (string)
YANG Description: input_Acl_name
"""
return self.__acl_name
def _set_acl_name(self, v, load=False):
"""
Setter method for acl_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/acl_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_acl_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_acl_name() directly.
YANG Description: input_Acl_name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """acl_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__acl_name = t
if hasattr(self, '_set'):
self._set()
def _unset_acl_name(self):
self.__acl_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_seq_num(self):
"""
Getter method for seq_num, mapped from YANG variable /acl_state/vxlan_acl/extended_data/seq_num (uint32)
YANG Description: sequence number
"""
return self.__seq_num
def _set_seq_num(self, v, load=False):
"""
Setter method for seq_num, mapped from YANG variable /acl_state/vxlan_acl/extended_data/seq_num (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_seq_num is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_seq_num() directly.
YANG Description: sequence number
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """seq_num must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)""",
})
self.__seq_num = t
if hasattr(self, '_set'):
self._set()
def _unset_seq_num(self):
self.__seq_num = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
def _get_permit_deny(self):
"""
Getter method for permit_deny, mapped from YANG variable /acl_state/vxlan_acl/extended_data/permit_deny (string)
YANG Description: permit or deny
"""
return self.__permit_deny
def _set_permit_deny(self, v, load=False):
"""
Setter method for permit_deny, mapped from YANG variable /acl_state/vxlan_acl/extended_data/permit_deny (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_permit_deny is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_permit_deny() directly.
YANG Description: permit or deny
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """permit_deny must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__permit_deny = t
if hasattr(self, '_set'):
self._set()
def _unset_permit_deny(self):
self.__permit_deny = YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_dst_vtep_ip(self):
"""
Getter method for dst_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_vtep_ip (inet:ipv4-address)
YANG Description: dst vtep ip or any
"""
return self.__dst_vtep_ip
def _set_dst_vtep_ip(self, v, load=False):
"""
Setter method for dst_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
YANG Description: dst vtep ip or any
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set()
def _unset_dst_vtep_ip(self):
self.__dst_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_src_vtep_ip(self):
"""
Getter method for src_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_vtep_ip (inet:ipv4-address)
YANG Description: src vtep ip or any
"""
return self.__src_vtep_ip
def _set_src_vtep_ip(self, v, load=False):
"""
Setter method for src_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_vtep_ip() directly.
YANG Description: src vtep ip or any
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """src_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__src_vtep_ip = t
if hasattr(self, '_set'):
self._set()
def _unset_src_vtep_ip(self):
self.__src_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_vni(self):
"""
Getter method for vni, mapped from YANG variable /acl_state/vxlan_acl/extended_data/vni (string)
YANG Description: vni number or any
"""
return self.__vni
def _set_vni(self, v, load=False):
"""
Setter method for vni, mapped from YANG variable /acl_state/vxlan_acl/extended_data/vni (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_vni is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vni() directly.
YANG Description: vni number or any
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vni must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__vni = t
if hasattr(self, '_set'):
self._set()
def _unset_vni(self):
self.__vni = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_vni_mask(self):
"""
Getter method for vni_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/vni_mask (string)
YANG Description: vni mask
"""
return self.__vni_mask
def _set_vni_mask(self, v, load=False):
"""
Setter method for vni_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/vni_mask (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_vni_mask is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vni_mask() directly.
YANG Description: vni mask
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vni_mask must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__vni_mask = t
if hasattr(self, '_set'):
self._set()
def _unset_vni_mask(self):
self.__vni_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_native_tag(self):
"""
Getter method for native_tag, mapped from YANG variable /acl_state/vxlan_acl/extended_data/native_tag (string)
YANG Description: native tag
"""
return self.__native_tag
def _set_native_tag(self, v, load=False):
"""
Setter method for native_tag, mapped from YANG variable /acl_state/vxlan_acl/extended_data/native_tag (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_native_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_native_tag() directly.
YANG Description: native tag
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="native-tag", rest_name="native-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """native_tag must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="native-tag", rest_name="native-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__native_tag = t
if hasattr(self, '_set'):
self._set()
def _unset_native_tag(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__native_tag = YANGDynClass(base=unicode, is_leaf=True, yang_name="native-tag", rest_name="native-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_dst_ip(self):
  """
  Getter method for dst_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_ip (inet:ipv4-address)

  YANG Description: dst ip or any
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__dst_ip
def _set_dst_ip(self, v, load=False):
  """
  Setter method for dst_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_ip (inet:ipv4-address)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dst_ip is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dst_ip() directly.

  YANG Description: dst ip or any
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into the generated type (dotted-quad pattern check); raises on bad input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-ip", rest_name="dst-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dst_ip must be of a type compatible with inet:ipv4-address""",
      'defined-type': "inet:ipv4-address",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-ip", rest_name="dst-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)""",
    })
  self.__dst_ip = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_dst_ip(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__dst_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-ip", rest_name="dst-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_dst_ip_mask(self):
  """
  Getter method for dst_ip_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_ip_mask (uint16)

  YANG Description: dst ip mask
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__dst_ip_mask
def _set_dst_ip_mask(self, v, load=False):
  """
  Setter method for dst_ip_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_ip_mask (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dst_ip_mask is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dst_ip_mask() directly.

  YANG Description: dst ip mask
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint16; raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-ip-mask", rest_name="dst-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dst_ip_mask must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-ip-mask", rest_name="dst-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)""",
    })
  self.__dst_ip_mask = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_dst_ip_mask(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__dst_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-ip-mask", rest_name="dst-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
def _get_src_ip(self):
  """
  Getter method for src_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_ip (inet:ipv4-address)

  YANG Description: src ip or any
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__src_ip
def _set_src_ip(self, v, load=False):
  """
  Setter method for src_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_ip (inet:ipv4-address)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_src_ip is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_src_ip() directly.

  YANG Description: src ip or any
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into the generated type (dotted-quad pattern check); raises on bad input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-ip", rest_name="src-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """src_ip must be of a type compatible with inet:ipv4-address""",
      'defined-type': "inet:ipv4-address",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-ip", rest_name="src-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)""",
    })
  self.__src_ip = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_src_ip(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__src_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-ip", rest_name="src-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_src_ip_mask(self):
  """
  Getter method for src_ip_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_ip_mask (uint16)

  YANG Description: src ip mask
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__src_ip_mask
def _set_src_ip_mask(self, v, load=False):
  """
  Setter method for src_ip_mask, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_ip_mask (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_src_ip_mask is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_src_ip_mask() directly.

  YANG Description: src ip mask
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint16; raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-ip-mask", rest_name="src-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """src_ip_mask must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-ip-mask", rest_name="src-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)""",
    })
  self.__src_ip_mask = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_src_ip_mask(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__src_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-ip-mask", rest_name="src-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
def _get_dst_port(self):
  """
  Getter method for dst_port, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_port (uint16)

  YANG Description: dst port number
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__dst_port
def _set_dst_port(self, v, load=False):
  """
  Setter method for dst_port, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_port (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dst_port is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dst_port() directly.

  YANG Description: dst port number
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint16; raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-port", rest_name="dst-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dst_port must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-port", rest_name="dst-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)""",
    })
  self.__dst_port = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_dst_port(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__dst_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-port", rest_name="dst-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
def _get_src_port(self):
  """
  Getter method for src_port, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_port (uint16)

  YANG Description: src port number
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__src_port
def _set_src_port(self, v, load=False):
  """
  Setter method for src_port, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_port (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_src_port is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_src_port() directly.

  YANG Description: src port number
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint16; raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-port", rest_name="src-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """src_port must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-port", rest_name="src-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)""",
    })
  self.__src_port = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_src_port(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__src_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-port", rest_name="src-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
def _get_count(self):
  """
  Getter method for count, mapped from YANG variable /acl_state/vxlan_acl/extended_data/count (uint64)

  YANG Description: count
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__count
def _set_count(self, v, load=False):
  """
  Setter method for count, mapped from YANG variable /acl_state/vxlan_acl/extended_data/count (uint64)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_count is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_count() directly.

  YANG Description: count
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint64 (Python 2 long); raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """count must be of a type compatible with uint64""",
      'defined-type': "uint64",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)""",
    })
  self.__count = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_count(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
def _get_byte_count(self):
  """
  Getter method for byte_count, mapped from YANG variable /acl_state/vxlan_acl/extended_data/byte_count (uint64)

  YANG Description: byte_count
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__byte_count
def _set_byte_count(self, v, load=False):
  """
  Setter method for byte_count, mapped from YANG variable /acl_state/vxlan_acl/extended_data/byte_count (uint64)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_byte_count is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_byte_count() directly.

  YANG Description: byte_count
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a range-checked uint64 (Python 2 long); raises on incompatible input.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="byte-count", rest_name="byte-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """byte_count must be of a type compatible with uint64""",
      'defined-type': "uint64",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="byte-count", rest_name="byte-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)""",
    })
  self.__byte_count = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_byte_count(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__byte_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="byte-count", rest_name="byte-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
def _get_transit_name(self):
  """
  Getter method for transit_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/transit_name (string)

  YANG Description: transit_name
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__transit_name
def _set_transit_name(self, v, load=False):
  """
  Setter method for transit_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/transit_name (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_transit_name is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_transit_name() directly.

  YANG Description: transit_name
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into the generated YANG leaf type; raises on incompatible input.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="transit-name", rest_name="transit-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """transit_name must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="transit-name", rest_name="transit-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
    })
  self.__transit_name = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_transit_name(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__transit_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="transit-name", rest_name="transit-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_sflow(self):
  """
  Getter method for sflow, mapped from YANG variable /acl_state/vxlan_acl/extended_data/sflow (boolean)

  YANG Description: sflow enable or disable
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__sflow
def _set_sflow(self, v, load=False):
  """
  Setter method for sflow, mapped from YANG variable /acl_state/vxlan_acl/extended_data/sflow (boolean)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_sflow is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_sflow() directly.

  YANG Description: sflow enable or disable
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into a YANG boolean; raises on incompatible input.
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """sflow must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='boolean', is_config=False)""",
    })
  self.__sflow = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_sflow(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__sflow = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='boolean', is_config=False)
def _get_redir_interface(self):
  """
  Getter method for redir_interface, mapped from YANG variable /acl_state/vxlan_acl/extended_data/redir_interface (string)

  YANG Description: redirect interface
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__redir_interface
def _set_redir_interface(self, v, load=False):
  """
  Setter method for redir_interface, mapped from YANG variable /acl_state/vxlan_acl/extended_data/redir_interface (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_redir_interface is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_redir_interface() directly.

  YANG Description: redirect interface
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into the generated YANG leaf type; raises on incompatible input.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="redir-interface", rest_name="redir-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """redir_interface must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="redir-interface", rest_name="redir-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
    })
  self.__redir_interface = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_redir_interface(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__redir_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="redir-interface", rest_name="redir-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_mirror_interface(self):
  """
  Getter method for mirror_interface, mapped from YANG variable /acl_state/vxlan_acl/extended_data/mirror_interface (string)

  YANG Description: mirror interface
  """
  # Name-mangled attribute holding the YANGDynClass-wrapped leaf value.
  return self.__mirror_interface
def _set_mirror_interface(self, v, load=False):
  """
  Setter method for mirror_interface, mapped from YANG variable /acl_state/vxlan_acl/extended_data/mirror_interface (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_mirror_interface is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_mirror_interface() directly.

  YANG Description: mirror interface
  """
  # Unwrap proxy values that advertise their underlying type via _utype.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce v into the generated YANG leaf type; raises on incompatible input.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mirror-interface", rest_name="mirror-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """mirror_interface must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mirror-interface", rest_name="mirror-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
    })
  self.__mirror_interface = t
  # Invoke the change hook, if the enclosing container registered one.
  if hasattr(self, '_set'):
    self._set()
def _unset_mirror_interface(self):
  # Reset the leaf to a fresh, empty generated-type instance (its default state).
  self.__mirror_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="mirror-interface", rest_name="mirror-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
# Public read-only properties. These leaves are config:false, so only a getter
# is bound; direct assignment from user code raises AttributeError.
acl_name = __builtin__.property(_get_acl_name)
seq_num = __builtin__.property(_get_seq_num)
permit_deny = __builtin__.property(_get_permit_deny)
dst_vtep_ip = __builtin__.property(_get_dst_vtep_ip)
src_vtep_ip = __builtin__.property(_get_src_vtep_ip)
vni = __builtin__.property(_get_vni)
vni_mask = __builtin__.property(_get_vni_mask)
native_tag = __builtin__.property(_get_native_tag)
dst_ip = __builtin__.property(_get_dst_ip)
dst_ip_mask = __builtin__.property(_get_dst_ip_mask)
src_ip = __builtin__.property(_get_src_ip)
src_ip_mask = __builtin__.property(_get_src_ip_mask)
dst_port = __builtin__.property(_get_dst_port)
src_port = __builtin__.property(_get_src_port)
count = __builtin__.property(_get_count)
byte_count = __builtin__.property(_get_byte_count)
transit_name = __builtin__.property(_get_transit_name)
sflow = __builtin__.property(_get_sflow)
redir_interface = __builtin__.property(_get_redir_interface)
mirror_interface = __builtin__.property(_get_mirror_interface)

# Element-name -> property map consumed by pyangbind's iteration/serialisation helpers.
_pyangbind_elements = {'acl_name': acl_name, 'seq_num': seq_num, 'permit_deny': permit_deny, 'dst_vtep_ip': dst_vtep_ip, 'src_vtep_ip': src_vtep_ip, 'vni': vni, 'vni_mask': vni_mask, 'native_tag': native_tag, 'dst_ip': dst_ip, 'dst_ip_mask': dst_ip_mask, 'src_ip': src_ip, 'src_ip_mask': src_ip_mask, 'dst_port': dst_port, 'src_port': src_port, 'count': count, 'byte_count': byte_count, 'transit_name': transit_name, 'sflow': sflow, 'redir_interface': redir_interface, 'mirror_interface': mirror_interface, }
| StarcoderdataPython |
6581829 | <gh_stars>1-10
from .contrib import drivers
from .contrib import providers | StarcoderdataPython |
1658019 | #!/usr/bin/env python
################################################################################
#
# A poorly written Slack integration that enables querying Virustotal
# directly from Slack
#
# https://github.com/ecapuano/slackbot
#
################################################################################
import bottle
import urllib
import urllib2
import argparse
import hashlib
import re
import requests
import json
import socket
import config
import sys
import logging
# Module-level configuration: console-echo flag, file logger, and the bottle app
# object (exported as both `app` and the WSGI-conventional `application`).
debug = "no" # set to 'yes' to print messages to console
logging.basicConfig(filename='virustotal.log',format='%(asctime)s %(message)s',level=logging.INFO)
logging.info('Server started.')
app = application = bottle.Bottle()
@app.route('/', method='POST')
def slack_post():
    """Handle an incoming Slack slash-command POST and dispatch a VT lookup.

    NOTE(review): relies on module globals `vt` (a vtAPI instance, defined
    elsewhere in this file), `debug`, and `config.slack_token`. Several of the
    Slack form fields read below are currently unused.
    """
    body = bottle.request.body.read()
    # Standard Slack slash-command form fields.
    token = bottle.request.forms.get('token')
    team_id = bottle.request.forms.get('team_id')
    team_domain = bottle.request.forms.get('team_domain')
    service_id = bottle.request.forms.get('service_id')
    channel_id = bottle.request.forms.get('channel_id')
    channel_name = bottle.request.forms.get('channel_name')
    timestamp = bottle.request.forms.get('timestamp')
    user_id = bottle.request.forms.get('user_id')
    user_name = bottle.request.forms.get('user_name')
    vtarg = bottle.request.forms.get('text')  # the resource to look up (URL or MD5)
    trigger_words = bottle.request.forms.get('trigger_words')
    response_url = bottle.request.forms.get('response_url')
    # Shared-secret check: reject requests that do not carry our integration token.
    if token != config.slack_token: # integration token
        print "INVALID REQUEST RECEIVED! --> %s" % body
        logging.warning('Invalid Request Received %s', body)
        return "LOL NOPE"
    logging.debug('Incoming request: %s', body)
    logging.info('VT request received from user: %s - resource: %s', user_name, vtarg)
    # Crude routing: URL-looking input -> URL report; 32-hex-char input -> file report.
    if ("http" in vtarg) or ("www" in vtarg) or (".com" in vtarg):
        if debug == "yes":
            print "URL Detected"
        logging.info('URL Detected')
        vt.urlScan(vtarg,user_name,response_url)
    elif re.findall(r"([a-fA-F\d]{32})", vtarg):
        if debug == "yes":
            print "MD5 detected"
        logging.info('MD5 Detected')
        vt.getReport(vtarg,user_name,response_url)
    else:
        if debug == "yes":
            print "Not URL or MD5"
        message = "You did not provide a valid URL or MD5 hash.\nPlease try again in the format `/virustotal http://malware.ru` or `/virustotal 99017f6eebbac24f351415dd410d522d`"
        logging.warning('Invalid query passed by user: %s -- %s', user_name, vtarg)
        status = "fail"
        sendToSlack(message,response_url,status)
class vtAPI():
    """Thin wrapper around the VirusTotal public API v2 (Python 2 / urllib2)."""
    def __init__(self):
        # API key is read from the local config module; base URL is VT API v2.
        self.api = config.vt_api
        self.base = 'https://www.virustotal.com/vtapi/v2/'
    def getReport(self,md5,user_name,response_url):
        """Fetch the file report for *md5* and hand the decoded JSON to parse()."""
        param = {'resource':md5,'apikey':self.api}
        url = self.base + "file/report"
        data = urllib.urlencode(param)
        result = urllib2.urlopen(url,data)
        jdata = json.loads(result.read())
        parse(jdata,user_name,response_url)
    def rescan(self,md5):
        """Ask VT to rescan *md5*; a fresh report is available ~10 minutes later."""
        param = {'resource':md5,'apikey':self.api}
        url = self.base + "file/rescan"
        data = urllib.urlencode(param)
        result = urllib2.urlopen(url,data)
        print "\n\tVirus Total Rescan Initiated for -- " + md5 + " (Requery in 10 Mins)"
    def urlScan(self,vtarg,user_name,response_url):
        """Fetch the URL report for *vtarg* and hand the decoded JSON to urlparse()."""
        param = {'resource':vtarg,'apikey':self.api}
        url = self.base + "url/report"
        data = urllib.urlencode(param)
        result = urllib2.urlopen(url,data)
        jdata = json.loads(result.read())
        urlparse(jdata,user_name,response_url)
################### Not in use yet
def checkMD5(checkval):
    """Return an upper-case MD5 for *checkval*.

    If *checkval* already looks like a 32-hex-digit MD5, upper-case and return
    it; otherwise treat it as a file path and hash the file via md5sum().
    """
    # Anchored with $ so a value that merely STARTS with 32 hex chars is not
    # mistaken for a hash (re.match only anchors at the start of the string).
    if re.match(r"[a-fA-F\d]{32}$", checkval) is None:
        md5 = md5sum(checkval)
        return md5.upper()
    else:
        return checkval.upper()
def md5sum(filename):
    """Return the lower-case hex MD5 digest of *filename*, read in 8 KiB chunks."""
    m = hashlib.md5()
    # BUG FIX: the original opened the file but never closed it, leaking the
    # handle on every call; a context manager guarantees closure.
    with open(filename, 'rb') as fh:
        while True:
            data = fh.read(8192)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
####################
def parse(jdata,user_name,response_url):
if jdata['response_code'] == 0:
message = "That Hash Not Found in VT"
logging.warning('Hash not found in VT')
status = "fail"
sendToSlack(message,response_url,status)
return 0
positives = str(jdata['positives'])
total = str(jdata['total'])
md5 = str(jdata['md5'])
message = "Results for File: \t" + md5 + "\n"
message += "Detected Malicious by: \t" + positives + "/" + total + "\n"
if 'Sophos' in jdata['scans']:
Sophos = "Sophos: \t" + jdata.get('scans', {}).get('Sophos').get('result') + "\n"
message += Sophos
if 'Kaspersky' in jdata['scans']:
Kaspersky = "Kaspersky: \t" + jdata.get('scans', {}).get('Kaspersky').get('result') + "\n"
message += Kaspersky
if 'ESET-NOD32' in jdata['scans']:
ESET = "ESET: \t" + jdata.get('scans', {}).get('ESET-NOD32').get('result') + "\n"
message += ESET
if 'AegisLab' in jdata['scans']:
Aegis = "AegisLab: \t" + jdata.get('scans', {}).get('AegisLab').get('result') + "\n"
message += Sophos
message += 'Scanned on: \t' + jdata['scan_date'] + "\n"
message += jdata['permalink'] + "\n"
if debug == "yes":
print message
status = "pass"
sendToSlack(message,response_url,status)
def urlparse(jdata,user_name,response_url):
if jdata['response_code'] == 0:
message = "That Site Not Found in VT"
logging.warning('Site not found in VT')
status = "fail"
sendToSlack(message,response_url,status)
if debug == "yes":
print "Request from " + user_name + " not found in VT database."
return 0
positives = str(jdata['positives'])
total = str(jdata['total'])
url = jdata['url']
message = "Results for Site: \t" + url + "\n"
message += "Determined Malicious by: \t" + positives + "/" + total + "\n"
logging.info('Determined Malicious by: %s / %s', positives, total)
if 'OpenPhish' in jdata['scans']:
openphish = "OpenPhish: \t" + jdata.get('scans', {}).get('OpenPhish').get('result') + "\n"
message += openphish
if 'PhishLabs' in jdata['scans']:
phishlabs = "PhishLabs: \t" + jdata.get('scans', {}).get('PhishLabs').get('result') + "\n"
message += phishlabs
if 'Sophos' in jdata['scans']:
Sophos = "Sophos: \t" + jdata.get('scans', {}).get('Sophos').get('result') + "\n"
message += Sophos
if 'BitDefender' in jdata['scans']:
BitDefender = "BitDefender: \t" + jdata.get('scans', {}).get('BitDefender').get('result') + "\n"
message += BitDefender
if 'Google Safebrowsing' in jdata['scans']:
googlesafe = "Google: \t" + jdata.get('scans', {}).get('Google Safebrowsing').get('result') + "\n"
message += googlesafe
if 'Avira' in jdata['scans']:
Avira = "Avira: \t" + jdata.get('scans', {}).get('Avira').get('result') + "\n"
message += Avira
message += 'Scanned on: \t' + jdata['scan_date'] + "\n"
message += jdata['permalink'] + "\n"
if debug == "yes":
print message
status = "pass"
sendToSlack(message,response_url,status)
def sendToSlack(message, response_url, status):
    """Deliver a result message back to Slack.

    The message is always posted to the requester's private response URL;
    when *status* is "pass" it is additionally posted to the shared channel
    configured in ``config.slack_url``.
    """
    channel_url = config.slack_url
    payload = {"username": 'VirusTotal', "text": message}
    json_headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    body = json.dumps(payload)
    # Private reply to the user who issued the slash command.
    private_response = requests.post(response_url, data=body, headers=json_headers)
    logging.info('Message returned to user')
    # Successful lookups are also shared publicly in the configured channel.
    if status == "pass":
        requests.post(channel_url, data=body, headers=json_headers)
        logging.info('Message sent to slack channel')
    logging.debug('Outbound message status: %s', private_response.content)
# Entry point: create the shared VirusTotal client used by the handlers,
# then start the Bottle web app on the configured interface/port.
if __name__ == '__main__':
    vt = vtAPI()
    bottle.run(app, host=config.listen_ip, port=config.listen_port)
| StarcoderdataPython |
4841826 | <gh_stars>1-10
import sys
def prt(out=sys.stdout):
    """Write a fixed demo line (with trailing newline) to *out*."""
    message = 'Just a simple print\n'
    out.write(message)
| StarcoderdataPython |
8123448 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
try:
import collections.abc as abc
except ImportError:
import collections as abc
from dimod.utilities import LockableDict
class BQMView(object):
    """Base for views exposing a binary quadratic model's adjacency dict.

    Only the underlying ``_adj`` mapping is stored; subclasses interpret it
    as linear biases, quadratic biases, or adjacency.
    """

    __slots__ = ("_adj",)

    def __init__(self, bqm):
        self._adj = bqm._adj

    def __getstate__(self):
        # Explicit state dict keeps pickling working despite __slots__
        # (and retains python2 pickle support, as in the original).
        return {'_adj': self._adj}

    def __setstate__(self, state):
        self._adj = state['_adj']
class LinearView(BQMView, abc.MutableMapping):
    """Acts as a dictionary `{v: bias, ...}` for the linear biases.

    The linear biases are stored in a dict-of-dicts format, where 'self loops'
    store the linear biases.
    So `{v: bias}` is stored `._adj = {v: {v: Bias(bias)}}`.
    If v is not in ._adj[v] then the bias is treated as 0.
    """
    def __delitem__(self, v):
        """Remove variable *v*; refuses if v still has interactions."""
        if v not in self:
            raise KeyError
        adj = self._adj
        # Neighbour count excluding the self-loop that stores the linear bias.
        if len(adj[v]) - (v in adj[v]) > 0:
            raise ValueError("there are interactions associated with {} that must be deleted first".format(v))
        del adj[v]
    def __getitem__(self, v):
        # developer note: we could try to match the type with other biases in
        # the bqm, but I think it is better to just use python int 0 as it
        # is most likely to be compatible with other numeric types.
        return self._adj[v].get(v, 0)
    def __iter__(self):
        return iter(self._adj)
    def __len__(self):
        return len(self._adj)
    def __setitem__(self, v, bias):
        """Set the linear bias of *v*, creating the variable if needed."""
        adj = self._adj
        if v in adj:
            adj[v][v] = bias
        else:
            # New variables start with only their self-loop entry.
            adj[v] = LockableDict({v: bias})
    def __str__(self):
        return str(dict(self))
    def items(self):
        # Override the abc default: LinearItemsView iterates in a single pass.
        return LinearItemsView(self)
class LinearItemsView(abc.ItemsView):
    """Items view over a :class:`LinearView` that avoids per-key lookups."""

    __slots__ = ()

    def __iter__(self):
        adj = self._mapping._adj
        for var, neighbours in adj.items():
            # The self-loop entry holds the linear bias; a missing entry
            # means 0 (same convention as LinearView.__getitem__).
            yield var, neighbours.get(var, 0)
class QuadraticView(BQMView, abc.MutableMapping):
    """Acts as a dictionary `{(u, v): bias, ...}` for the quadratic biases.

    The quadratic biases are stored in a dict-of-dicts format. So `{(u, v): bias}` is stored as
    `._adj = {u: {v: Bias(bias)}, v: {u: Bias(bias)}}`.
    """
    def __delitem__(self, interaction):
        """Remove the (u, v) interaction from both adjacency directions."""
        u, v = interaction
        if u == v:
            raise KeyError('{} is not an interaction'.format(interaction))
        adj = self._adj
        # Delete both mirrored entries to keep the adjacency symmetric.
        del adj[v][u]
        del adj[u][v]
    def __getitem__(self, interaction):
        u, v = interaction
        if u == v:
            # Self-loops store linear biases and are not exposed here.
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        return self._adj[u][v]
    def __iter__(self):
        """Yield each interaction (u, v) exactly once (not both orderings)."""
        seen = set()
        adj = self._adj
        for u, neigh in adj.items():
            for v in neigh:
                if u == v:
                    # not adjacent to itself
                    continue
                if v not in seen:
                    yield (u, v)
            seen.add(u)
    def __len__(self):
        # remove the self-loops; each interaction is stored twice, hence // 2
        return sum(len(neighbours) - (v in neighbours)
                   for v, neighbours in self._adj.items()) // 2
    def __setitem__(self, interaction, bias):
        u, v = interaction
        if u == v:
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        adj = self._adj
        # we don't know what type we want the biases, so we require that the variables already
        # exist before we can add an interaction between them
        if u not in adj:
            raise KeyError('{} is not already a variable in the binary quadratic model'.format(u))
        if v not in adj:
            raise KeyError('{} is not already a variable in the binary quadratic model'.format(v))
        adj[u][v] = adj[v][u] = bias
    def __str__(self):
        return str(dict(self))
    def items(self):
        # Override the abc default for speed (see QuadraticItemsView).
        return QuadraticItemsView(self)
class QuadraticItemsView(abc.ItemsView):
    """Items view over a :class:`QuadraticView` that avoids double lookups."""

    __slots__ = ()

    def __iter__(self):
        adjacency = self._mapping._adj
        # The mapping's own iterator already yields each (u, v) pair once.
        for pair in self._mapping:
            u, v = pair
            yield pair, adjacency[u][v]
class NeighbourView(abc.Mapping):
    """Mapping ``{u: bias, ...}`` of the interactions incident to one variable.

    Wraps the shared adjacency dict rather than copying it, so assignments
    through ``__setitem__`` update both directions of the interaction.

    See Also:
        :class:`AdjacencyView`
    """

    __slots__ = '_adj', '_var'

    def __init__(self, adj, v):
        self._adj = adj
        self._var = v

    def __getitem__(self, u):
        var = self._var
        if var == u:
            # The self-loop stores the linear bias and is hidden from this view.
            raise KeyError('{} cannot have an interaction with itself'.format(var))
        return self._adj[var][u]

    def __setitem__(self, u, bias):
        var = self._var
        if u == var:
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        adj = self._adj
        if u not in adj:
            raise KeyError('{} is not an interaction'.format((u, var)))
        # Keep the adjacency symmetric: write both directions.
        adj[var][u] = adj[u][var] = bias

    def __iter__(self):
        var = self._var
        return (u for u in self._adj[var] if u != var)

    def __len__(self):
        neighbours = self._adj[self._var]
        # Subtract the self-loop (linear bias) entry when present.
        return len(neighbours) - (self._var in neighbours)

    def __str__(self):
        return str(dict(self))
class AdjacencyView(BQMView, abc.Mapping):
    """Dict-of-dicts view over the quadratic biases.

    Presents the adjacency structure ``{u: {v: bias}, v: {u: bias}}`` as a
    mapping from each variable to a :class:`NeighbourView` of its neighbours.
    """

    def __getitem__(self, v):
        adj = self._adj
        if v not in adj:
            raise KeyError('{} is not a variable'.format(v))
        # A live view over the shared adjacency, not a copy.
        return NeighbourView(adj, v)

    def __iter__(self):
        return iter(self._adj)

    def __len__(self):
        return len(self._adj)
| StarcoderdataPython |
170804 | """
Test of the Turbidity meter using an ADC
# The Turbidity sensor mapped from 0 to 1023 (0 - 5 volts)
# ADC maps values -32768 to 32767, GND is 0 (-5 - 5 v)
# Voltage conversion is volts = (reading / 32767) * 5
# This may need some calibration adjustment
"""
# Import the ADS1x15 module.
from ADS1115 import ADS1115
class Turbidity(object):
    """Turbidity sensor read through an ADS1115 analog-to-digital converter."""
    def __init__(self):
        # ADC instance plus the channel and gain used for every reading.
        self._adc = ADS1115()
        self._id = 0
        self._gain = 1
    def getRaw(self):
        """Return the raw ADC reading for the sensor channel.

        Bug fix: the original referenced the undefined module-level names
        ``adc`` and ``GAIN`` and always raised NameError; it now uses the
        instance's ADC, channel id and gain.
        """
        return self._adc.read_adc(self._id, gain=self._gain)
def test():
    """Read one raw turbidity value from the sensor and print it."""
    sensor = Turbidity()
    reading = sensor.getRaw()
    print("Turbidity", reading)
# Allow running this module directly as a hardware smoke test.
if __name__ == "__main__":
    test()
| StarcoderdataPython |
3516504 | <gh_stars>10-100
import abc
from typing import cast
from overhave import db
from overhave.entities import FeatureTypeModel
class BaseFeatureTypeStorageException(Exception):
    """Common base for all :class:`FeatureTypeStorage` errors."""
class FeatureTypeNotExistsError(BaseFeatureTypeStorageException):
    """Raised when a requested feature type cannot be found."""
class IFeatureTypeStorage(abc.ABC):
    """Abstract interface for feature type storage."""
    @staticmethod
    @abc.abstractmethod
    def get_default_feature_type() -> FeatureTypeModel:
        """Return the feature type that should be used by default."""
        pass
    @staticmethod
    @abc.abstractmethod
    def get_feature_type_by_name(name: str) -> FeatureTypeModel:
        """Return the feature type named *name*."""
        pass
class FeatureTypeStorage(IFeatureTypeStorage):
    """Database-backed feature type storage."""
    @staticmethod
    def get_default_feature_type() -> FeatureTypeModel:
        """Return the feature type with the smallest id (treated as default)."""
        with db.create_session() as session:
            # Lowest primary key wins.
            # NOTE(review): first() may return None on an empty table, which
            # would make from_orm fail -- confirm the table is always seeded.
            feature_type: db.FeatureType = session.query(db.FeatureType).order_by(db.FeatureType.id.asc()).first()
            return cast(FeatureTypeModel, FeatureTypeModel.from_orm(feature_type))
    @staticmethod
    def get_feature_type_by_name(name: str) -> FeatureTypeModel:
        """Return the feature type named *name*.

        Raises:
            FeatureTypeNotExistsError: if no feature type has that name.
        """
        with db.create_session() as session:
            feature_type: db.FeatureType = session.query(db.FeatureType).filter(
                db.FeatureType.name == name
            ).one_or_none()
            if feature_type is None:
                raise FeatureTypeNotExistsError(f"Could not find feature type with name='{name}'!")
            return cast(FeatureTypeModel, FeatureTypeModel.from_orm(feature_type))
| StarcoderdataPython |
1973581 | from flaskapp.models.persist import Persistent
import os
# Smoke test: print the working directory, then construct and immediately
# discard a Persistent instance (presumably to exercise its setup/teardown
# paths -- confirm against flaskapp.models.persist).
if __name__ == "__main__":
    print(os.getcwd())
    p = Persistent(max_tries=15)
    del p
| StarcoderdataPython |
def main(expr):
    """Return True if every bracket in *expr* is balanced and well-nested.

    Recognises (), {}, []; all other characters are ignored.

    Fixes over the original:
    - an unmatched closing bracket (e.g. ``main(")")``) previously crashed
      with IndexError from popping an empty stack; it now returns False;
    - integer indices were compared with ``is not`` (identity), which only
      worked by the CPython small-int cache; a direct mapping is used instead.
    """
    opening = '({['
    closing = ')}]'
    # Map each closer to the opener it must match.
    match = dict(zip(closing, opening))
    stack = []
    for ch in expr:
        if ch in opening:
            stack.append(ch)
        elif ch in closing:
            if not stack or stack.pop() != match[ch]:
                return False
    # Balanced only if nothing is left open.
    return len(stack) == 0
# Ad-hoc demo (Python 2 print statement): '{(abc})' is mis-nested -> False.
if __name__ =='__main__':
    print main('{(abc})')
| StarcoderdataPython |
1934555 | <gh_stars>1-10
# Non Parsed URL Model
from crawler.crawler_instance.constants.strings import STRINGS
class image_model:
    """Value object pairing an image URL with a type tag.

    NOTE(review): semantics inferred from names -- m_url is the image's URL,
    m_type presumably its type/category; confirm with callers.
    """
    # Class-level defaults: empty-string sentinel from the shared constants.
    m_url = STRINGS.S_EMPTY
    m_type = STRINGS.S_EMPTY
    def __init__(self, p_url, p_type):
        # Instance attributes shadow the class-level defaults above.
        self.m_url = p_url
        self.m_type = p_type
| StarcoderdataPython |
5180271 | import requests
from bs4 import BeautifulSoup
def macs_search(search_string):
    """
    Search https://www.macscomics.com.au/ for comics matching *search_string*.

    Returns a list of dicts with keys: title, url, price, shop, availability.
    Only results whose title contains every 3+ letter word of the query are
    included.

    Email Us
    <EMAIL>
    Location
    Shop 2/34 Sydney Street Mackay QLD
    """
    shop = "Mac's Comics"
    # ROOT OF URL FOR SEARCH
    base_search_url = "https://www.macscomics.com.au/search?q="
    # TERM THAT SEPARATE WORDS FOR SEARCH
    separator = "+"
    request_headers = {"User-Agent": "Defined"}
    # Normalise the query: lower-case, and use this shop's "vol" spelling
    # instead of "volume".
    search_list = [term.lower() for term in search_string.split(" ")]
    if "volume" in search_list:
        search_list = ["vol" if term == "volume" else term for term in search_list]
    full_search_url = base_search_url + separator.join(search_list)
    # GET WEB PAGE
    response = requests.get(full_search_url, headers=request_headers)
    # RETURN OBJECT IS A LIST OF DICT
    result_holder = []
    if response:  # truthy for HTTP status < 400
        soup = BeautifulSoup(response.text, 'html.parser')
        # One translation table replaces the original chain of seven
        # str.replace() calls used to strip punctuation from title words.
        strip_punct = str.maketrans("", "", ":;,.()")
        # Hoisted out of the loop: the 3+ letter query words are invariant.
        list_word_search = [w.lower() for w in search_list if len(w) > 2]
        for article in soup.find_all("div", class_="row results"):
            comic_title = article.h3.text
            comic_url = "https://www.macscomics.com.au" + article.h3.a["href"]
            # The price only appears on the comic's own page, so fetch it.
            price_text = requests.get(comic_url, headers=request_headers).text
            comic_price = BeautifulSoup(price_text, 'html.parser').find("h2", class_="price").text
            comic_price = float(comic_price.replace("$", ""))
            comic = {"title": comic_title,
                     "url": comic_url,
                     "price": comic_price,
                     "shop": shop,
                     "availability": "In Stock (auto)"}
            # CRUDE MATCH: every 3+ letter query word must appear among the
            # title's 3+ letter words (lower-cased, punctuation removed).
            list_word_title = [w.translate(strip_punct)
                               for w in comic_title.lower().split(" ")
                               if len(w) > 2]
            if all(item in list_word_title for item in list_word_search):
                result_holder.append(comic)
    # ON FAILURE
    else:
        print('An error has occurred searching {}.'.format(shop))
    return result_holder
| StarcoderdataPython |
62913 | <filename>mopidy_sangu/api/__init__.py
import logging
import string
import random
from mopidy_sangu.storage import VoteDatabaseProvider
from mopidy_sangu.api.admin import AdminRequestHandler
from mopidy_sangu.api.unvote import UnVoteRequestHandler
from mopidy_sangu.api.vote import VoteRequestHandler
from mopidy_sangu.api.config import ConfigRequestHandler
from mopidy_sangu.api.vote_data import VoteDataRequestHandler
logger = logging.getLogger(__name__)
def sangu_factory(config, core):
    """Build the Sangu HTTP route table for Mopidy's web server.

    Side effects: sets up and clears the vote database and switches the
    Mopidy tracklist into consume mode (played tracks are removed).
    """
    # Fresh per-startup session id, exposed to clients via /api/config.
    session_id = get_random_alpha_numeric_string()
    votes = VoteDatabaseProvider(config)
    votes.setup()
    votes.clear()
    # Consume mode: a track leaves the tracklist once it has played.
    core.tracklist.set_consume(value=True).get()
    # (url_pattern, handler_class, handler kwargs) triples.
    return [
        (
            "/api/track/(\\d+)?/vote",
            VoteRequestHandler,
            {"core": core, "votes": votes},
        ),
        (
            "/api/track/(\\d+)?/unvote",
            UnVoteRequestHandler,
            {"core": core, "votes": votes},
        ),
        (
            "/api/votes",
            VoteDataRequestHandler,
            {"core": core, "votes": votes},
        ),
        (
            "/api/config",
            ConfigRequestHandler,
            {"core": core, "config": config, "session_id": session_id},
        ),
        (
            "/api/admin/login",
            AdminRequestHandler,
            {"core": core, "config": config},
        ),
    ]
def get_random_alpha_numeric_string(length=8):
    """Return a random string of ASCII letters and digits.

    Args:
        length: number of characters to generate (default 8).
    """
    alphabet = string.ascii_letters + string.digits
    return "".join([random.choice(alphabet) for _ in range(length)])
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.