id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
79372 | # -*- coding: utf-8 -*-
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import unittest
import meterbus
from meterbus.exceptions import *
class TestSequenceFunctions(unittest.TestCase):
    """Unit tests for ``meterbus.DataInformationBlock`` flag and field parsing."""
    def setUp(self):
        # DIBs built from representative first-byte values:
        #   (none) - empty block, all flags expected False
        #   0x0C   - plain instantaneous value
        #   0x2F   - fill-byte special function
        #   0x0F   - special function
        #   0x1F   - "more records follow" marker
        self.dib_empty = meterbus.DataInformationBlock()
        self.dib0 = meterbus.DataInformationBlock([0x0C])
        self.dib7 = meterbus.DataInformationBlock([0x2F])
        self.dib8 = meterbus.DataInformationBlock([0x0F])
        self.dib9 = meterbus.DataInformationBlock([0x1F])
    def test_empty_dib_has_extension_bit(self):
        self.assertEqual(self.dib_empty.has_extension_bit, False)
    def test_empty_dib_has_lvar_bit(self):
        self.assertEqual(self.dib_empty.has_lvar_bit, False)
    def test_empty_dib_is_eoud(self):
        # eoud = "end of user data"
        self.assertEqual(self.dib_empty.is_eoud, False)
    def test_empty_dib_more_records_follow(self):
        self.assertEqual(self.dib_empty.more_records_follow, False)
    def test_empty_dib_is_variable_length(self):
        self.assertEqual(self.dib_empty.is_variable_length, False)
    def test_dib0_has_extension_bit(self):
        self.assertEqual(self.dib0.has_extension_bit, False)
    def test_dib0_has_lvar_bit(self):
        self.assertEqual(self.dib0.has_lvar_bit, False)
    def test_dib0_is_eoud(self):
        self.assertEqual(self.dib0.is_eoud, False)
    def test_dib0_is_variable_length(self):
        self.assertEqual(self.dib0.is_variable_length, False)
    def test_dib0_function_type(self):
        self.assertEqual(self.dib0.function_type,
                         meterbus.FunctionType.INSTANTANEOUS_VALUE)
    def test_dib7_function_type(self):
        self.assertEqual(self.dib7.function_type,
                         meterbus.FunctionType.SPECIAL_FUNCTION_FILL_BYTE)
    def test_dib8_function_type(self):
        self.assertEqual(self.dib8.function_type,
                         meterbus.FunctionType.SPECIAL_FUNCTION)
    def test_dib9_more_records_follow(self):
        self.assertEqual(self.dib9.more_records_follow, True)
    def test_dib9_function_type(self):
        self.assertEqual(self.dib9.function_type,
                         meterbus.FunctionType.MORE_RECORDS_FOLLOW)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
27654 | <filename>fauxfactory/__init__.py
# -*- coding: utf-8 -*-
"""Generate random data for your tests."""
__all__ = (
'gen_alpha',
'gen_alphanumeric',
'gen_boolean',
'gen_choice',
'gen_cjk',
'gen_cyrillic',
'gen_date',
'gen_datetime',
'gen_email',
'gen_html',
'gen_integer',
'gen_ipaddr',
'gen_iplum',
'gen_latin1',
'gen_mac',
'gen_netmask',
'gen_negative_integer',
'gen_numeric_string',
'gen_positive_integer',
'gen_string',
'gen_time',
'gen_url',
'gen_utf8',
'gen_uuid',
)
import datetime
import random
import re
import string
import sys
import unicodedata
import uuid
import warnings
from collections import Iterable
from fauxfactory.constants import (
HTML_TAGS, LOREM_IPSUM_TEXT,
MAX_YEARS, MIN_YEARS,
SCHEMES, SUBDOMAINS, TLDS, VALID_NETMASKS
)
from functools import wraps
# Private Functions -----------------------------------------------------------
def _make_unicode(data):
"""Convert ``data`` to a unicode string if running Python 2.
:param str data: A string to be type cast.
:return: ``data``, but as unicode. ``data`` is never modified: if a type
cast is necessary, a copy of ``data`` is returned.
"""
if sys.version_info[0] == 2:
return unicode(data) # flake8:noqa pylint:disable=undefined-variable
return data
def _is_positive_int(length):
"""Check that ``length`` argument is an integer greater than zero.
:param int length: The desired length of the string
:raises: ``ValueError`` if ``length`` is not an ``int`` or is less than 1.
:returns: Nothing.
:rtype: None
"""
if not isinstance(length, int) or length <= 0:
raise ValueError("{0} is an invalid 'length'.".format(length))
def _unicode_letters_generator():
"""Generates unicode characters in the letters category
:return: a generator which will generates all unicode letters available
"""
if sys.version_info[0] == 2:
chr_function = unichr # pylint:disable=undefined-variable
range_function = xrange # pylint:disable=undefined-variable
else:
chr_function = chr
range_function = range
# Use sys.maxunicode instead of 0x10FFFF to avoid the exception below, in a
# narrow Python build (before Python 3.3)
# ValueError: unichr() arg not in range(0x10000) (narrow Python build)
# For more information, read PEP 261.
for i in range_function(sys.maxunicode):
char = chr_function(i)
if unicodedata.category(char).startswith('L'):
yield char
UNICODE_LETTERS = [c for c in _unicode_letters_generator()]
# Public Functions ------------------------------------------------------------
def gen_string(str_type, length=None):
    """A simple wrapper that calls other string generation methods.

    :param str str_type: The type of string which should be generated.
    :param int length: The length of the generated string. Must be 1 or
        greater.
    :raises: ``ValueError`` if an invalid ``str_type`` is specified.
    :returns: A string.
    :rtype: str

    Valid values for ``str_type``: alpha, alphanumeric, cjk, cyrillic,
    html, latin1, numeric, utf8.

    """
    str_types_functions = {
        u'alpha': gen_alpha,
        u'alphanumeric': gen_alphanumeric,
        u'cjk': gen_cjk,
        u'cyrillic': gen_cyrillic,
        u'html': gen_html,
        u'latin1': gen_latin1,
        u'numeric': gen_numeric_string,
        u'utf8': gen_utf8,
    }
    str_type_lower = str_type.lower()  # do not modify user data
    try:
        method = str_types_functions[str_type_lower]
    except KeyError:
        raise ValueError(
            '{0} is not a supported string type. Valid string types are {1}.'
            ''.format(str_type_lower, u','.join(str_types_functions.keys()))
        )
    # A None length means "use the target generator's own default".
    return method() if length is None else method(length)
def gen_alpha(length=10):
    """Returns a random string made up of alpha characters.

    :param int length: Length for random data.
    :returns: A random string made up of alpha characters.
    :rtype: str

    """
    _is_positive_int(length)  # reject non-int / non-positive lengths
    letters = [random.choice(string.ascii_letters) for _ in range(length)]
    return _make_unicode(u''.join(letters))
def gen_alphanumeric(length=10):
    """Returns a random string made up of alpha and numeric characters.

    :param int length: Length for random data.
    :returns: A random string made up of alpha and numeric characters.
    :rtype: str

    """
    _is_positive_int(length)  # reject non-int / non-positive lengths
    alphabet = string.ascii_letters + string.digits
    chosen = [random.choice(alphabet) for _ in range(length)]
    return _make_unicode(u''.join(chosen))
def gen_boolean():
    """Returns a random Boolean value.

    :returns: A random Boolean value.
    :rtype: bool

    """
    # Delegate to gen_choice so all random-selection logic stays in one place.
    return gen_choice((True, False))
def gen_choice(choices):
    """Returns a random choice from the available choices.

    :param list choices: List of choices from which select a random value.
    :raises: ``ValueError`` if ``choices`` is ``None`` or not ``Iterable`` or
        a ``dict``.
    :returns: A random element from ``choices``.

    """
    if choices is None:
        raise ValueError("Choices argument cannot be None.")
    # Dictionaries are iterable, but "a random dict element" is ambiguous,
    # so they are rejected together with non-iterables.
    if isinstance(choices, dict) or not isinstance(choices, Iterable):
        raise ValueError("Choices argument is not iterable.")
    if len(choices) == 0:
        raise ValueError("Choices argument cannot be empty.")
    # A single item needs no randomness at all.
    if len(choices) == 1:
        return choices[0]
    return random.choice(choices)
def gen_cjk(length=10):
    """Returns a random string made up of CJK characters.

    (Source: Wikipedia - CJK Unified Ideographs)

    :param int length: Length for random data.
    :returns: A random string made up of CJK characters.
    :rtype: str

    """
    _is_positive_int(length)
    # CJK Unified Ideographs span 0x4E00-0x9FCC inclusive. Python 2 uses
    # `unichr` for codepoint-to-char conversion; Python 3 uses `chr`.
    if sys.version_info[0] == 2:
        to_char = unichr  # pylint:disable=undefined-variable
    else:
        to_char = chr
    chars = [to_char(random.randint(0x4E00, 0x9FCC)) for _ in range(length)]
    return _make_unicode(u''.join(chars))
def gen_cyrillic(length=10):
    """Returns a random string made up of Cyrillic characters.

    :param int length: Length for random data.
    :returns: A random string made up of Cyrillic characters.
    :rtype: str

    """
    # Validate length argument
    _is_positive_int(length)
    # Generate codepoints, then convert the codepoints to a string. The
    # range actually used is 0x0400 - 0x04FF, inclusive (the full Cyrillic
    # block, which also contains a few non-letter signs). Python 2 and 3
    # support the `unichr` and `chr` functions, respectively.
    codepoints = [random.randint(0x0400, 0x04FF) for _ in range(length)]
    try:
        # (undefined-variable) pylint:disable=E0602
        output = u''.join(unichr(codepoint) for codepoint in codepoints)
    except NameError:
        # NameError means we are on Python 3, where `unichr` does not exist.
        output = u''.join(chr(codepoint) for codepoint in codepoints)
    return _make_unicode(output)
def gen_date(min_date=None, max_date=None):
    """Returns a random date value

    :param min_date: A valid ``datetime.date`` object.
    :param max_date: A valid ``datetime.date`` object.
    :raises: ``ValueError`` if arguments are not valid ``datetime.date``
        objects, or if ``max_date`` is not later than ``min_date``.
    :returns: Random ``datetime.date`` object.

    """
    # Compute the platform defaults only when actually needed.
    if min_date is None:
        min_date = (datetime.date.today() -
                    datetime.timedelta(365 * MIN_YEARS))
    if max_date is None:
        max_date = (datetime.date.today() +
                    datetime.timedelta(365 * MAX_YEARS))
    # Validation. BUG FIX: the original messages contained an unfilled '%s'
    # placeholder; the offending value is now formatted into the message.
    if not isinstance(min_date, datetime.date):
        raise ValueError(
            "{0!r} is not a valid datetime.date object".format(min_date))
    if not isinstance(max_date, datetime.date):
        raise ValueError(
            "{0!r} is not a valid datetime.date object".format(max_date))
    # Raise a real exception instead of ``assert`` (asserts vanish under -O).
    if min_date >= max_date:
        raise ValueError("max_date must be later than min_date")
    # Pick a day between min and max dates
    diff = max_date - min_date
    days = random.randint(0, diff.days)
    return min_date + datetime.timedelta(days=days)
def gen_datetime(min_date=None, max_date=None):
    """Returns a random datetime value

    :param min_date: A valid ``datetime.datetime`` object.
    :param max_date: A valid ``datetime.datetime`` object.
    :raises: ``ValueError`` if arguments are not valid ``datetime.datetime``
        objects, or if ``max_date`` is not later than ``min_date``.
    :returns: Random ``datetime.datetime`` object.

    """
    # Compute the platform defaults only when actually needed.
    if min_date is None:
        min_date = (datetime.datetime.now() -
                    datetime.timedelta(365 * MIN_YEARS))
    if max_date is None:
        max_date = (datetime.datetime.now() +
                    datetime.timedelta(365 * MAX_YEARS))
    # Validation. BUG FIX: the original messages contained an unfilled '%s'
    # placeholder; the offending value is now formatted into the message.
    if not isinstance(min_date, datetime.datetime):
        raise ValueError(
            "{0!r} is not a valid datetime.datetime object".format(min_date))
    if not isinstance(max_date, datetime.datetime):
        raise ValueError(
            "{0!r} is not a valid datetime.datetime object".format(max_date))
    # Raise a real exception instead of ``assert`` (asserts vanish under -O).
    if min_date >= max_date:
        raise ValueError("max_date must be later than min_date")
    # Pick a time between min and max dates
    diff = max_date - min_date
    seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
    return min_date + datetime.timedelta(seconds=seconds)
def gen_email(name=None, domain=None, tlds=None):
    """Generates a random email address.

    :param str name: Email name.
    :param str domain: Domain name.
    :param str tlds: Top Level Domain Server
    :returns: An email address.
    :rtype: str

    """
    # Fill in any missing pieces with random defaults.
    if name is None:
        name = gen_alpha(8)
    if domain is None:
        domain = gen_choice(SUBDOMAINS)
    if tlds is None:
        tlds = gen_choice(TLDS)
    return _make_unicode(u"{0}@{1}.{2}".format(name, domain, tlds))
def gen_integer(min_value=None, max_value=None):
    """Returns a random integer value based on the current platform.

    :param int min_value: The minimum allowed value.
    :param int max_value: The maximum allowed value.
    :raises: ``ValueError`` if arguments are not integers or if they are
        less or greater than the system's allowed range for integers.
    :returns: Returns a random integer value.
    :rtype: int

    """
    # Platform-specific bounds for the random draw.
    lower_bound = -sys.maxsize - 1
    upper_bound = sys.maxsize
    # Python 2 also has ``long``; Python 3 unifies everything in ``int``.
    if sys.version_info[0] < 3:
        integer_types = (int, long,)  # pylint:disable=undefined-variable
    else:
        integer_types = (int,)
    if min_value is None:
        min_value = lower_bound
    if max_value is None:
        max_value = upper_bound
    if not isinstance(min_value, integer_types) or min_value < lower_bound:
        raise ValueError("'%s' is not a valid minimum." % min_value)
    if not isinstance(max_value, integer_types) or max_value > upper_bound:
        raise ValueError("'%s' is not a valid maximum." % max_value)
    return random.randint(min_value, max_value)
def gen_iplum(words=None, paragraphs=None):
    """Returns a lorem ipsum string. If no arguments are passed, then
    return the entire default lorem ipsum string.

    :param int words: The number of words to return.
    :param int paragraphs: The number of paragraphs to return.
    :raises: ``ValueError`` if ``words`` is not a valid positive integer.
    :returns: A ``lorem ipsum`` string containing either the number of ``words``
        or ``paragraphs``, extending and wrapping around the text as needed to
        make sure that it has the specified length.
    :rtype: str

    """
    # Check parameters
    if words is None or words == 0:
        words = len(LOREM_IPSUM_TEXT.split())
    if paragraphs is None:
        paragraphs = 1
    if not isinstance(words, int) or words < 0:
        raise ValueError(
            "Cannot generate a string with negative number of words.")
    _is_positive_int(paragraphs)
    # Original Lorem Ipsum string
    all_words = LOREM_IPSUM_TEXT.split()
    # How many words do we need?
    total_words_needed = words * paragraphs
    quotient = int(total_words_needed / len(all_words))
    modulus = total_words_needed % len(all_words)
    # BUG FIX: repeat the word pool just enough to cover the request. The
    # original multiplied by (quotient + modulus), over-allocating by up to
    # a factor of len(all_words).
    all_words = all_words * (quotient + (1 if modulus else 0))
    result = u""
    start_pos = 0
    for _ in range(0, paragraphs):
        sentence = u" ".join(
            all_words[start_pos:start_pos + words])
        # Remove comma from the end, if it exists
        if sentence.endswith(','):
            sentence = sentence.rstrip(',')
        # Remove period from the end, if it exists
        if sentence.endswith('.'):
            sentence = sentence.rstrip('.')
        # Each sentence should be properly capitalized
        cap_sentence = [
            frag.capitalize() + u'.' for frag in sentence.split('. ')]
        # Add newline at the end
        result += " ".join(cap_sentence) + u"\n"
        # Increment positional counter
        start_pos += words
    return _make_unicode(result.rstrip())
def gen_latin1(length=10):
    """Returns a random string made up of Latin-1 Supplement letters.

    (Font: Wikipedia - Latin-1 Supplement Unicode Block)

    :param int length: Length for random data.
    :returns: A random string made up of ``Latin1`` characters.
    :rtype: str

    """
    # Validate length argument
    _is_positive_int(length)
    # Letter sub-ranges of the Latin-1 Supplement block, upper bound
    # INCLUSIVE. BUG FIX: the original used exclusive upper bounds, silently
    # dropping U+00D6, U+00F6 and U+00FF; it also had a dead
    # ``range0 = range1 = range2 = []`` assignment, removed here.
    ranges = ((0x00C0, 0x00D6), (0x00D8, 0x00F6), (0x00F8, 0x00FF))
    output_array = [
        code
        for start, end in ranges
        for code in range(start, end + 1)
    ]
    if sys.version_info[0] == 2:
        to_char = unichr  # pylint:disable=E0602
    else:
        to_char = chr
    output_string = u''.join(
        to_char(random.choice(output_array)) for _ in range(length))
    return _make_unicode(output_string)
def gen_negative_integer():
    """Returns a random negative integer based on the current platform.

    :returns: Returns a random negative integer value.
    :rtype: int

    """
    # Zero is the inclusive upper bound, matching the original behaviour.
    return gen_integer(max_value=0)
def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
    """Generates a random IP address.

    You can also specify an IP address prefix if you are interested in
    local network address generation, etc.

    :param bool ip3: Whether to generate a 3 or 4 group IP.
    :param bool ipv6: Whether to generate IPv6 or IPv4
    :param list prefix: A prefix to be used for an IP (e.g. [10, 0, 1]). It
        must be an iterable with strings or integers. Can be left unspecified
        or empty.
    :returns: An IP address.
    :rtype: str
    :raises: ``ValueError`` if ``prefix`` would lead to no random fields at
        all. This means the length that triggers the ``ValueError`` is 4 for
        regular IPv4, 3 for IPv4 with ip3 and 8 for IPv6. It will be raised in
        any case the prefix length reaches or exceeds those values.

    """
    # Set the lengths of the randomly generated sections
    if ipv6:
        rng = 8
    elif ip3:
        rng = 3
    else:
        rng = 4
    prefix = [str(field) for field in prefix]
    # Prefix reduces number of random fields generated, so subtract the length
    # of it from the rng to keep the IP address have correct number of fields
    rng -= len(prefix)
    if rng == 0:
        raise ValueError(
            "Prefix {} would lead to no randomness at all".format(
                repr(prefix)))
    elif rng < 0:
        raise ValueError(
            "Prefix {} is too long for this configuration".format(
                repr(prefix)))
    if ipv6:
        # StackOverflow.com questions: generate-random-ipv6-address
        random_fields = [
            '{0:x}'.format(random.randint(0, 2**16 - 1)) for _ in range(rng)]
        ipaddr = u':'.join(prefix + random_fields)
    else:
        # BUG FIX: randrange(0, 255) excluded 255, which is a valid octet;
        # randint(0, 255) covers the full inclusive range.
        random_fields = [str(random.randint(0, 255)) for _ in range(rng)]
        ipaddr = u".".join(prefix + random_fields)
        if ip3:
            ipaddr = ipaddr + u".0"
    return _make_unicode(ipaddr)
def gen_mac(delimiter=':', multicast=None, locally=None):
    """Generates a random MAC address.

    For more information about how unicast or multicast and globally unique
    and locally administered MAC addresses are generated check this link
    https://en.wikipedia.org/wiki/MAC_address.

    :param str delimeter: Valid MAC delimeter (e.g ':', '-').
    :param bool multicast: Indicates if the generated MAC address should be
        unicast or multicast. If no value is provided a random one will be
        chosen.
    :param bool locally: Indicates if the generated MAC address should be
        globally unique or locally administered. If no value is provided a
        random one will be chosen.
    :returns: A random MAC address.
    :rtype: str

    """
    if delimiter not in [':', '-']:
        raise ValueError('Delimiter is not a valid option: %s' % delimiter)
    if multicast is None:
        multicast = bool(random.randint(0, 1))
    if locally is None:
        locally = bool(random.randint(0, 1))
    first_octet = random.randint(0, 255)
    # Bit 0 of the first octet selects multicast (1) vs. unicast (0).
    if multicast:
        first_octet |= 0b00000001
    else:
        first_octet &= 0b11111110
    # Bit 1 selects locally administered (1) vs. globally unique (0).
    if locally:
        first_octet |= 0b00000010
    else:
        first_octet &= 0b11111101
    octets = [first_octet] + [random.randint(0, 255) for _ in range(5)]
    mac = delimiter.join('{0:02x}'.format(octet) for octet in octets)
    return _make_unicode(mac)
def gen_netmask(min_cidr=1, max_cidr=31):
    """Generates a random valid netmask.

    For more info: http://www.iplocation.net/tools/netmask.php

    :param int min_cidr: Inferior CIDR limit
    :param int max_cidr: Superior CIDR limit
    :returns: The netmask is chosen from
        :data:`fauxfactory.constants.VALID_NETMASKS` respecting the CIDR range
    :rtype: str
    :raises: ``ValueError`` if ``min_cidr`` or ``max_cidr`` have an invalid
        value. For example, ``max_cidr`` cannot be 33.

    """
    if min_cidr < 0:
        raise ValueError(
            'min_cidr must be 0 or greater, but is {0}'.format(min_cidr)
        )
    if max_cidr >= len(VALID_NETMASKS):
        raise ValueError(
            'max_cidr must be less than {0}, but is {1}'
            .format(len(VALID_NETMASKS), max_cidr)
        )
    # Pick a CIDR in range and map it to its dotted-quad netmask.
    cidr = random.randint(min_cidr, max_cidr)
    return VALID_NETMASKS[cidr]
def gen_numeric_string(length=10):
    """Returns a random string made up of numbers.

    :param int length: Length for random data.
    :returns: A random string made up of numbers.
    :rtype: str

    """
    _is_positive_int(length)  # reject non-int / non-positive lengths
    digits = [random.choice(string.digits) for _ in range(length)]
    return _make_unicode(u''.join(digits))
def gen_positive_integer():
    """Returns a random positive integer based on the current platform.

    :returns: A random positive integer value.
    :rtype: int

    """
    # Zero is the inclusive lower bound, matching the original behaviour.
    return gen_integer(min_value=0)
def gen_time():
    """Generates a random time.

    :returns: A random ``datetime.time`` object.

    """
    hour = random.randint(0, 23)
    minute = random.randint(0, 59)
    second = random.randint(0, 59)
    microsecond = random.randint(0, 999999)
    return datetime.time(hour, minute, second, microsecond)
def gen_url(scheme=None, subdomain=None, tlds=None):
    """Generates a random URL address

    :param str scheme: Either http, https or ftp.
    :param str subdomain: A valid subdmain
    :param str tlds: A qualified top level domain name (e.g. 'com', 'net')
    :raises: ``ValueError`` if arguments are not valid.
    :returns: A random URL address.
    :rtype: str

    """
    # For each part: validate a caller-supplied value, or pick a random one.
    if not scheme:
        scheme = gen_choice(SCHEMES)
    elif re.match(r"^(https?|ftp)$", scheme) is None:
        raise ValueError("Protocol {0} is not valid.".format(scheme))
    if not subdomain:
        subdomain = gen_choice(SUBDOMAINS)
    elif re.match(r"^[a-zA-Z0-9][-\w.~]*$", subdomain) is None:
        raise ValueError("Subdomain {0} is invalid.".format(subdomain))
    if not tlds:
        tlds = gen_choice(TLDS)
    elif re.match(r"^[a-zA-Z]{1,3}$", tlds) is None:
        raise ValueError("TLDS name {0} is invalid.".format(tlds))
    return _make_unicode(u"{0}://{1}.{2}".format(scheme, subdomain, tlds))
def gen_utf8(length=10):
    """Returns a random string made up of UTF-8 letters characters, as per
    `RFC 3629`_.

    :param int length: Length for random data.
    :returns: A random string made up of ``UTF-8`` letters characters.
    :rtype: str

    .. _`RFC 3629`: http://www.rfc-editor.org/rfc/rfc3629.txt

    """
    _is_positive_int(length)  # reject non-int / non-positive lengths
    chosen = (random.choice(UNICODE_LETTERS) for _ in range(length))
    return u''.join(chosen)
def gen_uuid():
    """Generates a UUID string (universally unique identifiers).

    :returns: Returns a string representation for a UUID.
    :rtype: str

    """
    # uuid4 is random-based; str() gives the canonical hyphenated form.
    return _make_unicode(str(uuid.uuid4()))
def gen_html(length=10):
    """Returns a random string made up of html characters.

    :param int length: Length for random data.
    :returns: A random string made up of html characters.
    :rtype: str

    """
    _is_positive_int(length)  # reject non-int / non-positive lengths
    tag = random.choice(HTML_TAGS)
    body = gen_string("alpha", length)
    return _make_unicode(u'<{0}>{1}</{2}>'.format(tag, body, tag))
# Backward Compatibility ------------------------------------------------------
# Code borrowed from http://code.activestate.com/recipes/391367-deprecated/
def deprecated(func):
    """A decorator used to mark functions as deprecated.

    Emit a warning when the decorated function is called.

    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Emit a warning, then call ``func``."""
        old_name = func.__name__
        # ``codify`` was renamed outright; every other old name just swaps
        # the ``generate`` prefix for ``gen``.
        new_name = (
            '_make_unicode' if old_name == 'codify'
            else old_name.replace('generate', 'gen')
        )
        warnings.warn(
            '{0} is deprecated! Please use {1} instead.'
            .format(old_name, new_name),
            category=Warning
        )
        return func(*args, **kwargs)
    return wrapper
@deprecated
def codify(data):
    # pylint:disable=missing-docstring
    # Deprecated alias kept for backward compatibility; the replacement is
    # ``_make_unicode`` (the @deprecated wrapper names it in its warning).
    return _make_unicode(data)
class FauxFactory(object):
    """Deprecated class-based facade over the module-level ``gen_*`` functions.

    Each classmethod forwards to its functional replacement; the
    ``@deprecated`` decorator warns callers to migrate.
    """
    # This issue is no longer relevant, as the class has been turned into a set
    # of functions.
    # pylint:disable=too-many-public-methods
    #
    # This code is not imported when `from fauxfactory import *` is called, nor
    # does this code show up in Sphinx's output. See `__all__`.
    # pylint:disable=missing-docstring
    @classmethod
    @deprecated
    def generate_string(cls, str_type, length):
        return gen_string(str_type, length)
    @classmethod
    @deprecated
    def generate_alpha(cls, length=10):
        return gen_alpha(length)
    @classmethod
    @deprecated
    def generate_alphanumeric(cls, length=10):
        return gen_alphanumeric(length)
    @classmethod
    @deprecated
    def generate_boolean(cls):
        return gen_boolean()
    @classmethod
    @deprecated
    def generate_choice(cls, choices):
        return gen_choice(choices)
    @classmethod
    @deprecated
    def generate_cjk(cls, length=10):
        return gen_cjk(length)
    @classmethod
    @deprecated
    def generate_date(cls, min_date=None, max_date=None):
        return gen_date(min_date, max_date)
    @classmethod
    @deprecated
    def generate_datetime(cls, min_date=None, max_date=None):
        return gen_datetime(min_date, max_date)
    @classmethod
    @deprecated
    def generate_email(cls, name=None, domain=None, tlds=None):
        return gen_email(name, domain, tlds)
    @classmethod
    @deprecated
    def generate_integer(cls, min_value=None, max_value=None):
        return gen_integer(min_value, max_value)
    @classmethod
    @deprecated
    def generate_iplum(cls, words=None, paragraphs=None):
        return gen_iplum(words, paragraphs)
    @classmethod
    @deprecated
    def generate_latin1(cls, length=10):
        return gen_latin1(length)
    @classmethod
    @deprecated
    def generate_negative_integer(cls):
        return gen_negative_integer()
    @classmethod
    @deprecated
    def generate_ipaddr(cls, ip3=False, ipv6=False):
        return gen_ipaddr(ip3, ipv6)
    @classmethod
    @deprecated
    def generate_mac(cls, delimiter=":"):
        return gen_mac(delimiter)
    @classmethod
    @deprecated
    def generate_numeric_string(cls, length=10):
        return gen_numeric_string(length)
    @classmethod
    @deprecated
    def generate_positive_integer(cls):
        # BUG FIX: this previously called gen_integer(), which can return any
        # platform integer (including negatives); forward to the correct
        # replacement instead.
        return gen_positive_integer()
    @classmethod
    @deprecated
    def generate_time(cls):
        return gen_time()
    @classmethod
    @deprecated
    def generate_url(cls, scheme=None, subdomain=None, tlds=None):
        return gen_url(scheme, subdomain, tlds)
    @classmethod
    @deprecated
    def generate_utf8(cls, length=10):
        return gen_utf8(length)
    @classmethod
    @deprecated
    def generate_uuid(cls):
        return gen_uuid()
    @classmethod
    @deprecated
    def generate_html(cls, length=10):
        return gen_html(length)
| StarcoderdataPython |
141910 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 06:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema migration: creates the shopping ``Cart`` table."""
    # First migration for this app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): presumably distinguishes 'buy' entries from
                # other cart entry kinds -- confirm against application code.
                ('type', models.CharField(default='buy', max_length=10)),
                ('quantity', models.IntegerField(default=1)),
                # Creation timestamp; defaults to "now" at row insert.
                ('createdte', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| StarcoderdataPython |
119228 | <reponame>cazBlue/test<filename>python/placeholder/demo.py
import os
import sys
import nuclai.bootstrap # Demonstration specific setup.
import scipy.misc # Image loading and manipulation.
import vispy.scene # Canvas & visuals for rendering.
class Application(object):
    """Minimal VisPy demo: a 720p window showing a background image and two
    text captions, ticked by a 30 FPS timer."""
    def __init__(self):
        # Create the canvas hidden; it is shown after all visuals attach.
        self.canvas = vispy.scene.SceneCanvas(
            title='nucl.ai Placeholder',
            size=(1280, 720),
            bgcolor='#F0F0F0',
            show=False,
            keys='interactive')
        self.widget = self.canvas.central_widget
        # Image CC-SA-NC by alexjc and iquilezles.
        data = scipy.misc.imread('background.jpg')
        vispy.scene.visuals.Image(data, parent=self.widget)
        # Title caption anchored to the top-right corner.
        vispy.scene.visuals.Text(parent=self.widget,
            text='nucl.ai Courses',
            face='Questrial', color='#000000', font_size=20 * self.canvas.pixel_scale,
            anchor_x='right', anchor_y='top',
            pos=[1268.0, 12.0, 0.0])
        # Subtitle caption anchored to the bottom-left corner.
        vispy.scene.visuals.Text(parent=self.widget,
            text='The Principles of Modern Game AI',
            face='Questrial', color='#f0f0f0', font_size=12 * self.canvas.pixel_scale,
            anchor_x='left', anchor_y='bottom',
            pos=[16.0, 712.0, 0.0])
        self.canvas.show(visible=True)
        # HACK: Bug in VisPy 0.5.0-dev requires a click for layout to occur.
        self.canvas.events.mouse_press()
    def process(self, _):
        # Per-frame tick; nothing to update in this static demo.
        return
    def run(self):
        """Start the 30 FPS timer and enter the VisPy event loop (blocks)."""
        timer = vispy.app.Timer(interval=1.0 / 30.0)
        timer.connect(self.process)
        timer.start()
        vispy.app.run()
if __name__ == "__main__":
vispy.set_log_level('WARNING')
vispy.use(app='glfw')
app = Application()
app.run()
| StarcoderdataPython |
75572 | <gh_stars>0
from rest_framework import status
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.generics import RetrieveUpdateDestroyAPIView, ListCreateAPIView
from .models import Saledata
from .permissions import IsOwnerOrReadOnly, IsAuthenticated
from .serializers import SaledataSerializer
from .pagination import CustomPagination
class get_delete_update_sale(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single ``Saledata`` record.

    Any authenticated user may read a record; only the owning reseller may
    modify or delete it.
    """
    serializer_class = SaledataSerializer
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)

    def get_queryset(self, pk):
        """Return the ``Saledata`` with primary key ``pk``.

        NOTE(review): on a missing record this returns a 404 ``Response``
        object instead of raising, so callers receive a Response where they
        expect a model instance -- kept for interface compatibility.
        """
        try:
            saledata = Saledata.objects.get(pk=pk)
        except Saledata.DoesNotExist:
            # BUG FIX: the original caught ``Saledata.DoesNotExit`` (typo),
            # which made a missing record raise AttributeError at exception
            # match time instead of producing the intended 404.
            content = {
                'status': 'Not Found'
            }
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        return saledata

    def get(self, request, pk):
        """Serialize and return the requested record."""
        saledata = self.get_queryset(pk)
        serializer = SaledataSerializer(saledata)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk):
        """Replace the record; only its owning reseller may do so."""
        saledata = self.get_queryset(pk)
        if request.user == saledata.reseller:
            serializer = SaledataSerializer(saledata, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            content = {
                'status': 'UNAUTHORIZED'
            }
            return Response(content, status=status.HTTP_401_UNAUTHORIZED)

    def delete(self, request, pk):
        """Delete the record; only its owning reseller may do so."""
        saledata = self.get_queryset(pk)
        if request.user == saledata.reseller:
            saledata.delete()
            content = {
                'status': 'NO CONTENT'
            }
            return Response(content, status=status.HTTP_204_NO_CONTENT)
        else:
            content = {
                'status': 'UNAUTHORIZED'
            }
            return Response(content, status=status.HTTP_401_UNAUTHORIZED)
class get_post_sale(ListCreateAPIView):
    """List the authenticated reseller's sales (paginated) or create one."""
    serializer_class = SaledataSerializer
    permission_classes = (IsAuthenticated,)
    pagination_class = CustomPagination

    def get_queryset(self):
        """Return only the sales owned by the requesting reseller."""
        return Saledata.objects.filter(reseller=self.request.user)

    def get(self, request):
        """Return one page of the caller's sales."""
        saledatas = self.get_queryset()
        page = self.paginate_queryset(saledatas)
        serializer = self.serializer_class(page, many=True)
        return self.get_paginated_response(serializer.data)

    def post(self, request):
        """Create a sale owned by the requesting reseller."""
        serializer = SaledataSerializer(data=request.data)
        if serializer.is_valid():
            # Ownership is set server-side; clients cannot spoof ``reseller``.
            serializer.save(reseller=request.user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # BUG FIX: the original built this Response but never returned it, so
        # invalid posts fell through and the view returned None (HTTP 500).
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| StarcoderdataPython |
1747979 | '''
first positions both controllers to ROOM_CENTER then waits for resume
then draws a box starting at START_POS with STEP*STEP_MUL for NUM_ITER times for each element of APPLY_DIM
needs aioconsole module installed
'''
import asyncio
import time
import numpy as np
import pyrr
from virtualreality import templates
from virtualreality.server import server
from aioconsole import ainput
poser = templates.PoserClient()
START_POS = np.array([-0.124638+(3.3/2), -1.439744, 0.623368+1], dtype=np.float64) # [x, y, z]
ROOM_CENTER = np.array([-0.124638, -3.439744, 0.623368], dtype=np.float64) # [x, y, z]
STEP = 0.1
STEP_MUL = -1
NUM_ITER = 33
APPLY_DIM = np.array([
[1, 0, 0],
[0, 0, 1],
[-1, 0, 0],
[0, 0, -1],
], dtype=np.float64) # this dictates the shape of the boundary
np.set_printoptions(formatter={'all':lambda x:'{:+.4f}, '.format(x)})
@poser.thread_register(1/30)
async def example_thread():
    """Park both controllers at ROOM_CENTER, wait for the user to confirm,
    then trace the boundary shape described by APPLY_DIM (stepping START_POS
    NUM_ITER times per direction with the triggers held), and finally shut
    the poser down."""
    global START_POS
    # Park both controllers at the room center so the user can get oriented.
    poser.pose_controller_r.x = ROOM_CENTER[0]
    poser.pose_controller_r.y = ROOM_CENTER[1]
    poser.pose_controller_r.z = ROOM_CENTER[2]
    poser.pose_controller_l.x = ROOM_CENTER[0]
    poser.pose_controller_l.y = ROOM_CENTER[1]
    poser.pose_controller_l.z = ROOM_CENTER[2]
    await ainput('controllers at ROOM_CENTER, press enter to continue...')
    await asyncio.sleep(1)
    print ('starting boundary draw...')
    # Hold both triggers down for the whole trace so the draw is registered.
    poser.pose_controller_r.trigger_value = 1
    poser.pose_controller_r.trigger_click = 1
    poser.pose_controller_l.trigger_value = 1
    poser.pose_controller_l.trigger_click = 1
    for i in APPLY_DIM:
        for _ in range(NUM_ITER):
            # Bail out early if the poser asked this coroutine to stop.
            if not poser.coro_keep_alive["example_thread"][0]:
                break
            # Headset pose and both controllers follow the draw position.
            poser.pose.x = START_POS[0]
            poser.pose.y = START_POS[1]
            poser.pose.z = START_POS[2]
            poser.pose_controller_r.x = START_POS[0]
            poser.pose_controller_r.y = START_POS[1]
            poser.pose_controller_r.z = START_POS[2]
            poser.pose_controller_l.x = START_POS[0]
            poser.pose_controller_l.y = START_POS[1]
            poser.pose_controller_l.z = START_POS[2]
            START_POS += i * (STEP * STEP_MUL)
            print (START_POS)
            await asyncio.sleep(poser.coro_keep_alive["example_thread"][1])
    # Release the triggers once the full shape has been traced.
    poser.pose_controller_r.trigger_value = 0
    poser.pose_controller_r.trigger_click = 0
    poser.pose_controller_l.trigger_value = 0
    poser.pose_controller_l.trigger_click = 0
    poser.coro_keep_alive["example_thread"][0] = False
    await asyncio.sleep(1/10)
    print ('drawing boundaries done')
    poser.coro_keep_alive["close"][0] = False # close poser
asyncio.run(poser.main()) | StarcoderdataPython |
3248383 | <reponame>nforesperance/Tensorflow-Keras<filename>cnn/conv_1d.py
# Example of calculating 1D convolutions with a hand-set vertical-line kernel.
from numpy import asarray
from keras.models import Sequential
from keras.layers import Conv1D
# Define input data: a single "pulse" of two ones in an 8-step signal.
data = asarray([0, 0, 0, 1, 1, 0, 0, 0])
data = data.reshape(1, 8, 1) # (samples, length, channels)
# Create the model.
model = Sequential()
# The model expects input samples to have the shape (length, channels).
# Conv1D(filters, kernel_size, input_shape=(input_length, channels))
model.add(Conv1D(1, 3, input_shape=(8, 1)))
# Define an edge-detector kernel: weights [0, 1, 0] with bias 0.
weights = [asarray([[[0]],[[1]],[[0]]]), asarray([0.0])] #[0,1,0]
# Store the weights in the model.
model.set_weights(weights)
# Confirm they were stored.
print(model.get_weights())
# Apply the filter to the input data.
yhat = model.predict(data)
print(yhat)
1769586 | from empstore import teams
import employee as emp
from empstore import employees
def manage_all_team_menu():
    """Print the numbered options of the team-management main menu."""
    options = (
        "1.Create team",
        "2.Display team",
        "3.Manage team(Particular)",
        "4.Delete team",
        "5.Exit",
    )
    for option in options:
        print("\t" + option)
def manage_all_teams():
    """Run the interactive team-management loop until the user picks Exit."""
    # Map menu numbers to their handlers; 5 (exit) is handled separately.
    actions = {1: create_team, 2: display_teams, 3: manage_team, 4: delete_team}
    while True:
        manage_all_team_menu()
        choice = int(input("\tEnter your choice "))
        if choice == 5:
            break
        handler = actions.get(choice)
        if handler is None:
            print("\tInvalid choice")
        else:
            handler()
def create_team():
    """Prompt for a team name and register it with an empty member list."""
    name = input("\tEnter team name ")
    teams[name] = []
def delete_team():
    """Prompt for a team name and delete it, warning on an unknown name."""
    name = input("\tEnter team name ")
    if name not in teams:
        print("\tWrong team name")
    else:
        del teams[name]
        print("\tDeleted the team")
def display_teams():
    """Print every team as `name => |member|member...` using employee names."""
    for team_name, member_ids in teams.items():
        # member_ids are employee serial numbers; resolve each to its name.
        names = "".join("|" + emp.employees[member_id]["name"]
                        for member_id in member_ids)
        print(f"{team_name} => {names}")
def manage_team_menu():
    """Print the numbered options of the per-team management sub-menu."""
    for number, label in enumerate(("Add Member", "Delete Member", "List Members"), start=1):
        print(f"\t\t{number}.{label}")
def manage_team():
    """Prompt for a team and run one add/delete/list action on it."""
    team_name = input("\t\tEnter team name ")
    manage_team_menu()
    choice = int(input("\t\t Enter your Choice "))
    handlers = {1: add_member, 2: delete_member, 3: list_member}
    action = handlers.get(choice)
    if action is None:
        print("\tInvalid choice")
    else:
        action(team_name)
def add_member(team_name):
    # NOTE(review): display_employee() is not defined or imported in this
    # module (only `teams`, `employees` and `employee as emp` are in scope),
    # so this call raises NameError as written -- confirm the intended helper.
    display_employee()
    serial_no = input("\t\tEnter the serial no of employee ")
    # NOTE(review): input() yields a str; this assumes emp.employees is keyed
    # by string serial numbers -- verify against the employee module.
    if serial_no in emp.employees.keys():
        teams[team_name].append(serial_no)
    else:
        print("\t\tWrong serial No.")
def list_member(team_name):
    """Print the team roster on one line as `|serial.name|serial.name...`."""
    roster = "".join("|" + sid + "." + emp.employees[sid]["name"]
                     for sid in teams[team_name])
    print(f"{roster}")
def delete_member(team_name):
    """Show the team roster, prompt for a serial number, and remove it.

    Fixes a NameError in the original, which removed the member from an
    undefined `groups` mapping instead of the imported `teams` dict.
    """
    list_member(team_name)
    serial_no = input("\t\tEnter serial no from list")
    if serial_no in teams[team_name]:
        teams[team_name].remove(serial_no)
    else:
        print("\t\tWrong serial No.")
| StarcoderdataPython |
3204797 | <gh_stars>0
from typing import TYPE_CHECKING, Dict, Tuple
if TYPE_CHECKING:
from core.cell import Cell
class Distances:
    """Distance map from a fixed root cell to every cell reachable from it.

    Behaves like a dictionary keyed by cells: ``distances[cell]`` is the
    number of steps from the root to that cell.  The root itself is stored
    with distance 0 at construction time, so root -> A -> B gives::

        cells[root] = 0
        cells[A] = 1
        cells[B] = 2

    TODO: Building the distances structure should probably happen here, and not in cell.
    """

    def __init__(self, root: "Cell") -> None:
        self.root: "Cell" = root
        self.cells: Dict["Cell", int] = {root: 0}

    def __getitem__(self, key: "Cell") -> int:
        return self.cells[key]

    def __setitem__(self, key: "Cell", val: int) -> None:
        self.cells[key] = val

    def __contains__(self, key: "Cell") -> bool:
        return key in self.cells

    def get_path_to(self, goal: "Cell") -> "Distances":
        """Backtrack the shortest path from the root to *goal*.

        Starting at the goal, repeatedly steps to a linked neighbor that is
        strictly closer to the root, recording each distance on the way
        (the simplified-Dijkstra backtrace described on page 42).
        """
        breadcrumbs = Distances(self.root)
        current = goal
        breadcrumbs[current] = self.cells[current]
        while current is not self.root:
            for neighbor in current.links:
                if self.cells[neighbor] < self.cells[current]:
                    breadcrumbs[neighbor] = self.cells[neighbor]
                    current = neighbor
                    break
        return breadcrumbs

    @property
    def max(self) -> Tuple["Cell", int]:
        """The (cell, distance) pair for the cell farthest from the root."""
        farthest, best = self.root, 0
        for cell, distance in self.cells.items():
            if distance > best:
                farthest, best = cell, distance
        return (farthest, best)

    def get_cells(self):
        """All cells currently recorded in this distance map."""
        return self.cells.keys()
| StarcoderdataPython |
1743231 | import json
import uuid
from typing import Dict, List, Optional, Tuple, Union
import redis
from .com import Message, decrement_msg_id
class admin:
    """Administrative view over all telstar streams on one Redis connection."""

    def __init__(self, link: redis.Redis) -> None:
        self.link: redis.Redis = link

    def get_streams(self, match: None = None) -> List["Stream"]:
        """Return all telstar streams, optionally filtered by a name prefix."""
        prefix = match or ""
        # `scan_iter` was deliberately avoided: for a yet unknown reason
        # SCAN took hundreds of iterations to find all streams.
        keys = self.link.keys(f"telstar:stream:{prefix}*")
        return [Stream(self, key) for key in keys]

    def get_consumers(self) -> List["Consumer"]:
        """Return every consumer of every group of every stream."""
        consumers: List["Consumer"] = []
        for stream in self.get_streams():
            for group in stream.get_groups():
                consumers.extend(group.get_consumers())
        return consumers
class Stream:
    """Wrapper around a single telstar Redis stream key."""

    def __init__(self, admin: admin, stream_name: str) -> None:
        self.name = stream_name
        self.admin = admin
        self.link = admin.link

    @property
    def display_name(self) -> bytes:
        """The stream name with its `telstar:stream:` prefix stripped."""
        return self.name.replace(b"telstar:stream:", b"")

    def get_groups(self) -> List["Group"]:
        """One Group per consumer group, enriched with its XPENDING summary."""
        groups = []
        for info in self.link.xinfo_groups(self.name):
            summary = self.link.xpending(self.name, info["name"])
            groups.append(Group(self, name=info["name"], **summary))
        return groups

    def get_pending_messages(self) -> List["AdminMessage"]:
        """Collect the pending messages of every group on this stream."""
        pending: List["AdminMessage"] = []
        for group in self.get_groups():
            pending.extend(group.get_pending_messages())
        return pending

    def get_length(self) -> int:
        """Number of entries currently in the stream (XLEN)."""
        return self.link.xlen(self.name)
class Group:
    """One consumer group of a stream plus its XPENDING summary fields."""

    def __init__(self, stream: Stream, name: str, pending: int, min: Optional[bytes], max: Optional[bytes], consumers: List[Dict[str, Union[bytes, int]]]) -> None:
        self.stream = stream
        self.link = stream.link
        self.name = name
        self.pending, self.min, self.max, self.consumers = pending, min, max, consumers

    def get_pending_messages(self) -> List["AdminMessage"]:
        """An AdminMessage for every pending entry (empty when none pending)."""
        if not self.pending:
            return []
        infos = self.link.xpending_range(self.stream.name, self.name, self.min, self.max, self.pending)
        return [AdminMessage(self, **info) for info in infos]

    def get_consumers(self) -> List["Consumer"]:
        """The consumers registered in this group (XINFO CONSUMERS)."""
        infos = self.link.xinfo_consumers(self.stream.name, self.name)
        return [Consumer(self, **info) for info in infos]

    def get_seen_messages(self) -> int:
        """Count this group's `telstar:seen:<stream>:<group>*` keys."""
        stream_name = self.stream.name.replace(b"telstar:stream:", b"").decode("ascii")
        group_name = self.name.decode("ascii")
        return len(self.link.keys(f"telstar:seen:{stream_name}:{group_name}*"))

    def delete(self) -> bool:
        """Destroy this consumer group on the stream (XGROUP DESTROY)."""
        return self.link.xgroup_destroy(self.stream.name, self.name)
class Consumer:
    """One consumer inside a group, with its pending count and idle time."""

    def __init__(self, group: Group, name: bytes, pending: int, idle: int) -> None:
        self.group = group
        self.name = name
        self.pending_messages = pending
        self.idle_time = idle

    def delete(self) -> int:
        """Remove this consumer from its group (XGROUP DELCONSUMER)."""
        stream = self.group.stream
        return stream.admin.link.xgroup_delconsumer(stream.name, self.group.name, self.name)
class AdminMessage:
    """A single pending stream entry, as reported by XPENDING."""

    def __init__(self, group: Group, message_id: bytes, consumer: str, time_since_delivered: int, times_delivered: int) -> None:
        self.group = group
        self.message_id = message_id
        self.consumer = consumer
        self.time_since_delivered = time_since_delivered
        self.times_delivered = times_delivered

    def remove(self):
        """Acknowledge and delete this entry in one Redis pipeline."""
        link = self.group.stream.admin.link
        pipe = link.pipeline()
        pipe.xack(self.group.stream.name, self.group.name, self.message_id)
        pipe.xdel(self.group.stream.name, self.message_id)
        pipe.execute()

    def read_raw(self) -> List[List[Union[bytes, List[Tuple[bytes, Dict[bytes, bytes]]]]]]:
        """Re-read this entry via XREAD, starting just before its id."""
        start_id = decrement_msg_id(self.message_id)
        return self.group.stream.admin.link.xread({self.group.stream.name: start_id}, count=1)

    def read(self) -> Message:
        """Decode the first re-read record into a telstar Message.

        Falls through (returning None) when the entry no longer exists.
        """
        for stream_name, records in self.read_raw():
            for stream_msg_id, fields in records:
                return Message(stream_name,
                               uuid.UUID(fields[Message.IDFieldName].decode("ascii")),
                               json.loads(fields[Message.DataFieldName]))
| StarcoderdataPython |
3279031 | __version__ = "0.910"
| StarcoderdataPython |
1736656 | <gh_stars>0
# -*- coding: utf-8 -*-
import datetime as dt
from flask.ext.login import UserMixin
from metapp2.extensions import bcrypt
from metapp2.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK
)
class Meeting_Note(SurrogatePK, Model):
    """SQLAlchemy model for a single note taken during a meeting."""

    __tablename__ = 'meeting_notes'

    # NOTE(review): `content` is declared as Integer although it presumably
    # holds the note text -- confirm whether db.Text/db.String was intended.
    content = Column(db.Integer, nullable=False)
    # NOTE(review): these two flags are Integer columns used as booleans.
    is_important = Column(db.Integer, nullable=False)
    is_question = Column(db.Integer, nullable=False)
    # Creation timestamp; defaults to the current UTC time.
    date_created = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    # Foreign keys to the owning meeting and the authoring user.
    meeting_id = ReferenceCol('meetings')
    user_id = ReferenceCol('users')

    def __init__(self, content, is_important, is_question, date_created, meeting, user):
        # `meeting` and `user` are presumably relationship objects -- confirm
        # against the corresponding models.
        db.Model.__init__(self, content=content, is_important=is_important, is_question=is_question, date_created=date_created, user=user, meeting=meeting)

    def __repr__(self):
        # NOTE(review): static repr carries no identifying info; consider
        # including the primary key.
        return 'Meeting Note'
3266564 | <reponame>vencax/django-sql-nss-admin
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Absolute path to the README shipped next to this setup script.
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'README')

description = 'DJango module for SQL based Linux NSS authentication system'

# Use the README as the long description when present; otherwise fall back
# to the one-line description so packaging never fails on a missing file.
if os.path.exists(README_PATH):
    long_description = open(README_PATH).read()
else:
    long_description = description

# Standard setuptools packaging metadata.
setup(name='django-sql-nss-admin',
      version='0.3',
      description=description,
      license='BSD',
      url='https://github.com/vencax/django-sql-nss-admin',
      author='vencax',
      author_email='<EMAIL>',
      packages=find_packages(),
      install_requires=[
          'django>=1.3',
      ],
      keywords="django linux libnss-mysql nss admin",
      include_package_data=True,
      )
| StarcoderdataPython |
3277846 | import csv
# Python 2 script: filter rows from a ';'-separated CSV export and collect
# the 6th field of every matching row into months.txt.
# NOTE(review): mode "w" already truncates, so the truncate() call is redundant.
xx = open("months.txt", "w" )
xx.truncate();
with open('/Users/munish/Desktop/aaa.csv', 'rb') as csvfile:
    # Rows are read whole (space delimiter) and split manually on ';' below.
    reader = csv.reader(csvfile,delimiter=' ', quotechar='|')
    i=0
    y=[]  # collects every matching parsed row
    for row in reader:
        '''if (row[2] == 2) or (row[3] == 2'''
        i=i+1
        # Skip the first 12 rows -- presumably a header block; confirm.
        if(i>12):
            x=row[0][:].split(';')
            #x -- > variable with all info and all parameters in an array'''
            # Keep rows whose third field equals '13'; write out field 6.
            if(x[2]=='13'):
                print x[5]
                xx.write(x[5])
                xx.write(';')
                y.append(x)
xx.close()
# Read the output back and show what was written.
yy=open("months.txt","r")
print yy.readlines();
yy.close();
| StarcoderdataPython |
112708 | import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Dropout, Conv2D, MaxPool2D, Flatten, Reshape, BatchNormalization
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import SGD, Adam
from functools import partial
import time
class ann:
    """CNN classifier wrapper: builds, trains, checkpoints and evaluates.

    NOTE(review): the constructor stores data/config on the instance, but
    modelTrain() takes the same values again as parameters and never reads
    the stored attributes -- confirm which path is intended.
    """

    def __init__(self, nb_of_outputs, weights, X_train, X_test, y_train, y_test, entropy, nb_of_epochs):
        # nb_of_outputs: number of classes for the softmax output layer.
        self.nb_of_outputs = nb_of_outputs
        # weights: class-weight mapping passed to fit() for imbalanced data.
        self.weights = weights
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test
        # entropy: loss identifier handed to model.compile().
        self.entropy = entropy
        self.nb_of_epochs = nb_of_epochs

    def modelTrain(self, nb_of_outputs, weights, X_train, X_test, y_train, y_test, entropy, nb_of_epochs, save_name):
        """Build, train and evaluate the CNN; return accuracy metrics.

        Trains with early stopping and checkpointing to `save_name`, reloads
        the best checkpoint, and returns (exact_acc, one_off, two_off,
        training_time_seconds, fit_history).  Inputs are expected as
        (1, 200, 200) single-channel images (see the Reshape layer).
        Labels are one-hot encoded (argmax is taken for the off-by-N accuracies).
        """
        # Dense-layer factory with he_normal init and L2 regularization; note
        # the activation given at call sites below overrides the "elu" default.
        RegularizedDense = partial(keras.layers.Dense, activation="elu", kernel_initializer="he_normal", kernel_regularizer=keras.regularizers.l2(0.01))
        model = Sequential()
        # Channels-first input reshaped to channels-last for Conv2D.
        model.add(Reshape(input_shape=(1,200,200),target_shape=(200,200,1)))
        # NOTE(review): every MaxPool2D below uses pool_size=(1,1), which is
        # a no-op; spatial reduction comes only from the strided convs.
        model.add(Conv2D(128, 1, activation='sigmoid'))
        model.add(MaxPool2D(pool_size=(1,1)))
        model.add(BatchNormalization())
        model.add(Conv2D(64, 1, strides=(2,2), activation='sigmoid'))
        model.add(MaxPool2D(pool_size=(1,1)))
        model.add(BatchNormalization())
        model.add(Conv2D(32, 1, strides=(2,2), activation='sigmoid'))
        model.add(MaxPool2D(pool_size=(1,1)))
        model.add(BatchNormalization())
        model.add(Conv2D(16, 1, strides=(2,2), activation='sigmoid'))
        model.add(MaxPool2D(pool_size=(1,1)))
        model.add(BatchNormalization())
        model.add(Flatten())
        # Fully connected head with dropout between layers.
        model.add(RegularizedDense(48, activation='sigmoid'))
        model.add(Dropout(0.3))
        model.add(BatchNormalization())
        model.add(RegularizedDense(64, activation='sigmoid'))
        model.add(Dropout(0.4))
        model.add(BatchNormalization())
        model.add(RegularizedDense(24, activation='sigmoid'))
        model.add(Dropout(0.3))
        model.add(BatchNormalization())
        model.add(Dense(nb_of_outputs, activation='softmax'))
        # SGD with an exponentially decaying learning rate.
        lr_schedule = keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=1e-2,decay_steps=10000,decay_rate=0.9)
        optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
        model.compile(optimizer=optimizer, loss=entropy, metrics=['accuracy'])
        print(model.summary())
        # TensorBoard logging, best-on-val-accuracy checkpointing, and
        # early stopping on validation loss (patience 3).
        tb = TensorBoard('./logs')
        checkpt = ModelCheckpoint(save_name,'val_accuracy',1,True)
        es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3, )
        time_to_train = time.time()
        hist = model.fit(X_train,y_train,epochs=nb_of_epochs,validation_data=(X_test, y_test), callbacks=[tb, checkpt, es], class_weight=weights)
        # Reload the best checkpoint before computing final metrics.
        model = load_model(save_name)
        final_predicts = model.predict(X_test)
        time_diff = time.time()
        time_train = -time_to_train + time_diff
        # Accuracy where the predicted class is exactly right / within 1 / within 2.
        exact_acc = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 0) / len(y_test)
        one_off = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 1) / len(y_test)
        two_off = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 2) / len(y_test)
        print("Training time {}".format(time_train))
        print("Exact_acc = {}".format(exact_acc))
        print("One off acc = {}".format(one_off))
        print("Two off acc = {}".format(two_off))
        return exact_acc, one_off, two_off, time_train, hist
| StarcoderdataPython |
3346196 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import cv2
from ler_imagem import ler_imagem
from escreve_imagem import escreve
#from configuracao import configuracao
from boi.b_pre_processamento import b_pre_processamento
from boi.b_regiao_interesse import regiao_interesse
from boi.b_watershed import watershed
from boi.b_mascara_bovino import mascara_bovino
from mosca.m_identifica_bordas import identifica_bordas
from mosca.m_melhora_imagem import melhora_imagem
from mosca.m_contornos import regiao_int
from mosca.m_identifica_mosca import identifica_mosca
#from parse_labelme_xml import valida
#from matplotlib import pyplot as plt
#from datetime import datetime
#from os import listdir
#from os.path import isfile, join
import time
from pathlib import Path
from com.facom.rvns.moscadochifreapp.interfaces import OutputWritable as PythonClass
from java import jclass
from java import cast
#img = 'image/img8.jpg'
def realiza_contagem(img, targetDir, activity_ref, suav_bov1, suav_bov2, suav_bov3, w_erode1, w_erode2, w_erode3, w_dilate1, w_dilate2, w_dilate3, pix_cont1, pix_cont2, perim1, perim2):
    """Run the full horn-fly ("mosca-do-chifre") counting pipeline on one image.

    Segments the bovine, enhances and cleans its edge map, identifies and
    counts the flies, writes an annotated JPEG into `targetDir`, and reports
    progress and the final count back to the Android side through the
    OutputWritable callback wrapped around `activity_ref` (chaquopy bridge).
    Returns the path of the saved annotated image.
    """
    # Regroup the scalar tuning parameters into the lists the pipeline expects.
    suav_bov = [suav_bov1, suav_bov2, suav_bov3]
    w_erode = [w_erode1, w_erode2, w_erode3]
    w_dilate = [w_dilate1, w_dilate2, w_dilate3]
    pix_cont = [pix_cont1, pix_cont2]
    perim = [perim1, perim2]

    # Bridge the Android callback interface so progress can be streamed to the UI.
    OutputWritable = jclass("com.facom.rvns.moscadochifreapp.interfaces.OutputWritable")
    outputWritable = cast(OutputWritable, activity_ref)

    inicio = time.time()
    # Attributes (leftovers from the standalone desktop version, kept for reference)
    #data = datetime.now()
    #fname = data.strftime('%Y-%m-%d-%H-%M-%S')
    #inputPath = '../image/'
    #outputPath = '../resultados/'
    # Default constants from the standalone version:
    # suav_bov = [5, 25, 25] # Gaussian kernel for smoothing the bovine image
    # w_erode = [3,3,1] # kernel and repetitions for eroding the bovine contour
    # w_dilate = [21,21,5] # kernel and repetitions for dilating the bovine contour
    # pix_cont = [15,70] # pixel thresholds for each edge
    # perim = [15,45] # perimeter thresholds for edges identified as horn flies
    # onlyfiles = [f for f in listdir(inputPath) if isfile(join(inputPath, f))]
    #for n in range(0, len(onlyfiles)):
    #img = inputPath + onlyfiles[n]

    # Read the image twice: `original` stays untouched, `imagem` is processed.
    original = ler_imagem(img)
    imagem = ler_imagem(img)
    msg = "Pre-processamento..."
    # Pre-processing
    print(msg)
    outputWritable.writeOutput(msg)
    pre = b_pre_processamento(imagem, suav_bov)
    msg = "Definindo da região de interesse..."
    print(msg)
    outputWritable.writeOutput(msg)
    # Define the region of interest
    regiao = regiao_interesse(pre)
    msg = "Aplicando Watershed para segmentar o bovino..."
    print(msg)
    outputWritable.writeOutput(msg)
    # Watershed algorithm to segment the bovine
    water = watershed(regiao, imagem, w_erode, w_dilate)
    msg = "Extraindo a mascara do bovino da imagem original..."
    print(msg)
    outputWritable.writeOutput(msg)
    # Extract the bovine mask from the original image
    mascara = mascara_bovino(water[0], water[1], imagem)
    msg = "Detectando as bordas da imagem..."
    print(msg)
    outputWritable.writeOutput(msg)
    # Detect the image edges
    bordas = identifica_bordas(mascara)
    msg = "Aplicando filtros de melhoramento na imagem..."
    print(msg)
    outputWritable.writeOutput(msg)
    # Image-enhancement filters
    melhora = melhora_imagem(bordas[1])
    msg = "Limpando os contornos do bovino.."
    print(msg)
    outputWritable.writeOutput(msg)
    # Clean up the bovine contours
    contornos = regiao_int(melhora[0], original, pix_cont)
    msg = "Identificando as moscas-do-chifre e realizando a contagem...\n\n.."
    print(msg)
    outputWritable.writeOutput(msg)
    # Identify the horn flies and count them
    ident = identifica_mosca(contornos, imagem, perim)
    total = ident[1]
    resultad = escreve(ident[0], total)
    outputWritable.writeResult(total)
    # valida(ident[3], img) # disabled for the mobile app (no validation step on device)

    # Print the configuration used for the image (disabled)
    #config = ['Imagem: '+str(img), 'Resolucao: '+str(original.shape), 'Suavizacao Bov: GaussianBlur '+str(suav_bov),
    #          'Dilatacao: '+str(w_dilate),'Erosao: '+str(w_erode),
    #          'Pixels do contorno: '+str(pix_cont),'Perimetro das bordas: '+str(perim)]
    #config = configuracao(config)

    # Convert images to RGB (only needed for the disabled matplotlib preview)
    #original = cv2.cvtColor(original,cv2.COLOR_BGR2RGB)
    #mascara = cv2.cvtColor(mascara,cv2.COLOR_BGR2RGB)
    #resultado = cv2.cvtColor(resultad,cv2.COLOR_BGR2RGB)
    #teste1 = cv2.cvtColor(ident[2],cv2.COLOR_BGR2RGB)

    #titles = ['(1)','(2)','(3)','(4)','(5)','(6)','(7)','(8)','(9)','(10)']
    #images = [original, pre, regiao, water[0], mascara, bordas[0], bordas[1], melhora[0], teste1, resultado]
    #for i in range(10):
    #    plt.subplot(3, 4, i+1), plt.imshow(images[i], 'gray')
    #    plt.title(titles[i], fontsize=8)
    #    plt.xticks([]), plt.yticks([])
    # Path(outputPath).mkdir(parents=True, exist_ok=True)
    #plt.savefig(img+'_etapas.jpg', dpi=1200)

    # Save the annotated result next to the requested target directory.
    output_filename = targetDir + '/' + Path(img).stem +'.jpg'
    cv2.imwrite(output_filename, resultad)
    print('Salvo ', output_filename+' total: ', total)
#    plt.show()
    fim = time.time()
    print('duracao: %f'%(fim-inicio))
    return output_filename
    #key = cv2.waitKey(0)
    #if key == 27:
        #cv2.destroyAllWindows()
if __name__ == '__main__':
    # NOTE(review): realiza_contagem() requires 16 positional arguments
    # (img, targetDir, activity_ref, ...); calling it bare raises TypeError.
    # Confirm whether this entry point is dead code or needs argument parsing.
    realiza_contagem()
1762151 | #!/usr/bin/env python
""" """
# Standard library modules.
import os
# Third party modules.
import pytest
# Local modules.
from pymontecarlo_casino2.importer import Casino2Importer
from pymontecarlo.results.photonintensity import EmittedPhotonIntensityResult
from pymontecarlo.simulation import Simulation
# Globals and constants variables.
@pytest.fixture
def importer():
    """Provide a fresh Casino2Importer instance for each test."""
    return Casino2Importer()
@pytest.mark.asyncio
async def test_import_(event_loop, importer, options, testdatadir):
    """Importing the bundled `sim1` result set yields the expected intensities."""
    dirpath = os.path.join(testdatadir, "sim1")
    results = await importer.import_(options, dirpath)
    simulation = Simulation(options, results)

    # Exactly one result set is produced, with 43 photon-intensity entries.
    assert len(simulation.results) == 1

    result = simulation.find_result(EmittedPhotonIntensityResult)[0]
    assert len(result) == 43

    # Spot-check two characteristic X-ray lines against reference values.
    q = result[("Au", "La")]
    assert q.n == pytest.approx(2.73255e-7, abs=1e-13)

    q = result[("Si", "Ka1")]
    assert q.n == pytest.approx(1.6331941e-6, abs=1e-13)
| StarcoderdataPython |
3261415 | import time
import maze
from tqdm import tqdm
# Connect to the live maze server and snapshot the whole playing field.
c = maze.Connect("admin", "velociraptor")
field = c.get_all()

jmpLen = 4  # NOTE(review): never used below -- confirm it is still needed.
inp = []
jumps = []

# For every line in the playing array keep the bottom row, i.e. the actual
# places where you jump on.
for line in field:
    inp.append(line[-1])

print("Aiming to hit:", len(inp))

# Movement helpers within the live maze.
# NOTE(review): doJumps() below calls c.move() directly instead of these.
def jump():
    c.move("w")

def move():
    c.move("d")

# Reverses the inp list, e.g. spot 1 is 10000th and 10000th spot is 1st.
r = list(reversed(inp))
print(" working on backtracking")

# Scan the reversed track and record every index where a jump is needed.
for i in range(len(r)):
    if i < 9990:
        if (r[i + 3] == 1 and
            r[i +
              7] == 1):  # there is a hole and it is not on the last spot
            # if (r[i+2] == 0): #if 2 blocks ahead there is a hole
            # append the location of the jump to the array, working from back to front
            jumps.append(i)

print("jumping in 5s")
time.sleep(5)
# Finished the process of finding jumps (hopefully correctly).

def doJumps():
    global jumps
    # NOTE(review): iterates a fixed 10000 steps but indexes jumps[i], so an
    # IndexError is raised once i >= len(jumps) -- confirm intended bounds.
    for i in range(10000):
        x = c.x()
        time.sleep(2)
        print("att")
        if jumps[i] == x:  # if the x coordinate is a jump coordinate
            c.move("w")
            print("jumped")
        else:
            c.move("d")
            print("moved right")

print(jumps[0])
doJumps()
3359978 | <reponame>crwsr124/GANsNRoses<gh_stars>0
import argparse
import math
import random
import os
from util import *
import numpy as np
import torch
torch.backends.cudnn.benchmark = True
from torch import nn, autograd
from torch import optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from torch.optim import lr_scheduler
import copy
import kornia.augmentation as K
import kornia
import lpips
from model_cr import *
from dataset import ImageFolder
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
# Reconstruction criteria available to the training loop below.
mse_criterion = nn.MSELoss()
smooth_l1 = nn.SmoothL1Loss()
@torch.no_grad()
def getGaussianKernel(ksize, sigma=0):
    """Return a normalized ksize x ksize Gaussian kernel as a torch tensor.

    When sigma <= 0 it is derived from ksize with the same default formula
    OpenCV uses, so results stay consistent with cv2.
    """
    if sigma <= 0:
        sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
    half = ksize // 2
    # Signed offset of every tap from the kernel center.
    offsets = np.arange(ksize, dtype=np.float32) - half
    # 1-D Gaussian profile; the separable outer product yields the 2-D kernel.
    profile = np.exp(-(offsets ** 2) / (2 * sigma ** 2))
    grid = np.outer(profile, profile)
    weights = torch.from_numpy(grid)
    # Normalize so the kernel sums to 1.
    return weights / weights.sum()
def bilateralFilter(batch_img, ksize, sigmaColor=None, sigmaSpace=None):
    """Bilateral filter over a batch of images of shape (B, C, H, W).

    Each output pixel is the weighted mean of its ksize x ksize neighborhood,
    weighted by both spatial closeness (Gaussian) and intensity closeness,
    so edges are preserved while flat regions are smoothed.
    """
    device = batch_img.device
    if sigmaSpace is None:
        sigmaSpace = 0.15 * ksize + 0.35  # 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
    if sigmaColor is None:
        sigmaColor = sigmaSpace

    pad = (ksize - 1) // 2
    batch_img_pad = F.pad(batch_img, pad=[pad, pad, pad, pad], mode='reflect')

    # batch_img has shape BxCxHxW, so unfold along dims 2 and 3.
    # patches.shape: B x C x H x W x ksize x ksize
    patches = batch_img_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
    patch_dim = patches.dim()  # 6
    # Intensity difference between each neighbor and its center pixel.
    diff_color = patches - batch_img.unsqueeze(-1).unsqueeze(-1)
    # Range (color) weights computed from the intensity differences.
    weights_color = torch.exp(-(diff_color ** 2) / (2 * sigmaColor ** 2))
    # Normalize the range weights within each window.
    weights_color = weights_color / weights_color.sum(dim=(-1, -2), keepdim=True)

    # Spatial Gaussian kernel, reshaped/broadcast to match weights_color.
    weights_space = getGaussianKernel(ksize, sigmaSpace).to(device)
    weights_space_dim = (patch_dim - 2) * (1,) + (ksize, ksize)
    weights_space = weights_space.view(*weights_space_dim).expand_as(weights_color)

    # Total weight is the product of the spatial and range weights.
    weights = weights_space * weights_color
    # Normalizer for the weighted average.
    weights_sum = weights.sum(dim=(-1, -2))
    # Weighted mean over each window gives the filtered pixel values.
    weighted_pix = (weights * patches).sum(dim=(-1, -2)) / weights_sum
    return weighted_pix
def test(args, genA2B, genB2A, testA_loader, testB_loader, name, step, A_bg, B_bg):
    """Render qualitative A<->B translation grids and save them as JPEGs.

    For 16 test pairs: reconstructs each image in its own domain, translates
    it with two random styles and with the style extracted from the opposite
    domain, composites the style-swapped result onto the given background
    batch via the predicted alpha mask, and cycles translations back to the
    source domain.  Two 16-column grids are written below the module-level
    `im_path` directory (NOTE(review): confirm `im_path` is set by the
    surrounding script before this runs).

    Fixes vs. the original: uses the `genA2B`/`genB2A` *parameters* instead
    of the module globals `G_A2B`/`G_B2A` for the identity reconstructions,
    and uses `next(iterator)` instead of the non-portable `.next()` method
    (consistent with how `train()` consumes its loaders).
    """
    testA_loader = iter(testA_loader)
    testB_loader = iter(testB_loader)
    with torch.no_grad():
        test_sample_num = 16

        genA2B.eval(), genB2A.eval()
        A2B = []
        B2A = []
        for i in range(test_sample_num):
            real_A = next(testA_loader)
            real_B = next(testB_loader)

            real_A, real_B = real_A.cuda(), real_B.cuda()

            A2B_content, A2B_style = genA2B.encode(real_A)
            B2A_content, B2A_style = genB2A.encode(real_B)

            if i % 2 == 0:
                # Refresh the random style codes every other sample so rows
                # come in pairs that share the same random styles.
                A2B_mod1 = torch.randn([1, args.latent_dim]).cuda()
                B2A_mod1 = torch.randn([1, args.latent_dim]).cuda()
                A2B_mod2 = torch.randn([1, args.latent_dim]).cuda()
                B2A_mod2 = torch.randn([1, args.latent_dim]).cuda()

            # Identity reconstructions (A->A via B2A decoder, B->B via A2B).
            a_c, a_s = genA2B.encode(real_A)
            fake_A2A, alphaA2A = genB2A.decode(a_c, a_s)
            b_c, b_s = genB2A.encode(real_B)
            fake_B2B, alphaB2B = genA2B.decode(b_c, b_s)

            colsA = [real_A, fake_A2A]
            # Expand the 1-channel alpha mask to 3 channels for display.
            alphaA2A = alphaA2A.repeat(1, 3, 1, 1)
            colsA.append(alphaA2A)
            colsB = [real_B, fake_B2B]
            alphaB2B = alphaB2B.repeat(1, 3, 1, 1)
            colsB.append(alphaB2B)

            # Translations with the two random style codes.
            fake_A2B_1, alpha = genA2B.decode(A2B_content, A2B_mod1)
            fake_B2A_1, alpha = genB2A.decode(B2A_content, B2A_mod1)

            fake_A2B_2, alpha = genA2B.decode(A2B_content, A2B_mod2)
            fake_B2A_2, alpha = genB2A.decode(B2A_content, B2A_mod2)

            # Style swap: translate with the style extracted from the other
            # domain, then composite onto the background with the alpha mask.
            fake_A2B_3, alpha1 = genA2B.decode(A2B_content, B2A_style)
            fake_B2A_3, alpha2 = genB2A.decode(B2A_content, A2B_style)
            fake_A2B_3 = fake_A2B_3*alpha1 + (1-alpha1)*B_bg[0:1, :, :, :]
            fake_B2A_3 = fake_B2A_3*alpha2 + (1-alpha2)*A_bg[0:1, :, :, :]

            # Reuse the *_2 slots to display the alpha masks as grayscale.
            fake_A2B_2[:, 0:1, :, :] = alpha1
            fake_A2B_2[:, 1:2, :, :] = alpha1
            fake_A2B_2[:, 2:3, :, :] = alpha1
            fake_B2A_2[:, 0:1, :, :] = alpha2
            fake_B2A_2[:, 1:2, :, :] = alpha2
            fake_B2A_2[:, 2:3, :, :] = alpha2

            colsA += [fake_A2B_3, fake_A2B_1, fake_A2B_2]
            colsB += [fake_B2A_3, fake_B2A_1, fake_B2A_2]

            # Cycle reconstructions back to the source domain.
            fake_A2B2A, _, _, alpha = genB2A(fake_A2B_3, A2B_style)
            fake_B2A2B, _, _, alpha = genA2B(fake_B2A_3, B2A_style)
            colsA.append(fake_A2B2A)
            colsB.append(fake_B2A2B)

            fake_A2B2A, _, _, alpha = genB2A(fake_A2B_1, A2B_style)
            fake_B2A2B, _, _, alpha = genA2B(fake_B2A_1, B2A_style)
            colsA.append(fake_A2B2A)
            colsB.append(fake_B2A2B)

            fake_A2B2A, _, _, alpha = genB2A(fake_A2B_1, B2A_mod1)
            fake_B2A2B, _, _, alpha = genA2B(fake_B2A_1, A2B_mod1)
            colsA.append(fake_A2B2A)
            colsB.append(fake_B2A2B)

            # Stack this sample's columns vertically; one row per sample.
            colsA = torch.cat(colsA, 2).detach().cpu()
            colsB = torch.cat(colsB, 2).detach().cpu()
            A2B.append(colsA)
            B2A.append(colsB)

        A2B = torch.cat(A2B, 0)
        B2A = torch.cat(B2A, 0)

        utils.save_image(A2B, f'{im_path}/{name}_A2B_{str(step).zfill(6)}.jpg', normalize=True, range=(-1, 1), nrow=16)
        utils.save_image(B2A, f'{im_path}/{name}_B2A_{str(step).zfill(6)}.jpg', normalize=True, range=(-1, 1), nrow=16)

        genA2B.train(), genB2A.train()
def train(args, trainA_loader, trainB_loader, testA_loader, testB_loader, G_A2B, G_B2A, D_A, D_B, G_optim, D_optim, device, trainA_bg_loader, trainB_bg_loader):
G_A2B.train(), G_B2A.train(), D_A.train(), D_B.train()
trainA_loader = sample_data(trainA_loader)
trainB_loader = sample_data(trainB_loader)
trainA_bg_loader = sample_data(trainA_bg_loader)
trainB_bg_loader = sample_data(trainB_bg_loader)
G_scheduler = lr_scheduler.StepLR(G_optim, step_size=100000, gamma=0.5)
D_scheduler = lr_scheduler.StepLR(D_optim, step_size=100000, gamma=0.5)
pbar = range(args.iter)
if get_rank() == 0:
pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.1)
loss_dict = {}
mean_path_length_A2B = 0
mean_path_length_B2A = 0
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length = 0
if args.distributed:
G_A2B_module = G_A2B.module
G_B2A_module = G_B2A.module
D_A_module = D_A.module
D_B_module = D_B.module
D_L_module = D_L.module
else:
G_A2B_module = G_A2B
G_B2A_module = G_B2A
D_A_module = D_A
D_B_module = D_B
D_L_module = D_L
for idx in pbar:
i = idx + args.start_iter
if i > args.iter:
print('Done!')
break
#G_A2B.train(), G_A2B.encoder.eval(), G_B2A.eval(), D_A.train(), D_B.train()
#for p_i in G_A2B.encoder.parameters():
# p_i.requires_grad=False
#for p_i in G_B2A.parameters():
# p_i.requires_grad=False
ori_A = next(trainA_loader)
ori_B = next(trainB_loader)
A_bg = next(trainA_bg_loader)
B_bg = next(trainB_bg_loader)
if isinstance(ori_A, list):
ori_A = ori_A[0]
if isinstance(ori_B, list):
ori_B = ori_B[0]
if isinstance(A_bg, list):
A_bg = A_bg[0]
if isinstance(B_bg, list):
B_bg = B_bg[0]
ori_A = ori_A.to(device)
ori_B = ori_B.to(device)
A_bg = augA2(A_bg.to(device))
B_bg = augB2(B_bg.to(device))
#aug_A = augA2(ori_A)
# aug_A_smooth = bilateralFilter(aug_A, 15, 0.15, 5).detach()
#aug_B = augB2(ori_B)
aug_A = augA(ori_A)
aug_B = augB(ori_B)
# aug_A = DiffAugment(ori_A, policy='color,translation,cutout')
# aug_B = DiffAugment(ori_B, policy='color,translation,cutout')
# A = augA(ori_A[[np.random.randint(args.batch)]].expand_as(ori_A))
# B = augB(ori_B[[np.random.randint(args.batch)]].expand_as(ori_B))
# A = augA(ori_A)
# B = augB(ori_B)
# A = ori_A
# B = ori_B
batch_id = np.random.randint(args.batch)
single_A_batch = ori_A[[batch_id]].expand_as(ori_A)
single_B_batch = ori_B[[batch_id]].expand_as(ori_B)
# single_A_batch = ori_A[[batch_id]].expand(ori_A.shape[0]+1, ori_A.shape[1], ori_A.shape[2], ori_A.shape[3])
# single_B_batch = ori_B[[batch_id]].expand(ori_B.shape[0]+1, ori_B.shape[1], ori_B.shape[2], ori_B.shape[3])
#A = augA3(single_A_batch)
A = augA(single_A_batch)
A[1] = torch.flip(A[0],[2])
# B = augB3(single_B_batch)
B = augB(single_B_batch)
B[1] = torch.flip(B[0],[2])
# A = augA2(ori_A)
# B = augB2(ori_B)
if i % args.d_reg_every == 0:
aug_A.requires_grad = True
aug_B.requires_grad = True
# if i % args.d_reg_every == 0:
# A.requires_grad = True
# B.requires_grad = True
A2B_content, A2B_style = G_A2B.encode(A)
B2A_content, B2A_style = G_B2A.encode(B)
A_aug_style = G_A2B.style_encode(augA(single_A_batch))
B_aug_style = G_B2A.style_encode(augB(single_B_batch))
# get new style
aug_A2B_style = G_B2A.style_encode(aug_B)
aug_B2A_style = G_A2B.style_encode(aug_A)
rand_A2B_style = torch.randn([args.batch, args.latent_dim]).to(device).requires_grad_()
rand_B2A_style = torch.randn([args.batch, args.latent_dim]).to(device).requires_grad_()
#print(rand_A2B_style.shape)
# styles
idx = torch.randperm(2*args.batch)
#print(idx)
#print(rand_A2B_style)
#print(aug_A2B_style)
input_A2B_style = torch.cat([rand_A2B_style, aug_A2B_style], 0)[idx][:args.batch]
#print(A2B_style.shape)
#print(input_A2B_style)
idx = torch.randperm(2*args.batch)
input_B2A_style = torch.cat([rand_B2A_style, aug_B2A_style], 0)[idx][:args.batch]
fake_A2B, fake_A2B_alpha = G_A2B.decode(A2B_content, input_A2B_style)
fake_B2A, fake_B2A_alpha = G_B2A.decode(B2A_content, input_B2A_style)
b_c, b_s = G_B2A.encode(B_bg)
B_bg, _ = G_A2B.decode(b_c, input_A2B_style)
a_c, a_s = G_A2B.encode(A_bg)
A_bg, _ = G_B2A.decode(a_c, input_B2A_style)
B_bg = B_bg.detach()
A_bg = A_bg.detach()
if i % 2 == 0:
A_bg[1] = torch.flip(A_bg[0],[2])
B_bg[1] = torch.flip(B_bg[0],[2])
fake_A2B_detach = fake_A2B.detach()
fake_B2A_detach = fake_B2A.detach()
fake_A2B = fake_A2B_detach*fake_A2B_alpha + (1.0-fake_A2B_alpha)*(B_bg)
fake_B2A = fake_B2A_detach*fake_B2A_alpha + (1.0-fake_B2A_alpha)*(A_bg)
# train disc
# aug_A_smooth = bilateralFilter(aug_A, 15, 0.15, 5)
real_A_logit = D_A(aug_A)
real_B_logit = D_B(aug_B)
# A_smooth = bilateralFilter(A, 15, 0.15, 5)
# real_A_logit = D_A(A_smooth)
# real_B_logit = D_B(B)
real_L_logit1 = D_L(rand_A2B_style)
real_L_logit2 = D_L(rand_B2A_style)
fake_B_logit = D_B(fake_A2B.detach())
fake_A_logit = D_A(fake_B2A.detach())
# fake_B_logit = D_B(DiffAugment(fake_A2B.detach(), policy='color,translation,cutout'))
# fake_A_logit = D_A(DiffAugment(fake_B2A.detach(), policy='color,translation,cutout'))
fake_L_logit1 = D_L(aug_A2B_style.detach())
fake_L_logit2 = D_L(aug_B2A_style.detach())
# global loss
D_loss = d_logistic_loss(real_A_logit, fake_A_logit) +\
d_logistic_loss(real_B_logit, fake_B_logit) +\
d_logistic_loss(real_L_logit1, fake_L_logit1) +\
d_logistic_loss(real_L_logit2, fake_L_logit2)
loss_dict['D_adv'] = D_loss
if i % args.d_reg_every == 0:
# r1_A_loss = d_r1_loss(real_A_logit, A)
# r1_B_loss = d_r1_loss(real_B_logit, B)
r1_A_loss = d_r1_loss(real_A_logit, aug_A)
r1_B_loss = d_r1_loss(real_B_logit, aug_B)
r1_L_loss = d_r1_loss(real_L_logit1, rand_A2B_style) + d_r1_loss(real_L_logit2, rand_B2A_style)
r1_loss = r1_A_loss + r1_B_loss + r1_L_loss
D_r1_loss = (args.r1 / 2 * r1_loss * args.d_reg_every)
D_loss += D_r1_loss
D_optim.zero_grad()
D_loss.backward()
D_optim.step()
#Generator
# adv loss
fake_B_logit = D_B(fake_A2B)
fake_A_logit = D_A(fake_B2A)
# fake_B_logit = D_B(DiffAugment(fake_A2B, policy='color,translation,cutout'))
# fake_A_logit = D_A(DiffAugment(fake_B2A, policy='color,translation,cutout'))
fake_L_logit1 = D_L(aug_A2B_style)
fake_L_logit2 = D_L(aug_B2A_style)
lambda_adv = (1, 1, 1)
G_adv_loss = 1 * (g_nonsaturating_loss(fake_A_logit, lambda_adv) +\
g_nonsaturating_loss(fake_B_logit, lambda_adv) +\
2*g_nonsaturating_loss(fake_L_logit1, (1,)) +\
2*g_nonsaturating_loss(fake_L_logit2, (1,)))
# style consis loss
G_con_loss = 50 * (A2B_style.var(0, unbiased=False).sum() + B2A_style.var(0, unbiased=False).sum())
# G_con_loss = 50 * (cosine_distance(A2B_style).sum() + cosine_distance(B2A_style).sum())
# cycle recon
A2B2A_content, A2B2A_style = G_B2A.encode(fake_A2B)
#print(A2B2A_content.shape)
B2A2B_content, B2A2B_style = G_A2B.encode(fake_B2A)
# fake_A2B2A = G_B2A.decode(A2B2A_content, shuffle_batch(A2B_style))
# fake_B2A2B = G_A2B.decode(B2A2B_content, shuffle_batch(B2A_style))
fake_A2B2A, fake_A2B2A_alpha = G_B2A.decode(A2B2A_content, shuffle_batch(A_aug_style))
fake_B2A2B, fake_B2A2B_alpha = G_A2B.decode(B2A2B_content, shuffle_batch(B_aug_style))
# fake_A2B2A, fake_A2B2A_alpha = G_B2A.decode(A2B2A_content, A2B_style)
# fake_B2A2B, fake_B2A2B_alpha = G_A2B.decode(B2A2B_content, B2A_style)
# fake_A2B2A, fake_A2B2A_alpha = G_B2A.decode(A2B2A_content, aug_B2A_style)
# fake_B2A2B, fake_B2A2B_alpha = G_A2B.decode(B2A2B_content, aug_A2B_style)
# fake_B2AA = G_B2A.decode(B2A_content, B2A2B_style)
# fake_A2BB = G_A2B.decode(A2B_content, A2B2A_style)
A_smooth = bilateralFilter(A, 15, 0.15, 5)
#G_cycle_loss = 0 * (F.mse_loss(fake_A2B2A, A) + F.mse_loss(fake_B2A2B, B))
G_cycle_loss = 0
# A_downsample = F.avg_pool2d(A_smooth, kernel_size=4, stride=4)
# fake_A2B2A_downsample = F.avg_pool2d(fake_A2B2A, kernel_size=4, stride=4)
# B_downsample = F.avg_pool2d(B, kernel_size=4, stride=4)
# fake_B2A2B_downsample = F.avg_pool2d(fake_B2A2B, kernel_size=4, stride=4)
# fake_A2B2A = fake_A2B2A*fake_A2B2A_alpha + (1.0-fake_A2B2A_alpha)*A_smooth
# fake_B2A2B = fake_B2A2B*fake_B2A2B_alpha + (1.0-fake_B2A2B_alpha)*B
lpips_loss10 = (F.l1_loss(fake_A2B2A, A_smooth) +\
F.l1_loss(fake_B2A2B, B))
# lpips_loss10 = (F.l1_loss(F.avg_pool2d(fake_A2B2A, kernel_size=4, stride=4), F.avg_pool2d(A_smooth, kernel_size=4, stride=4)) +\
# F.l1_loss(F.avg_pool2d(fake_B2A2B, kernel_size=4, stride=4), F.avg_pool2d(B, kernel_size=4, stride=4)))
# fake_A2B2A_alphakk = (fake_A2B2A_alpha + 2.0)/2.0
# fake_B2A2B_alphakk = (fake_B2A2B_alpha + 2.0)/2.0
# lpips_loss10 = (F.l1_loss(fake_A2B2A*fake_A2B2A_alphakk, A*fake_A2B2A_alphakk) +\
# F.l1_loss(fake_B2A2B*fake_B2A2B_alphakk, B*fake_B2A2B_alphakk))
lpips_loss = 30*lpips_loss10 + 15*(lpips_fn(fake_A2B2A, A_smooth).mean() + lpips_fn(fake_B2A2B, B).mean())
# lpips_loss = 120*lpips_loss10 + 60*(lpips_fn(F.avg_pool2d(fake_A2B2A, kernel_size=4, stride=4), F.avg_pool2d(A_smooth, kernel_size=4, stride=4)).mean() +\
# lpips_fn(F.avg_pool2d(fake_B2A2B, kernel_size=4, stride=4), F.avg_pool2d(B, kernel_size=4, stride=4)).mean())
if i % 2 == 0:
fake_Abg, alphaAg = G_B2A.decode(B2A2B_content, a_s)
fake_Bbg, alphaBg = G_A2B.decode(A2B2A_content, b_s)
# fake_Abg = F.avg_pool2d(fake_Abg, kernel_size=4, stride=4)
# alphaAg = F.avg_pool2d(alphaAg, kernel_size=4, stride=4)
# fake_Bbg = F.avg_pool2d(fake_Bbg, kernel_size=4, stride=4)
# alphaBg = F.avg_pool2d(alphaBg, kernel_size=4, stride=4)
# A_bg_s = F.avg_pool2d(A_bg, kernel_size=4, stride=4)
# B_bg_s = F.avg_pool2d(B_bg, kernel_size=4, stride=4)
# fake_A2B2A = F.avg_pool2d(fake_A2B2A, kernel_size=4, stride=4)
# fake_B2A2B = F.avg_pool2d(fake_B2A2B, kernel_size=4, stride=4)
# A_smooth_s = F.avg_pool2d(A_smooth, kernel_size=4, stride=4)
# B_s = F.avg_pool2d(B, kernel_size=4, stride=4)
# fake_A2B2A_alpha = F.avg_pool2d(fake_A2B2A_alpha, kernel_size=4, stride=4)
# fake_B2A2B_alpha = F.avg_pool2d(fake_B2A2B_alpha, kernel_size=4, stride=4)
# lpips_loss1 = (F.l1_loss(fake_Abg*(1-alphaAg), A_bg_s*(1-alphaAg)) +\
# F.l1_loss(fake_Bbg*(1-alphaBg), B_bg_s*(1-alphaBg)))
# lpips_loss2 = (F.l1_loss(fake_A2B2A*fake_A2B2A_alpha, A_smooth_s*fake_A2B2A_alpha) +\
# F.l1_loss(fake_B2A2B*fake_B2A2B_alpha, B_s*fake_B2A2B_alpha))
lpips_loss1 = (F.l1_loss(fake_Abg*(1-alphaAg), A_bg*(1-alphaAg)) +\
F.l1_loss(fake_Bbg*(1-alphaBg), B_bg*(1-alphaBg)))
lpips_loss2 = (F.l1_loss(fake_A2B2A*fake_A2B2A_alpha, A_smooth*fake_A2B2A_alpha) +\
F.l1_loss(fake_B2A2B*fake_B2A2B_alpha, B*fake_B2A2B_alpha))
# B_bg2A, _ = G_B2A.decode(b_c, A2B_style)
# A_bg2B, _ = G_A2B.decode(a_c, B2A_style)
# B_bg2A = B_bg2A.detach()
# A_bg2B = A_bg2B.detach()
# A_replace_bg = fake_A2B2A_alpha * A + (1.0-fake_A2B2A_alpha)*(B_bg2A)
# B_replace_bg = fake_B2A2B_alpha * B + (1.0-fake_B2A2B_alpha)*(A_bg2B)
lpips_loss = 40*lpips_loss1 + 20*lpips_loss2 #+ 10*(lpips_fn(fake_A2B2A, A_replace_bg).mean() + lpips_fn(fake_B2A2B, B_replace_bg).mean())
#lpips_loss = 160*lpips_loss1 + 80*lpips_loss2
# lpips_loss = 0
# lpips_loss1 = (F.l1_loss(fake_B2AA, fake_B2A) +\
# F.l1_loss(fake_A2BB, fake_A2B))
# lpips_loss = lpips_loss0 + 20*lpips_loss1 + 10*(lpips_fn(fake_A2BB, fake_A2B).mean() + lpips_fn(fake_B2AA, fake_B2A).mean())
#A_downsample = F.avg_pool2d(A_smooth, kernel_size=4, stride=4)
#fake_A2B2A_downsample = F.avg_pool2d(fake_A2B2A, kernel_size=4, stride=4)
#B_downsample = F.avg_pool2d(B, kernel_size=4, stride=4)
#fake_B2A2B_downsample = F.avg_pool2d(fake_B2A2B, kernel_size=4, stride=4)
#lpips_loss = 10 * (lpips_fn(fake_A2B2A_downsample.mean(1), A_downsample.mean(1)).mean() + lpips_fn(fake_B2A2B_downsample.mean(1), B_downsample.mean(1)).mean()) #10 for anime
# lpips_loss = 0
flip_loss = 5.0 * (F.l1_loss(fake_A2B[0], torch.flip(fake_A2B[1],[2])) + F.l1_loss(fake_B2A[0], torch.flip(fake_B2A[1],[2])))
# 1.0 * (F.l1_loss(fake_A2B_alpha[0], torch.flip(fake_A2B_alpha[1],[2])) + F.l1_loss(fake_B2A_alpha[0], torch.flip(fake_B2A_alpha[1],[2])))
# style reconstruction
# G_style_loss = 5 * (smooth_l1(A2B2A_style, input_A2B_style) +\
# smooth_l1(B2A2B_style, input_B2A_style))
# style_loss1 = 5 * (smooth_l1(A2B_style, aug_B2A_style) +\
# smooth_l1(B2A_style, aug_A2B_style))
style_loss2 = 5 * (smooth_l1(A2B_style, A_aug_style) +\
smooth_l1(B2A_style, B_aug_style))
G_style_loss = 10 * (smooth_l1(A2B2A_style, input_A2B_style) +\
smooth_l1(B2A2B_style, input_B2A_style)) + style_loss2
# G_style_loss = 0
# crloss
# c_fake_B_logit = D_B(fake_B2A2B)
# c_fake_A_logit = D_A(fake_A2B2A)
# lambda_adv = (1, 1, 1)
# c_adv_loss = 0.1 * (g_nonsaturating_loss(c_fake_A_logit, lambda_adv) +\
# g_nonsaturating_loss(c_fake_B_logit, lambda_adv) )
c_adv_loss = 0
# feature presering loss
# kk1 = 1 + i/300000.0 * 3
# cf_loss = 20 * (F.l1_loss(A2B2A_content, A2B_content) +\
# F.l1_loss(B2A2B_content, B2A_content))
cf_loss = 2 * (F.l1_loss(A2B2A_content, A2B_content) +\
F.l1_loss(B2A2B_content, B2A_content))
if i % 2 == 0:
cf_loss = 0
# identity loss
b_c_, b_s_ = G_B2A.encode(B)
fake_B2B,alpha1 = G_A2B.decode(b_c_, b_s_)
a_c_, a_s_ = G_A2B.encode(A)
fake_A2A,alpha2 = G_B2A.decode(a_c_, a_s_)
# A_smooth = bilateralFilter(A, 15, 0.15, 5)
# kk1 = 1 + i/300000.0 * 3.0
# alpha1 = (alpha1 + 2.0)/2.0
# alpha2 = (alpha2 + 2.0)/2.0
# cf_loss_p = 2.0 * (F.l1_loss(fake_A2A*alpha2, A*alpha2) +\
# F.l1_loss(fake_B2B*alpha1, B*alpha1))
cf_loss_p = 2.0 * (F.l1_loss(fake_A2A, A_smooth) +\
F.l1_loss(fake_B2B, B))
# ci_loss = cf_loss_p
ci_loss = cf_loss_p #+ 1.0 * (lpips_fn(fake_A2A, A_smooth).mean() + lpips_fn(fake_B2B, B).mean())
# ci_loss = 0
if i % 2 == 0:
#alpha_delta = torch.clamp(alpha2 - fake_A2B_alpha, 0, 1) #+ torch.clamp(fake_B2A_alpha - alpha1, 0, 1)
#c_alpha_loss = 0.001*alpha_delta.sum()
# c_alpha_loss = 1.0 * (F.l1_loss(fake_A2B_alpha, alpha2) +\
# F.l1_loss(fake_B2A_alpha, alpha1))
c_alpha_loss = 0
ci_loss = c_alpha_loss #+ ci_loss
G_loss = G_adv_loss + ci_loss + cf_loss + c_adv_loss + G_con_loss + lpips_loss + G_style_loss + flip_loss
if i % 2 == 0:
kkkk = (i-100000)/200000.
if kkkk > 1:
kkkk = 1
if kkkk < 0:
kkkk = 0
G_loss = G_loss * (0.5+0.5*kkkk)
loss_dict['G_adv'] = G_adv_loss
loss_dict['G_con'] = G_con_loss
loss_dict['G_cycle'] = G_cycle_loss
loss_dict['lpips'] = lpips_loss
loss_dict['ci_loss'] = ci_loss
loss_dict['cf_loss'] = cf_loss
loss_dict['c_adv_loss'] = c_adv_loss
G_optim.zero_grad()
G_loss.backward()
G_optim.step()
# g_regularize = i % args.g_reg_every == 0
# if g_regularize:
# # path_batch_size = max(1, args.batch // args.path_batch_shrink)
# rand_A2B_style = torch.randn([args.batch, args.latent_dim]).to(device).requires_grad_()
# fake_img, _ = G_A2B.decode(A2B_content.detach(), rand_A2B_style)
# path_loss, mean_path_length, path_lengths = g_path_regularize(
# fake_img, rand_A2B_style, mean_path_length
# )
# # G_A2B.decoder.zero_grad()
# G_A2B.encoder.eval(), G_B2A.eval()
# G_optim.zero_grad()
# weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
# # if args.path_batch_shrink:
# # weighted_path_loss += 0 * fake_img[0, 0, 0, 0]
# weighted_path_loss.backward()
# G_optim.step()
# mean_path_length_avg = (
# reduce_sum(mean_path_length).item() / get_world_size()
# )
# if i % 50 == 0:
# print("path_loss: %.8f, mean_path_length_avg: %.8f, path_lengths: %.8f" % \
# (path_loss, mean_path_length_avg, path_lengths))
G_scheduler.step()
D_scheduler.step()
# accumulate(G_A2B_ema, G_A2B_module)
# accumulate(G_B2A_ema, G_B2A_module)
loss_reduced = reduce_loss_dict(loss_dict)
D_adv_loss_val = loss_reduced['D_adv'].mean().item()
G_adv_loss_val = loss_reduced['G_adv'].mean().item()
#G_cycle_loss_val = loss_reduced['G_cycle'].mean().item()
G_con_loss_val = loss_reduced['G_con'].mean().item()
G_cycle_loss_val = 0
lpips_val = 0
# lpips_val = loss_reduced['lpips'].mean().item()
ci_loss_val = 0
# ci_loss_val = loss_reduced['ci_loss'].mean().item()
# cf_loss_val = loss_reduced['cf_loss'].mean().item()
cf_loss_val = cf_loss
# c_adv_loss_val = loss_reduced['c_adv_loss'].mean().item()
c_adv_loss_val = 0
if get_rank() == 0:
pbar.set_description(
(
f'Dadv: {D_adv_loss_val:.2f}; lpips: {lpips_val:.2f} '
f'Gadv: {G_adv_loss_val:.2f}; Gcycle: {G_cycle_loss_val:.2f}; GMS: {G_con_loss_val:.2f} {G_style_loss:.2f}; Grrrrrrr: {ci_loss_val:.2f} {cf_loss_val:.2f} {c_adv_loss_val:.2f};'
)
)
if i % 500 == 0:
with torch.no_grad():
test(args, G_A2B, G_B2A, testA_loader, testB_loader, 'normal', i, A_bg, B_bg)
#test(args, G_A2B_ema, G_B2A_ema, testA_loader, testB_loader, 'ema', i)
if (i+1) % 2000 == 0:
torch.save(
{
# 'G_A2B': G_A2B_module.state_dict(),
'G_B2A': G_B2A_module.state_dict(),
'G_A2B_encoder': G_A2B.encoder.state_dict(),
'G_A2B_decoder': G_A2B.decoder.state_dict(),
# 'G_A2B_ema': G_A2B_ema.state_dict(),
# 'G_B2A_ema': G_B2A_ema.state_dict(),
'D_A': D_A_module.state_dict(),
'D_B': D_B_module.state_dict(),
'D_L': D_L_module.state_dict(),
'G_optim': G_optim.state_dict(),
'D_optim': D_optim.state_dict(),
'iter': i,
},
os.path.join(model_path, 'ck.pt'),
)
if __name__ == '__main__':
    device = 'cuda'

    # ---------------- command-line configuration ----------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--iter', type=int, default=600000)
    parser.add_argument('--batch', type=int, default=4)
    parser.add_argument('--n_sample', type=int, default=64)
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--r1', type=float, default=10)
    parser.add_argument('--lambda_cycle', type=int, default=1)
    parser.add_argument('--path_regularize', type=float, default=1)
    parser.add_argument('--path_batch_shrink', type=int, default=2)
    parser.add_argument('--d_reg_every', type=int, default=15)
    parser.add_argument('--g_reg_every', type=int, default=4)
    parser.add_argument('--mixing', type=float, default=0.9)
    parser.add_argument('--ckpt', type=str, default=None)
    # parser.add_argument('--lr', type=float, default=2e-3)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--num_down', type=int, default=3)
    parser.add_argument('--name', type=str, required=True)
    parser.add_argument('--d_path', type=str, required=True)
    parser.add_argument('--latent_dim', type=int, default=8)
    parser.add_argument('--lr_mlp', type=float, default=0.01)
    parser.add_argument('--n_res', type=int, default=1)
    args = parser.parse_args()

    # Distributed training is forced off here, so the init branch below
    # never runs even when WORLD_SIZE is set in the environment.
    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    args.distributed = False
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()

    # Output layout: ./<name>/sample for images, ./<name>/checkpoint for weights.
    save_path = f'./{args.name}'
    im_path = os.path.join(save_path, 'sample')
    model_path = os.path.join(save_path, 'checkpoint')
    os.makedirs(im_path, exist_ok=True)
    os.makedirs(model_path, exist_ok=True)

    args.n_mlp = 5
    args.start_iter = 0

    # Two generators (A->B, B->A), two image discriminators, one latent
    # discriminator, and the LPIPS perceptual distance used in the losses.
    G_A2B = Generator2_alpha( args.size, args.num_down, args.latent_dim, args.n_mlp, lr_mlp=args.lr_mlp, n_res=args.n_res).to(device)
    D_A = Discriminator(args.size).to(device)
    G_B2A = Generator2_alpha( args.size, args.num_down, args.latent_dim, args.n_mlp, lr_mlp=args.lr_mlp, n_res=args.n_res).to(device)
    D_B = Discriminator(args.size).to(device)
    D_L = LatDiscriminator(args.latent_dim).to(device)
    lpips_fn = lpips.LPIPS(net='vgg').to(device)
    # G_A2B_ema = copy.deepcopy(G_A2B).to(device).eval()
    # G_B2A_ema = copy.deepcopy(G_B2A).to(device).eval()

    # Lazy-regularization LR/beta correction (StyleGAN2 style). NOTE(review):
    # g_reg_ratio is computed but not applied to G_optim below — confirm.
    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
    G_optim = optim.Adam( list(G_A2B.parameters()) + list(G_B2A.parameters()), lr=args.lr, betas=(0, 0.99))
    D_optim = optim.Adam(
        list(D_L.parameters()) + list(D_A.parameters()) + list(D_B.parameters()),
        lr=args.lr, betas=(0**d_reg_ratio, 0.99**d_reg_ratio))

    if args.ckpt is not None:
        # NOTE(review): the checkpoint path is hard-coded and the value of
        # --ckpt is only used to derive start_iter — confirm this is intended.
        ckpt = torch.load("/data/cairui/CRGANsNRoses/GANsNRoses/results11/checkpoint/ck.pt", map_location=lambda storage, loc: storage)
        try:
            ckpt_name = os.path.basename(args.ckpt)
            args.start_iter = int(os.path.splitext(ckpt_name)[0])
        except ValueError:
            pass
        # G_A2B.load_state_dict(ckpt['G_A2B'])
        G_A2B.encoder.load_state_dict(ckpt['G_A2B_encoder'])
        G_A2B.decoder.load_state_dict(ckpt['G_A2B_decoder'])
        G_B2A.load_state_dict(ckpt['G_B2A'])
        # G_A2B_ema.load_state_dict(ckpt['G_A2B_ema'])
        # G_B2A_ema.load_state_dict(ckpt['G_B2A_ema'])
        D_A.load_state_dict(ckpt['D_A'])
        D_B.load_state_dict(ckpt['D_B'])
        D_L.load_state_dict(ckpt['D_L'])
        G_optim.load_state_dict(ckpt['G_optim'])
        D_optim.load_state_dict(ckpt['D_optim'])
        # The saved iteration wins over any value parsed from the file name.
        args.start_iter = ckpt['iter']
        # args.start_iter = 0
        #crrrrrrrrrrrrrr add
        #torch.save(
        #    {
        #        'G_A2B_encoder': G_A2B.encoder.state_dict(),
        #        'G_B2A': ckpt['G_B2A'],
        #        'D_A': ckpt['D_A'],
        #        'D_B': ckpt['D_B'],
        #        'D_L': ckpt['D_L'],
        #        'G_optim': ckpt['G_optim'],
        #        'D_optim': ckpt['D_optim'],
        #        'iter': 0,
        #    },
        #    os.path.join(model_path, 'ck_encoder.pt'),
        #)

    # Dead code while args.distributed is hard-coded False above.
    if args.distributed:
        G_A2B = nn.parallel.DistributedDataParallel(
            G_A2B,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        D_A = nn.parallel.DistributedDataParallel(
            D_A,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        G_B2A = nn.parallel.DistributedDataParallel(
            G_B2A,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        D_B = nn.parallel.DistributedDataParallel(
            D_B,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        D_L = nn.parallel.DistributedDataParallel(
            D_L,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )

    # CPU-side transforms; geometric augmentation is done on-tensor with
    # kornia pipelines below.
    train_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), inplace=True)
    ])
    test_transform = transforms.Compose([
        transforms.Resize((args.size, args.size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), inplace=True)
    ])

    # Kornia augmentation pipelines: augA/augB apply affine + crop + flip;
    # the *2 variants skip the affine, the *3 variants also skip the flip.
    augB = nn.Sequential(
        K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        K.RandomHorizontalFlip(),
    )
    augA = nn.Sequential(
        K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        K.RandomHorizontalFlip(),
    )
    augB2 = nn.Sequential(
        # K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        K.RandomHorizontalFlip(),
    )
    augA2 = nn.Sequential(
        # K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        K.RandomHorizontalFlip(),
    )
    augB3 = nn.Sequential(
        # K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        # K.RandomHorizontalFlip(),
    )
    augA3 = nn.Sequential(
        # K.RandomAffine(degrees=(-20,20), scale=(0.8, 1.2), translate=(0.1, 0.1), shear=0.15),
        kornia.geometry.transform.Resize(256+10),
        K.RandomCrop((256,256)),
        # K.RandomHorizontalFlip(),
    )

    # NOTE(review): dataset roots are hard-coded absolute paths; args.d_path
    # is read into d_path but never used below — confirm.
    d_path = args.d_path
    trainA = ImageFolder(os.path.join("/data/dataset/crdata/CRDATA/", 'A_256'), train_transform)
    trainB = ImageFolder(os.path.join("/data/dataset/crdata/CRDATA/", 'B_256'), train_transform)
    testB = ImageFolder(os.path.join("/data/cairui/UGATIT-pytorch/dataset/selfie2anime", 'testB'), test_transform)
    testA = ImageFolder(os.path.join("/data/cairui/GANsNRoses/", 'testimg2'), test_transform)
    # trainA_bg = ImageFolder(os.path.join("/data/dataset/crdata/CRDATA/", 'A_256_bg'), train_transform)
    trainB_bg = ImageFolder(os.path.join("/data/dataset/crdata/CRDATA/", 'B_256_bg'), train_transform)
    trainA_bg = ImageFolder(os.path.join("/data/dataset/kkk_bg", 'kkk_bg'), train_transform)
    # trainB_bg = ImageFolder(os.path.join("/data/dataset/cartoon_bg", 'cartoon_bg'), train_transform)
    trainA_loader = data.DataLoader(trainA, batch_size=args.batch,
        sampler=data_sampler(trainA, shuffle=True, distributed=args.distributed), drop_last=True, pin_memory=True, num_workers=2)
    trainB_loader = data.DataLoader(trainB, batch_size=args.batch,
        sampler=data_sampler(trainB, shuffle=True, distributed=args.distributed), drop_last=True, pin_memory=True, num_workers=2)
    trainA_bg_loader = data.DataLoader(trainA_bg, batch_size=args.batch,
        sampler=data_sampler(trainA_bg, shuffle=True, distributed=args.distributed), drop_last=True, pin_memory=True, num_workers=2)
    trainB_bg_loader = data.DataLoader(trainB_bg, batch_size=args.batch,
        sampler=data_sampler(trainB_bg, shuffle=True, distributed=args.distributed), drop_last=True, pin_memory=True, num_workers=2)
    testA_loader = data.DataLoader(testA, batch_size=1, shuffle=False)
    testB_loader = data.DataLoader(testB, batch_size=1, shuffle=False)

    # Kick off the training loop defined earlier in the file.
    train(args, trainA_loader, trainB_loader, testA_loader, testB_loader, G_A2B, G_B2A, D_A, D_B, G_optim, D_optim, device, trainA_bg_loader, trainB_bg_loader)
    # with torch.no_grad():
    #     test(args, G_A2B, G_B2A, testA_loader, testB_loader, 'normal', 444444, trainA_bg_loader, trainB_bg_loader)
| StarcoderdataPython |
127701 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
from dipy.direction.peaks import (PeaksAndMetrics,
reshape_peaks_for_visualization)
from dipy.core.sphere import Sphere
from dipy.io.image import save_nifti
import h5py
def _safe_save(group, array, name):
""" Safe saving of arrays with specific names
Parameters
----------
group : HDF5 group
array : array
name : string
"""
if array is not None:
ds = group.create_dataset(name, shape=array.shape,
dtype=array.dtype, chunks=True)
ds[:] = array
def load_peaks(fname, verbose=False):
    """ Load a PeaksAndMetrics HDF5 file (PAM5)

    Parameters
    ----------
    fname : string
        Filename of PAM5 file.
    verbose : bool
        Print summary information about the loaded file.

    Returns
    -------
    pam : PeaksAndMetrics object

    Raises
    ------
    IOError
        If *fname* does not end in ``.pam5`` or the stored file version is
        not ``0.0.1``.
    """
    if os.path.splitext(fname)[1].lower() != '.pam5':
        raise IOError('This function supports only PAM5 (HDF5) files')

    pam = PeaksAndMetrics()

    # BUG FIX: the file was previously opened without a context manager and
    # only closed on the success path, so a version mismatch or missing
    # dataset leaked the open HDF5 handle. The with-block closes it always.
    with h5py.File(fname, 'r') as f:
        pamh = f['pam']
        version = f.attrs['version']
        if version != '0.0.1':
            raise IOError('Incorrect PAM5 file version {0}'.format(version,))

        # Optional datasets: a missing key simply maps to None.
        try:
            affine = pamh['affine'][:]
        except KeyError:
            affine = None

        peak_dirs = pamh['peak_dirs'][:]
        peak_values = pamh['peak_values'][:]
        peak_indices = pamh['peak_indices'][:]

        try:
            shm_coeff = pamh['shm_coeff'][:]
        except KeyError:
            shm_coeff = None

        sphere_vertices = pamh['sphere_vertices'][:]

        try:
            odf = pamh['odf'][:]
        except KeyError:
            odf = None

        pam.affine = affine
        pam.peak_dirs = peak_dirs
        pam.peak_values = peak_values
        pam.peak_indices = peak_indices
        pam.shm_coeff = shm_coeff
        pam.sphere = Sphere(xyz=sphere_vertices)
        pam.B = pamh['B'][:]
        # total_weight and ang_thr are stored as length-1 arrays; unwrap.
        pam.total_weight = pamh['total_weight'][:][0]
        pam.ang_thr = pamh['ang_thr'][:][0]
        pam.gfa = pamh['gfa'][:]
        pam.qa = pamh['qa'][:]
        pam.odf = odf

    if verbose:
        print('PAM5 version')
        print(version)
        print('Affine')
        print(pam.affine)
        print('Dirs shape')
        print(pam.peak_dirs.shape)
        print('SH shape')
        if pam.shm_coeff is not None:
            print(pam.shm_coeff.shape)
        else:
            print('None')
        print('ODF shape')
        if pam.odf is not None:
            print(pam.odf.shape)
        else:
            print('None')
        print('Total weight')
        print(pam.total_weight)
        print('Angular threshold')
        print(pam.ang_thr)
        print('Sphere vertices shape')
        print(pam.sphere.vertices.shape)
    return pam
def save_peaks(fname, pam, affine=None, verbose=False):
    """ Save all important attributes of object PeaksAndMetrics in a PAM5 file
    (HDF5).

    Parameters
    ----------
    fname : string
        Filename of PAM5 file
    pam : PeaksAndMetrics
        Object holding peak_dirs, shm_coeffs and other attributes
    affine : array
        The 4x4 matrix transforming the date from native to world coordinates.
        PeaksAndMetrics should have that attribute but if not it can be
        provided here. Default None.
    verbose : bool
        Print summary information about the saved file.

    Returns
    -------
    pam : PeaksAndMetrics
        The object that was saved (unchanged).

    Raises
    ------
    IOError
        If *fname* does not end in ``.pam5``.
    ValueError
        If *pam* lacks peak_dirs, peak_values or peak_indices.
    """
    if os.path.splitext(fname)[1] != '.pam5':
        raise IOError('This function saves only PAM5 (HDF5) files')

    # Validate before creating the output file so a bad object does not
    # leave a half-written file behind.
    if not (hasattr(pam, 'peak_dirs') and hasattr(pam, 'peak_values') and
            hasattr(pam, 'peak_indices')):
        msg = 'Cannot save object without peak_dirs, peak_values'
        msg += ' and peak_indices'
        raise ValueError(msg)

    # Attributes that are allowed to be absent fall back to the argument
    # (affine) or to None (shm_coeff, odf).
    affine = pam.affine if hasattr(pam, 'affine') else affine
    shm_coeff = pam.shm_coeff if hasattr(pam, 'shm_coeff') else None
    odf = pam.odf if hasattr(pam, 'odf') else None

    # BUG FIX: the file handle was only closed on the success path; the
    # context manager guarantees it is closed if any _safe_save raises.
    with h5py.File(fname, 'w') as f:
        group = f.create_group('pam')
        f.attrs['version'] = u'0.0.1'
        version_string = f.attrs['version']

        _safe_save(group, affine, 'affine')
        _safe_save(group, pam.peak_dirs, 'peak_dirs')
        _safe_save(group, pam.peak_values, 'peak_values')
        _safe_save(group, pam.peak_indices, 'peak_indices')
        _safe_save(group, shm_coeff, 'shm_coeff')
        _safe_save(group, pam.sphere.vertices, 'sphere_vertices')
        _safe_save(group, pam.B, 'B')
        # Scalars are stored as length-1 arrays (load_peaks unwraps them).
        _safe_save(group, np.array([pam.total_weight]), 'total_weight')
        _safe_save(group, np.array([pam.ang_thr]), 'ang_thr')
        _safe_save(group, pam.gfa, 'gfa')
        _safe_save(group, pam.qa, 'qa')
        _safe_save(group, odf, 'odf')

    if verbose:
        print('PAM5 version')
        print(version_string)
        print('Affine')
        print(affine)
        print('Dirs shape')
        print(pam.peak_dirs.shape)
        print('SH shape')
        if shm_coeff is not None:
            print(shm_coeff.shape)
        else:
            print('None')
        print('ODF shape')
        if odf is not None:
            # Use the local alias consistently (it is pam.odf when present).
            print(odf.shape)
        else:
            print('None')
        print('Total weight')
        print(pam.total_weight)
        print('Angular threshold')
        print(pam.ang_thr)
        print('Sphere vertices shape')
        print(pam.sphere.vertices.shape)
    return pam
def peaks_to_niftis(pam,
                    fname_shm,
                    fname_dirs,
                    fname_values,
                    fname_indices,
                    fname_gfa,
                    reshape_dirs=False):
    """ Save SH, directions, indices and values of peaks to Nifti.

    Each attribute of *pam* is written as its own Nifti file using the
    object's affine; ``reshape_dirs`` routes the peak directions through
    ``reshape_peaks_for_visualization`` first.
    """
    affine = pam.affine
    save_nifti(fname_shm, pam.shm_coeff.astype(np.float32), affine)

    dirs = (reshape_peaks_for_visualization(pam) if reshape_dirs
            else pam.peak_dirs.astype(np.float32))
    save_nifti(fname_dirs, dirs, affine)

    save_nifti(fname_values, pam.peak_values.astype(np.float32), affine)
    save_nifti(fname_indices, pam.peak_indices, affine)
    save_nifti(fname_gfa, pam.gfa, affine)
| StarcoderdataPython |
1689427 | from .geometry import *
from .obstacle_generation import *
from .plotting import *
from .transformations import * | StarcoderdataPython |
1707520 | #!/usr/bin/env python
import logging
from json import loads
from time import sleep
from threading import Thread
from requests import get
from requests.exceptions import ConnectionError, ChunkedEncodingError, ReadTimeout
from artemisremotecontrol.config import Config
from artemisremotecontrol import setleds
# Derive the per-plugin config section name from the dotted module path
# (e.g. "artemisremotecontrol.tasmota" -> "tasmota"). When the module is
# run outside the package there is no dot, so bail out with a hint.
try:
    config_name = __name__.split('.')[1]
except IndexError:
    print('Run run.py after config.')
    exit()
def loop():
    """Poll every configured device forever and push states to the LEDs.

    Each cycle builds ``data[uri][device_name] -> value-or-'down'`` from the
    devices listed under this plugin's config section, then hands the whole
    map to setleds() and sleeps 5 seconds before the next cycle.
    """
    while True:
        data = {}
        for device in config.config[config_name]:
            for uri in device['data']:
                url = device['base_url'] + uri['uri']
                if not uri['uri'] in data:
                    data[uri['uri']] = {}
                try:
                    # Short timeout: a LAN device should answer quickly.
                    response = get(url, timeout=0.3)
                    status = response.status_code
                except (ConnectionError, ChunkedEncodingError, ReadTimeout):
                    status = False
                if status != 200:
                    # send "Disconnected" state
                    logging.debug('Can\'t connect to device')
                    data[uri['uri']][device['name']] = 'down'
                    continue
                response_content = loads(response.content.decode('utf-8'))
                logging.debug(response_content)
                # Pick the single configured key out of the JSON reply.
                value = response_content[uri['key']]
                logging.debug(f'Value: {value}')
                data[uri['uri']][device['name']] = value
                # Brief pause between requests to avoid hammering devices.
                sleep(0.1)
        setleds(config_name, data)
        sleep(5)
def save():
    """Register an example device entry in config.json.

    Edit the literal below to match your device, then run once (see the
    comments near the module bootstrap) so Config.add persists it under
    this plugin's section.
    """
    device = {
        'name': 'Dimmer',
        'base_url': 'http://192.168.1.13/cm?user=admin&password=pw&cmnd=',
        # change values if needed, only for adding to the config.json
        'data': [
            {'uri': 'Power', 'key': 'POWER'},
        ],
    }
    config.add(config_name, device)
    logging.info('Saved.')
# Module bootstrap: load this plugin's configuration section and start the
# polling loop on a background thread so importing the module begins work.
config = Config()
config.load(config_name)

# Change the values in save() and uncomment these two lines, run run.py *once* and comment them again
#save()
#exit()

tloop = Thread(target=loop)
tloop.start()
43330 | <reponame>Sumityg/Image-Classifier
import numpy as np
import torch
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
from torchvision import datasets,transforms,models
import torch.nn.functional as F
from collections import OrderedDict
import json
from torch.autograd import Variable
import argparse
import os
import sys
def agr_paser():
    """Parse command-line arguments for the training script.

    Returns
    -------
    argparse.Namespace
        data_dir, gpu, lr, epochs, arch, hidden_units, save_dir.
    """
    paser = argparse.ArgumentParser(description='trainer file')
    paser.add_argument('--data_dir', type=str, default='flowers', help='dataset directory')
    # BUG FIX: type=bool treats any non-empty string (including "False") as
    # True; parse the flag text explicitly instead.
    paser.add_argument('--gpu', type=lambda s: s.lower() in ('true', '1', 'yes'),
                       default=True, help='True:gpu,False:cpu')
    paser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    paser.add_argument('--epochs', type=int, default=10, help='number of epochs')
    paser.add_argument('--arch', type=str, default='vgg11', help='architecture')
    # BUG FIX: the previous default ([600, 200]) was a list although
    # type=int; a list crashes nn.Linear in set_classifier. Use the same
    # scalar fallback (500) that set_classifier applies for None.
    paser.add_argument('--hidden_units', type=int, default=500, help='hidden units for layer')
    paser.add_argument('--save_dir', type=str, default='checkpoint.pth', help='save trained model to disk')
    args = paser.parse_args()
    return args
def process_data(train_dir, test_dir, valid_dir):
    """Build the dataloaders for training, testing and validation.

    Training images are randomly rotated/cropped/flipped; test and
    validation images get a deterministic resize + center crop. All three
    pipelines normalize with the ImageNet statistics expected by the
    pre-trained backbones.

    Returns
    -------
    tuple
        (trainloader, testloader, validloader,
         train_data, test_data, valid_data)
    """
    normalize = transforms.Normalize((0.485, 0.456, 0.406),
                                     (0.229, 0.224, 0.225))

    train_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # Test and validation share the same deterministic pipeline.
    eval_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_transforms)

    trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)

    return trainloader, testloader, validloader, train_data, test_data, valid_data
def basic_model(arch):
    """Return an ImageNet pre-trained torchvision backbone.

    Parameters
    ----------
    arch : str or None
        'vgg16', 'densenet121', or None; anything else falls back to vgg16
        with a warning.

    Returns
    -------
    torch.nn.Module
        The pre-trained model.
    """
    # Load the pretrained network. Use `is None` for the None check
    # (identity, not equality — the Pythonic idiom).
    if arch is None or arch == 'vgg16':
        load_model = models.vgg16(pretrained=True)
        print('Use vgg16')
    elif arch == 'densenet121':
        load_model = models.densenet121(pretrained=True)
    else:
        print('Use vgg16 or densenet only defaulting to vgg16')
        load_model = models.vgg16(pretrained=True)
    return load_model
def set_classifier(model, hidden_units):
    """Replace the backbone's classifier head with a new 102-way head.

    Parameters
    ----------
    model : torch.nn.Module
        Pre-trained model with a ``classifier`` attribute (either an
        nn.Sequential, as in vgg, or a single nn.Linear, as in densenet).
    hidden_units : int or None
        Width of the first hidden layer; None falls back to 500.

    Returns
    -------
    torch.nn.Module
        The same model with ``model.classifier`` replaced.
    """
    if hidden_units is None:
        hidden_units = 500
    # BUG FIX: vgg exposes classifier as nn.Sequential while densenet121
    # exposes a bare nn.Linear; indexing [0] unconditionally crashed for
    # densenet even though basic_model offers it.
    head = model.classifier
    first_layer = head[0] if isinstance(head, nn.Sequential) else head
    in_features = first_layer.in_features
    # BUG FIX: the OrderedDict previously used the key 'dropout' twice,
    # which silently collapsed the two dropout layers into one; keys in an
    # OrderedDict must be unique.
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(in_features, hidden_units, bias=True)),
        ('relu1', nn.ReLU()),
        ('dropout1', nn.Dropout(p=0.5)),
        ('fc2', nn.Linear(hidden_units, 128)),
        ('relu2', nn.ReLU()),
        ('dropout2', nn.Dropout(p=0.5)),
        ('fc3', nn.Linear(128, 102)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    model.classifier = classifier
    return model
def train_model(epochs, trainloader, validloader, device, model, optimizer, criterion):
    """Train *model* for *epochs* epochs with per-epoch validation.

    Parameters
    ----------
    epochs : int or None
        Number of epochs; None falls back to 10.
    trainloader, validloader : torch.utils.data.DataLoader
        Batches of (images, labels).
    device : torch.device
        Device to run on.
    model : torch.nn.Module
        Model whose forward returns log-probabilities (NLLLoss input).
    optimizer : torch.optim.Optimizer
    criterion : callable
        Loss taking (log_probs, labels).

    Returns
    -------
    torch.nn.Module
        The trained model (left in train mode).
    """
    if epochs is None:  # idiomatic None check (was type(...) == type(None))
        epochs = 10
        print("Epochs=10")
    train_losses, validation_losses = [], []
    model.to(device)
    for e in range(epochs):
        running_loss = 0
        # BUG FIX: make sure dropout/batch-norm are in training mode each
        # epoch (validation below switches the model to eval mode).
        model.train()
        for images, labels in trainloader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            logps = model.forward(images)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        validation_loss = 0
        accuracy = 0
        # BUG FIX: the model previously stayed in train mode during
        # validation, so active dropout made the metrics noisy.
        model.eval()
        # Turning off the gradient for validation, saves memory and computations
        with torch.no_grad():
            for images, labels in validloader:
                images, labels = images.to(device), labels.to(device)
                logps = model.forward(images)
                batch_loss = criterion(logps, labels)
                validation_loss += batch_loss.item()
                ps = torch.exp(logps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor)).item()

        train_losses.append(running_loss/len(trainloader))
        validation_losses.append(validation_loss/len(validloader))
        print("Epoch: {}/{}.. ".format(e+1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)),
              "Validation Loss: {:.3f}.. ".format(validation_loss/len(validloader)),
              "Validation Accuracy: {:.3f}".format(accuracy/len(validloader)))
    # Leave the model in train mode, matching its default state on entry.
    model.train()
    return model
def valid_model(epochs, model, testloader, device, criterion):
    """Evaluate *model* on the test set, printing loss and accuracy.

    The whole evaluation runs in eval mode under ``torch.no_grad``; the
    epochs loop is kept for interface compatibility (each pass over the
    test data is deterministic and prints one summary line).

    Parameters
    ----------
    epochs : int or None
        Number of evaluation passes; None falls back to 10.
    model : torch.nn.Module
        Model whose forward returns log-probabilities.
    testloader : torch.utils.data.DataLoader
    device : torch.device
    criterion : callable
        Loss taking (log_probs, labels).
    """
    if epochs is None:  # idiomatic None check (was type(...) == type(None))
        epochs = 10
        print("Epochs=10")
    test_losses = []
    # Hoisted out of the loop: eval mode and no_grad apply to every pass.
    model.eval()
    with torch.no_grad():
        for e in range(epochs):
            test_loss = 0
            accuracy = 0
            for images, labels in testloader:
                images, labels = images.to(device), labels.to(device)
                logps = model.forward(images)
                batch_loss = criterion(logps, labels)
                test_loss += batch_loss.item()
                ps = torch.exp(logps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            test_losses.append(test_loss/len(testloader))
            print("Epoch: {}/{}.. ".format(e+1, epochs),
                  "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
                  "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
def save_checkpoint(model, train_data, save_dir, arch):
    """Persist the trained model's head, weights and class mapping.

    Parameters
    ----------
    model : torch.nn.Module
        Trained model; its class_to_idx attribute is set from train_data.
    train_data : Dataset
        Dataset exposing ``class_to_idx``.
    save_dir : str
        Destination path for the checkpoint file.
    arch : str
        Architecture name to record so the model can be rebuilt on load.

    Returns
    -------
    None
        (torch.save returns None.)
    """
    model.class_to_idx = train_data.class_to_idx
    checkpoint = {
        # BUG FIX: the architecture was hard-coded to 'vgg16', so
        # checkpoints trained with any other arch could not be rebuilt
        # correctly when reloaded.
        'arch': arch,
        'classifier': model.classifier,
        # Key kept as 'state_dic' (sic) for compatibility with existing
        # loading code that reads this checkpoint.
        'state_dic': model.state_dict(),
        'class_to_idx': model.class_to_idx,
    }
    return torch.save(checkpoint, save_dir)
def main():
    """End-to-end training entry point: parse args, build data and model,
    train, validate, and save a checkpoint."""
    args = agr_paser()
    is_gpu = args.gpu
    use_cuda = torch.cuda.is_available()
    # Default to CPU; only use CUDA when both requested and available.
    device = torch.device("cpu")
    if is_gpu and use_cuda:
        device = torch.device("cuda:0")
        print(f"Device is set to {device}")
    else:
        device = torch.device("cpu")
        print(f"Device is set to {device}")

    # Expected dataset layout: <data_dir>/{train,valid,test}
    data_dir = args.data_dir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    trainloader, testloader, validloader, train_data, test_data, valid_data = process_data(train_dir, test_dir, valid_dir)

    model = basic_model(args.arch)
    # Freeze the pre-trained feature extractor; only the new classifier
    # head (attached below) is trained.
    for param in model.parameters():
        param.requires_grad = False
    model = set_classifier(model, args.hidden_units)

    criterion = nn.NLLLoss()
    # Only the classifier's parameters are given to the optimizer.
    optimizer = optim.Adam(model.classifier.parameters(), lr=args.lr)

    trmodel = train_model(args.epochs, trainloader, validloader, device, model, optimizer, criterion)
    valid_model(args.epochs, trmodel, testloader, device, criterion)
    save_checkpoint(trmodel, train_data, args.save_dir, args.arch)
    print('Done')

if __name__ == '__main__': main()
| StarcoderdataPython |
3396263 | #!/usr/bin/python
import sys, urllib2, json, tower_cli, os, datetime
import splunk.entity as entity
# Tower Connect
#
# This script is used as wrapper to connect to Ansible Tower API.
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0"
#Securely retrieve Ansible Tower Credentials from Splunk REST API password endpoint
def getCredentials(sessionKey, realm):
    """Return the (username, clear_password) pair stored for *realm* in
    Splunk's secured password store.

    Fixes:
      * Replaced the Python-2-only ``except Exception, e`` syntax with
        ``except Exception as e`` (valid on both Python 2.6+ and 3).
      * On failure the code previously fell through and crashed with a
        NameError on the undefined ``entities``; now the original error
        is re-raised after being logged.

    NOTE(review): when no credential matches, this still implicitly
    returns None, which makes the caller's tuple-unpack raise TypeError —
    confirm whether that is the intended failure mode.
    """
    myapp = 'alert_ansible_tower'
    try:
        # list all credentials
        entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
                                      owner='nobody', sessionKey=sessionKey)
    except Exception as e:
        log("Could not get %s credentials from splunk. Error: %s"
            % (myapp, str(e)))
        raise
    # return first set of credentials
    for i, c in entities.items():
        if c.get('realm') == realm:
            return c['username'], c['clear_password']
    log("ERROR: No credentials have been found")
#Connect to Tower and authenticate using user/pass to receive auth token.
def tower_auth(hostname, username, password):
    """Authenticate against the Ansible Tower API with user/password and
    return the auth token; logs and returns None on URL errors."""
    try:
        body = json.dumps({
            "username": username,
            "password": password
        })
        request = urllib2.Request(
            url='https://' + hostname + '/api/v2/authtoken/',
            headers={"Content-Type": "application/json"},
            data=body
        )
        results = json.loads(urllib2.urlopen(request).read())
        return results['token']
    except urllib2.URLError as error:
        log(error.reason)
def tower_launch(hostname, username, password, job_id, extra_vars):
    """Launch an Ansible Tower job template, passing *extra_vars* along;
    success or failure is reported through log()."""
    # Authenticate to Ansible Tower and receive Auth Token.
    token = tower_auth(hostname, username, password)
    # Attempt to Launch Ansible Tower Job Template
    try:
        launch_url = 'https://' + hostname + '/api/v2/job_templates/' + job_id + '/launch/'
        request = urllib2.Request(
            url=launch_url,
            headers={
                "Content-Type": "application/json",
                "authorization": 'Token ' + token
            },
            data=json.dumps({"extra_vars": extra_vars})
        )
        results = json.loads(urllib2.urlopen(request).read())
        log("Job ID: " + str(results['job']) + " submitted successfully.")
    except urllib2.URLError as error:
        log(error.reason)
#Logging Function
def log(settings):
    """Append *settings*, prefixed with an ISO timestamp, to
    ``$SPLUNK_HOME/var/log/splunk/tower_api.log``.

    Fixes: uses a ``with`` context manager so the file handle is always
    closed, and replaces the Python-2-only ``print >> f`` statement with
    ``f.write`` so the helper also runs under Python 3.
    """
    log_path = os.path.join(os.environ["SPLUNK_HOME"], "var", "log", "splunk", "tower_api.log")
    with open(log_path, "a") as f:
        # print >> f, a, b  ==  "a b\n"; replicate that exactly.
        f.write("{0} {1}\n".format(datetime.datetime.now().isoformat(), settings))
def main(payload):
    """Handle one Splunk alert-action payload: resolve Tower credentials
    from the Splunk password store and launch the configured job template."""
    # Session key authenticates us to the Splunk REST API for credential retrieval.
    sessionKey = payload.get('session_key')

    config = payload['configuration']
    hostname = config.get('hostname')    # Ansible Tower hostname
    job_id = config.get('job_id')        # job template ID to launch
    realm = config.get('realm')          # credential realm to look up
    var_name = config.get('var_name')    # extra_vars variable name
    var_field = config.get('var_field')  # search-result field holding the value

    # Pull the variable's value from the triggering search result.
    var_value = payload['result'].get(var_field)
    extra_vars = str(var_name) + ": " + str(var_value)

    # Retrieve Ansible Tower credentials from the Splunk REST API.
    username, password = getCredentials(sessionKey, realm)

    # Submit the Ansible Tower job.
    tower_launch(hostname, username, password, job_id, extra_vars)
if __name__ == "__main__":
    # Splunk invokes custom alert actions as: script.py --execute,
    # with the JSON payload delivered on stdin.
    # Check if script initiated with --execute
    if len(sys.argv) < 2 or sys.argv[1] != "--execute":
        print >> sys.stderr, "FATAL Unsupported execution mode (expected --execute flag)"
        sys.exit(1)
    else:
        # Get the JSON payload from stdin
        payload = json.loads(sys.stdin.read())
        log("Job Started")
        # Pass payload to main function
        main(payload)
| StarcoderdataPython |
186837 | <filename>tools_box/tools_box/report/computing_asset_inspection_checklist_report/computing_asset_inspection_checklist_report.py
# Copyright (c) 2013, <EMAIL> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
#Employee Computing Asst Type Status Remarks ##Employee Sign
def execute(filters=None):
    """Build the Computing Asset Inspection Checklist report.

    Security fix: the ``status`` filter and the ``from``/``to`` dates were
    previously interpolated straight into the SQL string with ``str.format``,
    which allowed SQL injection and broke on values containing quotes.
    They are now passed as bound query parameters.

    :param filters: dict-like with keys "from", "to" and optionally "status"
    :returns: (columns, data) as expected by Frappe script reports
    """
    # Column spec: Label::Width (all plain data columns).
    columns = ["Employee::200", "Computing Asset Type::200", "Status::150", "Remark::200"]
    values = {
        "from": filters.get("from"),
        "to": filters.get("to"),
    }
    status_condition = ""
    if filters.get("status"):
        status_condition = " and d.status = %(status)s"
        values["status"] = filters.get("status")
    data = frappe.db.sql("""select e.employee_name,d.computing_asset_type,d.status,d.remark
        from `tabComputing Asset Inspection Item` d
        join `tabComputing Asset Inspection Checklist` p on d.parent=p.name
        join `tabEmployee` e on d.employee = e.name
        where p.docstatus=1 and (p.date between %(from)s and %(to)s){}""".format(status_condition),
        values, as_list=1)
    return columns, data
| StarcoderdataPython |
3223547 | <filename>chatbotenv/lib/python2.7/site-packages/chatterbot/conversation/statement.py
# -*- coding: utf-8 -*-
from .response import Response
from datetime import datetime
class Statement(object):
    """
    A statement represents a single spoken entity, sentence or
    phrase that someone can say.
    """

    def __init__(self, text, **kwargs):
        """Create a statement from *text*; optional kwargs: in_response_to,
        created_at, extra_data."""
        # Try not to allow non-string types to be passed to statements
        try:
            text = str(text)
        except UnicodeEncodeError:
            # Python 2: unicode text that cannot be ASCII-encoded is kept as-is
            pass

        self.text = text
        self.in_response_to = kwargs.pop('in_response_to', [])

        # The date and time that this statement was created at
        self.created_at = kwargs.pop('created_at', datetime.now())

        self.extra_data = kwargs.pop('extra_data', {})

        # This is the confidence with which the chat bot believes
        # this is an accurate response. This value is set when the
        # statement is returned by the chat bot.
        self.confidence = 0

        # Storage adapter backreference; set externally before save() is used.
        self.storage = None

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<Statement text:%s>' % (self.text)

    def __hash__(self):
        # Hash on text only, consistent with __eq__ below.
        return hash(self.text)

    def __eq__(self, other):
        # Statements compare equal to other statements (and plain strings)
        # with the same text; anything falsy compares unequal.
        if not other:
            return False

        if isinstance(other, Statement):
            return self.text == other.text

        return self.text == other

    def save(self):
        """
        Save the statement in the database.
        """
        self.storage.update(self)

    def add_extra_data(self, key, value):
        """
        This method allows additional data to be stored on the statement object.

        Typically this data is something that pertains just to this statement.
        For example, a value stored here might be the tagged parts of speech for
        each word in the statement text.

        - key = 'pos_tags'
        - value = [('Now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('different', 'JJ')]

        :param key: The key to use in the dictionary of extra data.
        :type key: str

        :param value: The value to set for the specified key.
        """
        self.extra_data[key] = value

    def add_response(self, response):
        """
        Add the response to the list of statements that this statement is in response to.
        If the response is already in the list, increment the occurrence count of that response.

        :param response: The response to add.
        :type response: `Response`
        """
        if not isinstance(response, Response):
            raise Statement.InvalidTypeException(
                'A {} was recieved when a {} instance was expected'.format(
                    type(response),
                    type(Response(''))
                )
            )

        updated = False
        # Idiom fix: iterate with enumerate instead of range(0, len(...)).
        # Every matching entry is incremented, as before.
        for index, existing in enumerate(self.in_response_to):
            if response.text == existing.text:
                self.in_response_to[index].occurrence += 1
                updated = True

        if not updated:
            self.in_response_to.append(response)

    def remove_response(self, response_text):
        """
        Removes a response from the statement's response list based
        on the value of the response text.

        :param response_text: The text of the response to be removed.
        :type response_text: str
        :returns: True if a response was removed, False otherwise.
        """
        for response in self.in_response_to:
            if response_text == response.text:
                self.in_response_to.remove(response)
                return True
        return False

    def get_response_count(self, statement):
        """
        Find the number of times that the statement has been used
        as a response to the current statement.

        :param statement: The statement object to get the count for.
        :type statement: `Statement`

        :returns: Return the number of times the statement has been used as a response.
        :rtype: int
        """
        for response in self.in_response_to:
            if statement.text == response.text:
                return response.occurrence

        return 0

    def serialize(self):
        """
        :returns: A dictionary representation of the statement object.
        :rtype: dict
        """
        data = {}

        data['text'] = self.text
        data['in_response_to'] = []
        data['created_at'] = self.created_at
        data['extra_data'] = self.extra_data

        for response in self.in_response_to:
            data['in_response_to'].append(response.serialize())

        return data

    @property
    def response_statement_cache(self):
        """
        This property is to allow ChatterBot Statement objects to
        be swappable with Django Statement models.
        """
        return self.in_response_to

    class InvalidTypeException(Exception):
        """Raised when add_response receives something that is not a Response."""

        def __init__(self, value='Recieved an unexpected value type.'):
            self.value = value

        def __str__(self):
            return repr(self.value)
| StarcoderdataPython |
1755749 | <filename>InvenTree/stock/test_views.py<gh_stars>0
""" Unit tests for Stock views (see views.py) """
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from common.models import InvenTreeSetting
import json
from datetime import datetime, timedelta
from InvenTree.status_codes import StockStatus
class StockViewTestCase(TestCase):
    """Base test case: loads stock fixtures and logs in a user whose
    group has been granted every rule-set permission."""

    fixtures = [
        'category',
        'part',
        'company',
        'location',
        'supplier_part',
        'stock',
    ]

    def setUp(self):
        super().setUp()

        # Create a staff user for the tests.
        User = get_user_model()
        self.user = User.objects.create_user(
            username='username',
            email='<EMAIL>',
            password='password'
        )
        self.user.is_staff = True
        self.user.save()

        # Put the user into a group with the correct permissions
        group = Group.objects.create(name='mygroup')
        self.user.groups.add(group)

        # Grant the group every permission on every rule set.
        for rule in group.rule_sets.all():
            for perm in ('can_view', 'can_change', 'can_add', 'can_delete'):
                setattr(rule, perm, True)
            rule.save()

        self.client.login(username='username', password='password')
class StockListTest(StockViewTestCase):
    """Tests for Stock list views."""

    def test_stock_index(self):
        """The stock index page should render successfully."""
        url = reverse('stock-index')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class StockLocationTest(StockViewTestCase):
    """Tests for StockLocation views."""

    def _ajax_get(self, url, data=None):
        """Issue a GET carrying the AJAX header these views require."""
        return self.client.get(url, data or {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_location_edit(self):
        """The edit form for an existing location should render."""
        response = self._ajax_get(reverse('stock-location-edit', args=(1,)))
        self.assertEqual(response.status_code, 200)

    def test_qr_code(self):
        """QR-code view renders for both a valid and an invalid StockLocation."""
        for pk in (1, 999):
            response = self._ajax_get(reverse('stock-location-qr', args=(pk,)))
            self.assertEqual(response.status_code, 200)

    def test_create(self):
        """Creation view renders with no parent, a valid parent, and an invalid parent."""
        response = self._ajax_get(reverse('stock-location-create'))
        self.assertEqual(response.status_code, 200)

        for parent in (1, 999):
            response = self._ajax_get(reverse('stock-location-create'), {'location': parent})
            self.assertEqual(response.status_code, 200)
class StockItemTest(StockViewTestCase):
    """ Tests for StockItem views """

    def test_qr_code(self):
        """QR-code view must render (200) for both existing and missing items."""
        # QR code for a valid item
        response = self.client.get(reverse('stock-item-qr', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # QR code for an invalid item
        response = self.client.get(reverse('stock-item-qr', args=(9999,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

    def test_adjust_items(self):
        """The stock-adjust form renders for each supported action."""
        url = reverse('stock-adjust')

        # Move items
        response = self.client.get(url, {'stock[]': [1, 2, 3, 4, 5], 'action': 'move'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Count part
        response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Remove items
        response = self.client.get(url, {'location': 1, 'action': 'take'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Add items
        response = self.client.get(url, {'item': 1, 'action': 'add'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Blank response
        response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # TODO - Tests for POST data

    def test_edit_item(self):
        """Edit form renders for purchaseable and non-purchaseable parts alike."""
        # Test edit view for StockItem
        response = self.client.get(reverse('stock-item-edit', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Test with a non-purchaseable part
        response = self.client.get(reverse('stock-item-edit', args=(100,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

    def test_create_item(self):
        """
        Test creation of StockItem

        The creation form is expected to render (200) even for unknown
        part/location/copy references — the view handles bad ids itself.
        """
        url = reverse('stock-item-create')

        response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Copy from a valid item, valid location
        response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Copy from an invalid item, invalid location
        response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

    def test_create_stock_with_expiry(self):
        """
        Test creation of stock item of a part with an expiry date.
        The initial value for the "expiry_date" field should be pre-filled,
        and should be in the future!
        """

        # First, ensure that the expiry date feature is enabled!
        InvenTreeSetting.set_setting('STOCK_ENABLE_EXPIRY', True, self.user)

        url = reverse('stock-item-create')

        # Part 25 is expected (from fixtures) to carry a default expiry period.
        response = self.client.get(url, {'part': 25}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # We are expecting 10 days in the future
        expiry = datetime.now().date() + timedelta(10)

        # The quadruple backslashes match the JSON-escaped HTML in the response body.
        expected = f'name=\\\\"expiry_date\\\\" value=\\\\"{expiry.isoformat()}\\\\"'

        self.assertIn(expected, str(response.content))

        # Now check with a part which does *not* have a default expiry period
        response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')

        expected = 'name=\\\\"expiry_date\\\\" placeholder=\\\\"\\\\"'

        self.assertIn(expected, str(response.content))

    def test_serialize_item(self):
        """Serial-number assignment: a valid POST succeeds, then the same
        serials and malformed data are both rejected."""
        # Test the serialization view
        url = reverse('stock-item-serialize', args=(100,))

        # GET the form
        response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data_valid = {
            'quantity': 5,
            'serial_numbers': '1-5',
            'destination': 4,
            'notes': 'Serializing stock test'
        }

        data_invalid = {
            'quantity': 4,
            'serial_numbers': 'dd-23-adf',
            'destination': 'blorg'
        }

        # POST
        response = self.client.post(url, data_valid, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertTrue(data['form_valid'])

        # Try again to serialize with the same numbers
        response = self.client.post(url, data_valid, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])

        # POST with invalid data
        response = self.client.post(url, data_invalid, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        data = json.loads(response.content)
        self.assertFalse(data['form_valid'])
class StockOwnershipTest(StockViewTestCase):
    """ Tests for stock ownership views """

    def setUp(self):
        """ Add another user for ownership tests """

        super().setUp()

        # Promote existing user with staff, admin and superuser statuses
        self.user.is_staff = True
        self.user.is_admin = True
        self.user.is_superuser = True
        self.user.save()

        # Create a new user
        user = get_user_model()
        self.new_user = user.objects.create_user(
            username='john',
            email='<EMAIL>',
            password='<PASSWORD>',
        )

        # Put the user into a new group with the correct permissions
        group = Group.objects.create(name='new_group')
        self.new_user.groups.add(group)

        # Give the group *all* the permissions!
        for rule in group.rule_sets.all():
            rule.can_view = True
            rule.can_change = True
            rule.can_add = True
            rule.can_delete = True
            rule.save()

    def enable_ownership(self):
        """Turn on the STOCK_OWNERSHIP_CONTROL global setting."""
        # Enable stock location ownership

        InvenTreeSetting.set_setting('STOCK_OWNERSHIP_CONTROL', True, self.user)
        self.assertEqual(True, InvenTreeSetting.get_setting('STOCK_OWNERSHIP_CONTROL'))

    def test_owner_control(self):
        """End-to-end ownership scenario. Order matters throughout:
        the admin assigns owners, a second user is then blocked from
        changing them, and finally the admin reassigns ownership."""
        # Test stock location and item ownership
        from .models import StockLocation, StockItem
        from users.models import Owner

        user_group = self.user.groups.all()[0]
        user_group_owner = Owner.get_owner(user_group)
        new_user_group = self.new_user.groups.all()[0]
        new_user_group_owner = Owner.get_owner(new_user_group)

        user_as_owner = Owner.get_owner(self.user)
        new_user_as_owner = Owner.get_owner(self.new_user)

        test_location_id = 4
        test_item_id = 11

        # Enable ownership control
        self.enable_ownership()

        # Set ownership on existing location
        response = self.client.post(reverse('stock-location-edit', args=(test_location_id,)),
                                    {'name': 'Office', 'owner': user_group_owner.pk},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Set ownership on existing item (and change location)
        response = self.client.post(reverse('stock-item-edit', args=(test_item_id,)),
                                    {'part': 1, 'status': StockStatus.OK, 'owner': user_as_owner.pk},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Logout
        self.client.logout()

        # Login with new user
        self.client.login(username='john', password='<PASSWORD>')

        # Test location edit
        # NOTE: response deliberately unused — ownership control should
        # reject the change, which the asserts below verify.
        response = self.client.post(reverse('stock-location-edit', args=(test_location_id,)),
                                    {'name': 'Office', 'owner': new_user_group_owner.pk},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')

        # Make sure the location's owner is unchanged
        location = StockLocation.objects.get(pk=test_location_id)
        self.assertEqual(location.owner, user_group_owner)

        # Test item edit
        response = self.client.post(reverse('stock-item-edit', args=(test_item_id,)),
                                    {'part': 1, 'status': StockStatus.OK, 'owner': new_user_as_owner.pk},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')

        # Make sure the item's owner is unchanged
        item = StockItem.objects.get(pk=test_item_id)
        self.assertEqual(item.owner, user_as_owner)

        # Create new parent location
        parent_location = {
            'name': '<NAME>',
            'description': 'John\'s desk',
            'owner': new_user_group_owner.pk,
        }

        # Create new parent location
        response = self.client.post(reverse('stock-location-create'),
                                    parent_location, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Retrieve created location
        parent_location = StockLocation.objects.get(name=parent_location['name'])

        # Create new child location
        new_location = {
            'name': 'Upper Left Drawer',
            'description': 'John\'s desk - Upper left drawer',
        }

        # Try to create new location with neither parent or owner
        response = self.client.post(reverse('stock-location-create'),
                                    new_location, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": false', status_code=200)

        # Try to create new location with invalid owner
        # (user_group_owner does not match the parent location's owner)
        new_location['parent'] = parent_location.id
        new_location['owner'] = user_group_owner.pk
        response = self.client.post(reverse('stock-location-create'),
                                    new_location, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": false', status_code=200)

        # Try to create new location with valid owner
        new_location['owner'] = new_user_group_owner.pk
        response = self.client.post(reverse('stock-location-create'),
                                    new_location, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Retrieve created location
        location_created = StockLocation.objects.get(name=new_location['name'])

        # Create new item
        new_item = {
            'part': 25,
            'location': location_created.pk,
            'quantity': 123,
            'status': StockStatus.OK,
        }

        # Try to create new item with no owner
        response = self.client.post(reverse('stock-item-create'),
                                    new_item, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": false', status_code=200)

        # Try to create new item with invalid owner
        new_item['owner'] = user_as_owner.pk
        response = self.client.post(reverse('stock-item-create'),
                                    new_item, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": false', status_code=200)

        # Try to create new item with valid owner
        new_item['owner'] = new_user_as_owner.pk
        response = self.client.post(reverse('stock-item-create'),
                                    new_item, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Logout
        self.client.logout()

        # Login with admin
        self.client.login(username='username', password='password')

        # Switch owner of location
        response = self.client.post(reverse('stock-location-edit', args=(location_created.pk,)),
                                    {'name': new_location['name'], 'owner': user_group_owner.pk},
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(response, '"form_valid": true', status_code=200)

        # Check that owner was updated for item in this location
        stock_item = StockItem.objects.all().last()
        self.assertEqual(stock_item.owner, user_group_owner)
| StarcoderdataPython |
from setuptools import setup

# Package metadata for pymongo_smart_auth (PyPI distribution).
setup(name='pymongo_smart_auth',
      version='1.2.1',
      description='This package extends PyMongo to provide built-in smart authentication.',
      url='https://github.com/PLPeeters/PyMongo-Smart-Auth',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['pymongo_smart_auth'],
      # Runtime dependency: only the official MongoDB driver.
      install_requires=[
        'pymongo'
      ],
      keywords=['mongo', 'pymongo', 'authentication', 'seamless'],
      zip_safe=True)
| StarcoderdataPython |
1653365 | <reponame>dfiel/greenwavecontrol<gh_stars>1-10
import requests
import xmltodict
import urllib3
def grab_xml(host, token=None):
    """Grab the RoomGetCarousel XML from the Gateway and return the parsed
    'room' list. With a token the request goes over HTTPS; without one a
    default token is used over plain HTTP."""
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "<PASSWORD>"
    url = (
        scheme + '://' + host + '/gwr/gop.php?cmd=GWRBatch&data=<gwrcmds><gwrcmd><gcmd>RoomGetCarousel</gcmd><gdata><gip><version>1</version><token>' + token + '</token><fields>name,status</fields></gip></gdata></gwrcmd></gwrcmds>&fmt=xml')
    response = requests.get(url, verify=False)
    # force_list guarantees 'room' and 'device' are always lists, even singletons.
    document = xmltodict.parse(response.content, force_list={'room', 'device'})
    return document['gwrcmds']['gwrcmd']['gdata']['gip']['room']
def set_brightness(host, did, value, token=None):
    """Set brightness of a bulb or fixture.

    Bug fix: ``requests`` exposes ``status_code`` as an int, so the old
    comparison ``status_code == '200'`` was always False and the function
    always reported failure. Compare against the integer 200 instead.

    :returns: True when the gateway answers HTTP 200, False otherwise.
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "<PASSWORD>"
    url = (
        scheme + '://' + host + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>' + token + '</token><did>' + did + '</did><value>' + str(value) + '</value><type>level</type></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    return response.status_code == 200
def hass_brightness(device):
    """Home Assistant logic for determining brightness: map the device's
    0-100 'level' onto 0-255; missing level means off (0)."""
    if 'level' not in device:
        return 0
    percent = int(device['level'])
    return int((percent / 100) * 255)
def turn_on(host, did, token=None):
    """Turn on a bulb or fixture.

    Bug fix: ``requests.Response.status_code`` is an int; the previous
    ``== '200'`` string comparison was always False, so this function
    always returned False. Compare against the integer 200.

    :returns: True when the gateway answers HTTP 200, False otherwise.
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "<PASSWORD>"
    url = (
        scheme + '://' + host + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>' + token + '</token><did>' + did + '</did><value>1</value></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    return response.status_code == 200
def turn_off(host, did, token=None):
    """Turn off a bulb or fixture.

    Bug fix: ``requests.Response.status_code`` is an int; the previous
    ``== '200'`` string comparison was always False, so this function
    always returned False. Compare against the integer 200.

    :returns: True when the gateway answers HTTP 200, False otherwise.
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "<PASSWORD>"
    url = (
        scheme + '://' + host + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>' + token + '</token><did>' + did + '</did><value>0</value></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    return response.status_code == 200
def check_online(device):
    """Home Assistant logic for determining device availability: a device
    is considered online when it carries no 'offline' key."""
    is_flagged_offline = 'offline' in device
    return not is_flagged_offline
def grab_token(host, email, password):
    """Grab token from gateway. Press sync button before running."""
    urllib3.disable_warnings()
    url = ('https://' + host + '/gwr/gop.php?cmd=GWRLogin&data=<gip><version>1</version><email>' + str(email) + '</email><password>' + str(password) + '</password></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    # The gateway answers rc=404 when it is not in pairing (sync) mode.
    if '<rc>404</rc>' in response.text:
        raise PermissionError('Not In Pairing Mode')
    document = xmltodict.parse(response.content)
    return document['gip']['token']
def grab_bulbs(host, token=None):
    """Grab the gateway XML and flatten every room's devices into a single
    dict keyed by integer device id (room grouping is discarded)."""
    bulbs = {}
    for room in grab_xml(host, token):
        for device in room['device']:
            bulbs[int(device['did'])] = device
    return bulbs
12380 | #!/usr/bin/python3
"""
| --------------------- Py include <Mauro Baladés> ---------------------
| ___ _ _ _ __ _ _ ___ ____
| | |_) \ \_/ | | | |\ | / /` | | | | | | | \ | |_
| |_| |_| |_| |_| \| \_\_, |_|__ \_\_/ |_|_/ |_|__
| ----------------------------------------------------------------------
| MIT License
|
| Copyright (c) 2022 <NAME>
|
| Permission is hereby granted, free of charge, to any person obtaining a copy
| of this software and associated documentation files (the "Software"), to deal
| in the Software without restriction, including without limitation the rights
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
| copies of the Software, and to permit persons to whom the Software is
| furnished to do so, subject to the following conditions:
|
| The above copyright notice and this permission notice shall be included in all
| copies or substantial portions of the Software.
|
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
| SOFTWARE.
|
"""
from pathlib import Path
import sys
def _exec_modules(*args, **kwargs):
# Get locals from kwargs
local = kwargs.get("local", None)
# Check if local is None,
# because user did not define it.
if local is None:
raise Exception("Need to pass the local variable")
# Iterate every path that user gives as
# arguments (stored in *args).
for arg in args:
# Store the path into a
# platform specific-path
path = Path(arg)
# Open the file and get it's
# content
with open(path, "r") as f:
data = f.read()
# Execute the file content.
exec(data, globals(), local)
def _ret_modules(*args, **kwargs):
    # Placeholder: returning (rather than executing) included modules is
    # not implemented yet, so include(..., ret=True) currently yields None.
    pass
def include(*args, **kwargs):
    """Execute (or return) the modules at the given file paths.

    Each positional argument is a filesystem path whose source is executed
    so that it feels like the file was imported. Unlike the import
    statement, no ``__init__.py`` or dot-separated path is needed — any
    platform path works, because py-include opens the file and executes
    its content directly.

    Args:
        *args: Paths of the files to include.
        local (dict): Namespace the executed code is bound to (required by
            ``_exec_modules``); pass ``locals()`` from the call site.
        ret (bool): When True, return the modules instead of executing
            them (defaults to False). All modules are returned together,
            so the caller must unpack them.

    Bug fix: when ``ret`` was False this function executed the modules and
    then *also* called ``_ret_modules``, contradicting the documented
    either/or behaviour; the execute path now returns immediately.
    """
    # Whether to return the modules instead of executing them.
    ret = kwargs.get("ret", False)

    if not ret:
        _exec_modules(*args, **kwargs)
        return None

    return _ret_modules(*args, **kwargs)
| StarcoderdataPython |
4828142 | <reponame>ldkrsi/GAE-py-framework
import importlib, glob
from os.path import dirname, basename, isfile
__all__ = []  # re-exported controller names, filled in by the loop below
# Auto-import every sibling module in this package and re-export the object
# that shares the module's file name (convention: controllers/foo.py
# defines `foo`). `__init__.py` itself and non-files are skipped.
for f in glob.glob(dirname(__file__)+"/*.py"):
    if not isfile(f) or f.endswith('__init__.py'):
        continue
    name = basename(f)[:-3]
    tmp = importlib.import_module('controllers.' + name)
    globals()[name] = tmp.__dict__[name]
    __all__.append(name)
| StarcoderdataPython |
74501 | #!/usr/bin/env python3
import argparse
import itertools
from sys import argv
from vang.bitbucket.api import call
from vang.core.core import pmap_unordered
def get_repos_page(project, limit, start):
    """Fetch one page of repositories for *project* from the Bitbucket API.

    Returns (size, values, is_last_page, next_page_start); next_page_start
    is -1 when the API reports no further page."""
    response = call(
        f'/rest/api/1.0/projects/{project}'
        f'/repos?limit={limit}&start={start}')
    next_start = response.get('nextPageStart', -1)
    return response['size'], response['values'], response['isLastPage'], next_start
def get_repos(project, only_name=False, only_spec=False):
    """Yield repositories of *project*, paging through the API in chunks
    of 25 until the last page is reached. Depending on the flags, yields
    the slug, a (project_key, slug) tuple, or the full repo dict."""
    start = 0
    while True:
        size, values, is_last_page, start = get_repos_page(project, 25, start)
        if size:
            for value in values:
                if only_name:
                    yield value['slug']
                elif only_spec:
                    yield (value['project']['key'], value['slug'])
                else:
                    yield value
        if is_last_page:
            break
def get_all_repos(projects, max_processes=10, only_name=False, only_spec=False):
    """Fetch repositories for every project concurrently and chain the
    per-project iterators into one flat iterable."""
    per_project = pmap_unordered(
        lambda p: get_repos(p, only_name, only_spec),
        projects,
        processes=max_processes)
    return itertools.chain.from_iterable(per_project)
def main(projects, name, repo_specs):
    """Print every repository, formatted per the chosen output mode."""
    for repo in get_all_repos(projects, only_name=name, only_spec=repo_specs):
        print(f'{repo[0]}/{repo[1]}' if repo_specs else repo)
def parse_args(args):
    """Build the CLI parser and apply it to *args*.

    Output-format flags -n/--name and -r/--repo_specs are mutually
    exclusive; `projects` takes one or more project keys."""
    parser = argparse.ArgumentParser(description='Get repos from Bitbucket')
    parser.add_argument('projects', nargs='+', help='Project keys')

    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument('-n', '--name', action='store_true',
                           help='Print only repo name')
    exclusive.add_argument('-r', '--repo_specs', action='store_true',
                           help='Print only project_key/name')
    return parser.parse_args(args)
# CLI entry point: forward the parsed Namespace attributes to main() as
# keyword arguments.
if __name__ == '__main__':  # pragma: no cover
    main(**parse_args(argv[1:]).__dict__)
| StarcoderdataPython |
1647149 | from transformers import RobertaTokenizerFast
from tokenizers.processors import BertProcessing
class RoBERTaTokenizer():
def __init__(self, dataset, vocab_size=30000, min_frequency=2, special_tokens, max_len=512):
self.tokenizer = RobertaTokenizerFast.
| StarcoderdataPython |
1735498 | import os
import json
import numpy as np
from ..default import API_SCHEMA_FILE
from .. import ErsiliaBase
class ApiSchema(ErsiliaBase):
    """Accessor for a model's API schema file (api_schema.json): exposes the
    per-API input/output field definitions, derives feature names for array
    outputs, and builds empty (all-None) input/output templates."""

    def __init__(self, model_id, config_json):
        ErsiliaBase.__init__(self, config_json=config_json)
        self.model_id = model_id
        # Schema file lives inside this model's local directory.
        self.schema_file = os.path.join(
            self._model_path(self.model_id), API_SCHEMA_FILE
        )
        self.logger.debug("Schema available in {0}".format(self.schema_file))

    def _features(self, o):
        """Return feature names for output field *o*: its 'meta' list when
        present, generated zero-padded names f00..fNN for 1-D arrays,
        otherwise None."""
        if o["meta"] is not None:
            return o["meta"]
        if o["type"] == "array":
            shape = o["shape"]
        else:
            return None
        assert len(shape) == 1  # TODO: work with arbitrary shape arrays/tensors
        n = shape[0]
        # Zero-pad indices so names sort lexicographically (f00, f01, ...).
        chars = len(str(n))
        names = []
        for i in range(n):
            i = str(i).zfill(chars)
            names += ["f{0}".format(i)]
        return names

    def isfile(self):
        """Return True when the schema file exists on disk."""
        return os.path.isfile(self.schema_file)

    def get(self):
        """Load the schema JSON and fill each output field's 'meta' with
        derived feature names (see _features)."""
        with open(self.schema_file) as f:
            data = json.load(f)
        for api, sc in data.items():
            for k, o in sc["output"].items():
                data[api]["output"][k]["meta"] = self._features(o)
        return data

    @property
    def schema(self):
        # Convenience alias: re-reads the schema file on every access.
        return self.get()

    def get_schema_by_api(self, api_name):
        """Return the full schema (input and output) for one API."""
        return self.schema[api_name]

    def get_output_by_api(self, api_name):
        """Return only the output field definitions for one API."""
        return self.schema[api_name]["output"]

    def is_h5_serializable(self, api_name):
        """True when every output field is numeric or array, i.e. the API's
        results can be stored in an HDF5 file."""
        schema = self.get_output_by_api(api_name)
        for k, v in schema.items():
            if v["type"] != "numeric" and v["type"] != "array":  # TODO generalize
                return False
        return True

    def get_meta_by_api(self, api_name):
        """Return a {field: meta} mapping for one API's outputs."""
        sc = self.schema[api_name]["output"]
        meta = {}
        for k, v in sc.items():
            meta[k] = v["meta"]
        return meta

    def get_meta(self):
        """Return {api_name: {field: meta}} across all APIs."""
        sc = self.schema
        meta = {}
        for api, _ in sc.items():
            meta_ = self.get_meta_by_api(api)
            meta[api] = meta_
        return meta

    def get_apis(self):
        """Return the sorted list of API names defined in the schema."""
        return sorted(self.schema.keys())

    def empty_by_field(self, field):
        """Return an empty value for one field: a None-filled nested list
        matching the declared shape for arrays, otherwise None."""
        if field["type"] == "array":
            shape = tuple(field["shape"])
            return np.full(shape, None).tolist()
        return None

    def empty_input_by_api(self, api_name):
        """Return an all-empty input template for one API."""
        sc = self.schema[api_name]["input"]
        d = {}
        for k, v in sc.items():
            d[k] = self.empty_by_field(v)
        return d

    def empty_output_by_api(self, api_name):
        """Return an all-empty output template for one API."""
        sc = self.schema[api_name]["output"]
        d = {}
        for k, v in sc.items():
            d[k] = self.empty_by_field(v)
        return d

    def empty_by_api(self, api_name):
        """Return {'input': ..., 'output': ...} empty templates for one API."""
        return {
            "input": self.empty_input_by_api(api_name),
            "output": self.empty_output_by_api(api_name),
        }

    def empty(self):
        """Return empty templates for every API in the schema."""
        d = {}
        for api_name in self.get_apis():
            d[api_name] = self.empty_by_api(api_name)
        return d
| StarcoderdataPython |
1665910 | <gh_stars>1-10
import datetime as dt
import src.core.callbacks as cl
import src.custom as custom
import src.utils as utils
from pathlib import Path
from typing import List, Optional
from src.core.state import State
from src.core.callbacks import Callback
class SubRunner:
    """Base class for a pipeline step that fires lifecycle callbacks.

    Subclasses set `signature` (used to form the callback method name
    on_<signature>_<phase>) and `group` (which callback group in State
    to merge user-defined callbacks from).
    """
    signature = ""
    group = "main"
    def __init__(self, config: dict, state: State):
        self.config = config
        self.state = state
        # Preset callbacks supplied by the concrete runner.
        self.callbacks: List[Callback] = []
    def _run_callbacks(self, phase="start", signature: Optional[str] = None):
        """Invoke on_<signature>_<phase> on preset + user callbacks.

        A user callback whose class name matches a preset callback
        replaces it; others are appended. Callbacks run in sorted order,
        each after its _precondition check.
        """
        assert phase in {"start", "end"}
        signature = self.signature if signature is None else signature
        method = "on_" + signature + "_" + phase
        # add user defined callbacks
        callbacks_in_group = self.state.callbacks.get(self.group)
        if callbacks_in_group is None:
            user_defined_callbacks = None
        else:
            user_defined_callbacks = self.state.callbacks[self.group].get(
                signature)
        # NOTE(review): `callbacks` aliases self.callbacks, so the merge
        # below mutates the runner's preset list in place.
        callbacks = self.callbacks
        if user_defined_callbacks is not None:
            preset_callback_names = [
                callback.__class__.__name__ for callback in callbacks
            ]
            for callback in user_defined_callbacks:
                if callback.__class__.__name__ in preset_callback_names:
                    # overwrite the preset callback of the same class name
                    index = preset_callback_names.index(
                        callback.__class__.__name__)
                    callbacks[index] = callback
                else:
                    callbacks.append(callback)
        for callback in sorted(callbacks):
            callback._precondition(self.state)
            callback.__getattribute__(method)(self.state)
    def run(self):
        """Execute the step; must be implemented by subclasses."""
        raise NotImplementedError
class Runner:
    """Top-level pipeline runner: sets up logging, directories, callbacks,
    then executes each pipeline step from the config in order."""
    def __init__(self, config: dict):
        self.config = config
        # Logs go to <log_dir>/<config name>/<timestamp>.log
        log_dir = Path(config["log_dir"])
        log_dir.mkdir(exist_ok=True, parents=True)
        config_name = Path(config["config_path"]).name.replace(".yml", "")
        log_dir = log_dir / config_name
        log_dir.mkdir(parents=True, exist_ok=True)
        self.init_time = dt.datetime.now().strftime("%Y%m%d-%H:%M:%S")
        log_name = self.init_time + ".log"
        logger = utils.get_logger(str(log_dir / log_name))
        self.state = State(config, logger)
    def _prepare_directories(self):
        """Create feature/output directories and record them on the state."""
        feature_dir = Path(self.config["feature_dir"])
        output_root_dir = Path(self.config["output_dir"])
        feature_dir.mkdir(exist_ok=True, parents=True)
        output_root_dir.mkdir(exist_ok=True, parents=True)
        self.state.feature_dir = feature_dir
        config_name = self.config["config_path"].split("/")[-1].replace(
            ".yml", "")
        # Outputs are grouped by run timestamp, then config name.
        output_dir = (output_root_dir / self.init_time) / config_name
        output_dir.mkdir(parents=True, exist_ok=True)
        self.state.output_dir = output_dir
    def _prepare_callbacks(self):
        """Instantiate configured callbacks into state.callbacks[group][type]."""
        all_callbacks = self.config["callbacks"]
        for group in all_callbacks.keys():
            self.state.callbacks[group] = {}
            callbacks_in_group = all_callbacks[group]
            for callback_type in callbacks_in_group:
                self.state.callbacks[group][callback_type] = []
                callbacks = callbacks_in_group[callback_type]
                for callback in callbacks:
                    callback_name = callback["name"]
                    callback_params = {} if callback.get(
                        "params") is None else callback["params"]
                    # NOTE: "definetion" (sic) is the key used by config
                    # files; do not "fix" the spelling here.
                    definetion = callback["definetion"]
                    if "custom" in definetion:
                        # e.g. "custom.<submodule>" -> custom.<submodule>.callbacks.<name>
                        submodule = definetion.split(".")[1]
                        instance = custom.__getattribute__(
                            submodule).__getattribute__(
                                "callbacks").__getattribute__(callback_name)(
                                    **callback_params)
                        self.state.callbacks[group][callback_type].append(
                            instance)
                    else:
                        # Core callback: cl.<callback_type>.<name>
                        instance = cl.__getattribute__(
                            callback_type).__getattribute__(callback_name)(
                                **callback_params)
                        self.state.callbacks[group][callback_type].append(
                            instance)
    def run(self):
        """Run every pipeline entry; each step gets a fresh State that
        shares callbacks/misc/dirs with the runner's master state."""
        self._prepare_directories()
        self._prepare_callbacks()
        for pl in self.config["pipeline"]:
            for key, value in pl.items():
                state = State(value, logger=self.state.logger)
                state.callbacks = self.state.callbacks
                state.misc = self.state.misc
                state.output_dir = self.state.output_dir
                state.feature_dir = self.state.feature_dir
                if key == "data":
                    from .data import DataRunner
                    runner = DataRunner(value, state)
                    runner.run()
                    # Copy the data step's results back to the master state.
                    self.state.dataframes = state.dataframes
                    self.state.data_stats = state.data_stats
                    self.state.dataframe_roles = state.dataframe_roles
                    self.state.target_name = state.target_name
                    self.state.id_columns = state.id_columns
                    self.state.connect_to = state.connect_to
                    self.state.connect_on = state.connect_on
                else:
                    # Unknown step types are currently ignored.
                    pass
                self.state.misc = state.misc
| StarcoderdataPython |
1734571 | <filename>ceilingfox.py<gh_stars>0
import psycopg2, psycopg2.extras
import os
import configparser
import random
def ceiling_fox_post():
    """Return one random blobfox emoji markup, rotated 180 deg (ceiling fox)."""
    data = []
    # 'blobfox' is a newline-separated emoji-name list next to this module.
    with open((os.path.join(os.path.dirname(__file__), 'blobfox')), 'r', encoding='utf-8') as emojilist:
        data = emojilist.read().splitlines()
    random.seed()  # reseed from system entropy/time on every call
    zahl = random.randint(0, len(data) - 1)
    return "$[rotate.deg=180 :" + data[zahl] + ":]"
def ceiling_fox_story():
    """Return a 'story' of 5-10 random upside-down blobfox emojis."""
    text = ""
    data = []
    with open((os.path.join(os.path.dirname(__file__), 'blobfox')), 'r', encoding='utf-8') as emojilist:
        data = emojilist.read().splitlines()
    random.seed()
    zahl = random.randint(5, 10)  # story length in emojis
    for _ in range(zahl):
        emoji = random.randint(0, len(data) - 1)
        text += "$[rotate.deg=180 :" + data[emoji] + ":]"
    return text
def ceiling_fox_yes_no():
    """Answer a yes/no question as the ceiling fox.

    Returns the "yes" emoji for coin <= 45 (~45%), "confused" for
    46..54 (~9%), and "no" for coin >= 55 (~45%), each as 180-degree
    rotated emoji markup.
    """
    random.seed()
    coin = random.randint(0, 100)
    if coin <= 45:
        return "$[rotate.deg=180 :vlpnsayyes:]"
    if coin < 55:
        return "$[rotate.deg=180 :blobfoxconfused:]"
    # coin >= 55. The original also had a final fallback branch returning
    # ":vlpnsayyip:", but it was unreachable: every coin value in [0, 100]
    # is covered by the three cases above, so it has been removed.
    return "$[rotate.deg=180 :vlpnsayno:]"
def ceiling_fox_number():
    """Return markup of an upside-down sign fox holding a random digit 0-9."""
    random.seed()
    # First draw picks the fox variant (blushing or not), second the digit;
    # the draw order matters for reproducibility under a fixed seed.
    if random.randint(0, 1) == 1:
        fox = ":blobfoxsignnoublush:"
    else:
        fox = ":blobfoxsignnou:"
    number = random.randint(0, 9)
    parts = [
        "$[rotate.deg=180 ",
        fox,
        "\n\(\\\\[-18mu]\)$[rotate.deg=5 \(\scriptsize\colorbox{white}{\hspace{6mu}\\textcolor{black}{",
        str(number),
        ".}\hspace{6mu}}\)]]",
    ]
    return "".join(parts)
def load_emojis():
    """Refresh the local 'blobfox' emoji list from the Misskey database.

    Connects to Postgres using credentials from bot.cfg, selects all local
    (host IS NULL) emoji names, filters them to the fox families used by
    this bot, and writes one name per line next to this module.
    """
    text = []
    dirname = ""
    # Load Postgres configuration
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), 'bot.cfg'))
    misskeydb = psycopg2.connect(
        host=config.get("postgres", "host"),
        database=config.get("postgres", "database"),
        user=config.get("postgres", "user"),
        password=config.get("postgres", "password"))
    mkcur = misskeydb.cursor(cursor_factory=psycopg2.extras.DictCursor)
    mkcur.execute('SELECT name FROM emoji WHERE host IS NULL;')
    rows = mkcur.fetchall()
    dir = os.path.split(__file__) # Array with two Elements
    dirname = dir[0]
    # Keep blobfox*, most vlpnsay* (minus a few exclusions), plus a handful
    # of individual fox emojis.
    for row in rows:
        if (row['name'].startswith("blobfox") or (row['name'].startswith("vlpnsay") and not row['name']=="vlpnsay" and not row['name']=="vlpnsaynervous" and not row['name']=="vlpnsayhappy") or row['name']=="flanfox" or row['name']=="foxjump" or row['name'].startswith("mcfoxspin")):
            text.append(f"{row['name']}")
    file = open(dirname + '/blobfox', 'w')
    for line in text:
        file.write(line + "\n")
    file.close()
    mkcur.close()
    misskeydb.close()
| StarcoderdataPython |
3292018 | <gh_stars>0
import os
import openpyxl as xl
import pandas as pd
import xlrd
import xlwt
import xlsxwriter
import pathlib
# Walk every sub-folder under rootpath, read the last row of each .xlsx
# found, and write it (under the original header) into a new workbook
# named after the file's parent folder.
rootpath = 'E:\\College\\Semester 6\\Files\\python\\files iterate\\Test Data'
header = []
folders = []
files =[]
# First pass: collect every sub-folder path.
for r, d, f in os.walk(rootpath):
    for folder in d:
        folders.append(os.path.join(r,folder))
for _ in folders:
    for r, d, f in os.walk(_):
        data = []
        for file in f:
            if '.xlsx' in file:
                print("[+] " + os.path.join(r,file) + " --> Done!")
                # print(pathlib.PurePath(os.path.join(r,file)).parent.name)
                wb = xlrd.open_workbook(os.path.join(r,file))
                sheet = wb.sheet_by_index(0)
                n_row = sheet.nrows-1
                n_column = sheet.ncols-1
                header = sheet.row_values(0)
                # Append the last data row of this sheet to the batch.
                data.insert(len(data)+1,sheet.row_values(n_row))
                #Writing !!
                wb_write = xl.Workbook()
                ### set file name as foldername.xlsx!!
                f_path = pathlib.PurePath(os.path.join(r,file)).parent.name+ ".xlsx"
                # NOTE(review): the workbook is created and saved inside the
                # per-file loop, so <foldername>.xlsx is rewritten for every
                # input file (with the rows accumulated so far) -- confirm
                # whether the save was meant to happen once per folder.
                sheet_write = wb_write.active
                sheet_write.append(header)
                for row_data in data:
                    sheet_write.append(row_data)
                wb_write.save(f_path)
# for folder in d:
# # folders.append(os.path.join(r,folder))
# print(os.path.join(r,folder))
###---------------------for single directory
# wb = xlrd.open_workbook(rootdir)
# sheet = wb.sheet_by_index(0)
# n_row = sheet.nrows-1
# n_column = sheet.ncols-1
# header = sheet.row_values(0)
# data.insert(len(data)+1,sheet.row_values(n_row))
# data.insert(len(data)+1,sheet.row_values(n_row-1))
# #### Writing into file!!!
# wb_write = xl.Workbook()
# path = os.getcwd()
# f_path = path + "\\demo.xlsx"
# sheet_write = wb_write.active
# sheet_write.append(header)
# for row_data in data:
# sheet_write.append(row_data)
# wb_write.save(f_path)
###---------------------------------
| StarcoderdataPython |
128986 | <gh_stars>1-10
import sys
from config import Config
from device.DeviceManager import DeviceManager
from logger import initLogger
class App:
    """
    The application class: wires together logging, configuration and
    the device manager, and starts/stops the device loops.
    """
    def __init__(self):
        """
        Constructor: initialize logging, load the config and create
        the device manager.
        """
        logger = initLogger()
        self.logger = logger.getLogger('APP')
        self.logger.info('Initializing the app.')
        self.config = Config(logger)
        self.deviceMngr = DeviceManager(logger, self.config)
        self.logger.info('App initialized.')
    def run(self):
        """
        Run the application: start the device manager loops.
        """
        self.logger.info('Running the app.')
        self.deviceMngr.startLoops()
    def stop(self):
        """
        Stop the application: stop the device manager loops.
        """
        self.logger.info('Stopping the app.')
        self.deviceMngr.stopLoops()
if __name__ == '__main__':
    import time  # local import: only needed for the idle loop below

    app = App()
    try:
        app.run()
        # Idle until interrupted; sleeping instead of the original
        # `while True: pass` busy-wait avoids pinning a CPU core.
        while True:
            time.sleep(1)
    except (Exception, KeyboardInterrupt):
        # fix: the original caught only Exception, so Ctrl-C
        # (KeyboardInterrupt) skipped the device-loop shutdown.
        app.stop()
        sys.exit(0)
| StarcoderdataPython |
1667921 | <gh_stars>1-10
# Read one line at a time.
f = open('a.txt')
content = f.readline()  # read the first line
# print(content)
# Read the remaining lines in a loop; readline() returns '' at EOF.
while len(content) > 0:
    print(content, end="")
    content = f.readline()  # read the next line
f.close() # 读完关闭 | StarcoderdataPython |
1716031 | #!/usr/bin/python3
# encoding: utf-8
import asyncio
import threading
import time
import ArmController as controller #舵机转动
import random
import websockets
# Arm position calibration
def Arm_Pos_Corr():
    """Move servos 1 and 2 to their calibration positions."""
    controller.setServo(1, 1200, 500)
    controller.setServo(2, 500, 500)
    time.sleep(1)  # give the servos time to reach position
def get_arm_pos():
    """Poll and print the positions of servos 0, 2, 3, 4 forever (~30 Hz)."""
    while True:
        pos = []
        for i in [0,2,3,4]:
            pos.append(controller.Servos[i].getPosition())
        print(pos)
        time.sleep(1.0 / 30) # 30 frames/sec
def animate_arm():
    """Endlessly jitter servos 1, 3, 4, 5 around their current positions.

    Each cycle picks a random move duration (`spend`, ms) and random
    offsets, then sleeps slightly longer than the move takes.
    """
    while True:
        spend = 2000 + random.randint(-20, 20) * 40
        # bottom = controller.Servos[4].getPosition() + random.randint(-20, 20) * 20
        # middle = controller.Servos[3].getPosition() + random.randint(-20, 20) * 20
        # head = controller.Servos[2].getPosition() + random.randint(-20, 20) * 20
        # clow = controller.Servos[0].getPosition() + random.randint(-20, 20) * 20
        controller.setServo(1, controller.Servos[0].getPosition() + random.randint(-20, 20) * 20, spend)
        controller.setServo(3, controller.Servos[2].getPosition() + random.randint(-20, 20) * 20, spend)
        controller.setServo(4, controller.Servos[3].getPosition() + random.randint(-20, 20) * 20, spend)
        controller.setServo(5, controller.Servos[4].getPosition() + random.randint(-20, 20) * 20, spend)
        time.sleep((spend + 100) / 1000)
if __name__ == "__main__":
    print("start")
    controller.initLeArm([0,0,0,0,0,0])
    time.sleep(1)
    Arm_Pos_Corr()
    # Both worker functions loop forever, so the join() calls below never
    # return and "end" is never printed; the daemon flags are left
    # commented out, so Ctrl-C is the only way to stop the script.
    get_pos = threading.Thread(target=get_arm_pos)
    animate = threading.Thread(target=animate_arm)
    # get_pos.setDaemon(True)
    # animate.setDaemon(True)
    get_pos.start()
    animate.start()
    get_pos.join()
    animate.join()
    print("end")
| StarcoderdataPython |
3220111 | <reponame>tranquilitybase-io/tb-aws-dac
# Supports all actions concerning applications
import json
from pprint import pformat
from celery import states
from celery.result import AsyncResult
from flask import abort
import config
from gcpdac.application_ci import create_application, delete_application
from gcpdac.celery_tasks import deploy_application_task, destroy_application_task
logger = config.logger
def create(applicationDetails):
    """Synchronously deploy an application.

    Returns (result, 201) when terraform reports success
    (tf_return_code == 0); aborts with HTTP 500 otherwise.
    """
    logger.debug(pformat(applicationDetails))
    result = create_application(applicationDetails)
    if result.get("tf_return_code") == 0:
        return result, 201
    else:
        abort(500, "Failed to deploy your application")
def delete(oid):
    """Synchronously delete the application with the given id.

    Returns ({}, 200) when terraform reports success; aborts with
    HTTP 500 otherwise.
    """
    logger.debug("Id is {}".format(oid))
    applicationDetails = {"id": oid}
    result = delete_application(applicationDetails)
    if result.get("tf_return_code") == 0:
        return {}, 200
    else:
        abort(500, "Failed to delete your application")
def create_async(applicationDetails):
    """Queue an asynchronous application deployment via Celery.

    Returns ({"taskid": <celery task id>}, 201); poll
    create_application_result() with the task id for the outcome.
    """
    logger.debug(pformat(applicationDetails))
    result = deploy_application_task.delay(applicationDetails=applicationDetails)
    logger.info("Task ID %s", result.task_id)
    context = {"taskid": result.task_id}
    # TODO handle celery failure -- `success` is currently hard-coded, so
    # the abort branch below is unreachable until real failure detection
    # is wired in.
    success = True
    if success:
        return context, 201
    else:
        abort(500, "Failed to create your application")
def delete_async(oid):
    """Queue an asynchronous application deletion via Celery.

    Returns ({"taskid": <celery task id>}, 201); poll
    delete_application_result() with the task id for the outcome.
    """
    logger.debug("Id is {}".format(oid))
    applicationDetails = {"id": oid}
    result = destroy_application_task.delay(applicationDetails=applicationDetails)
    logger.info("Task ID %s", result.task_id)
    context = {"taskid": result.task_id}
    # TODO handle celery failure -- `success` is currently hard-coded, so
    # the abort branch below is unreachable until real failure detection
    # is wired in.
    success = True
    if success:
        return context, 201
    else:
        abort(500, "Failed to delete your application")
def create_application_result(taskid):
    """Report the status of an asynchronous deployment task.

    While the task is pending, returns only {'status': ...}. Once it is
    SUCCESS or FAILURE, fetches the result and downgrades status to
    FAILURE when terraform's return_code is non-zero.
    """
    logger.info("CREATE application RESULT %s", format(taskid))
    status = AsyncResult(taskid).status
    if status == states.SUCCESS or status == states.FAILURE:
        retval = AsyncResult(taskid).get(timeout=1.0)
        return_code = retval["return_code"]
        # tf_outputs = retval["tf_outputs"]
        if return_code > 0:
            status = states.FAILURE
            payload = {}
        else:
            payload = {}
        # NOTE(review): payload is {} on both branches; it presumably was
        # meant to carry terraform outputs eventually -- confirm before
        # relying on it.
        return {'status': status, "payload": json.dumps(payload)}
    else:
        return {'status': status}
def delete_application_result(taskid):
    """Report the status of an asynchronous deletion task.

    While the task is pending, returns only {'status': ...}. Once it is
    SUCCESS or FAILURE, includes terraform's return_code and downgrades
    status to FAILURE when it is non-zero.
    """
    logger.info("DELETE application RESULT %s", format(taskid))
    status = AsyncResult(taskid).status
    if status == states.SUCCESS or status == states.FAILURE:
        retval = AsyncResult(taskid).get(timeout=1.0)
        return_code = retval["return_code"]
        if return_code > 0:
            status = states.FAILURE
        return {'status': status, "return_code": return_code}
    else:
        return {'status': status}
| StarcoderdataPython |
3294481 | <filename>var/spack/repos/builtin/packages/sicm/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sicm(CMakePackage):
    """SICM: Simplified Interface to Complex Memory."""
    homepage = "https://github.com/lanl/SICM/"
    git = "https://github.com/lanl/SICM.git"
    maintainers = []
    version('master')
    # SICM requires jemalloc built with the 'je_' symbol prefix, plus numactl.
    depends_on('jemalloc jemalloc_prefix=je_')
    depends_on('numactl')
    def cmake_args(self):
        # No extra CMake flags needed; CMakePackage defaults suffice.
        return []
| StarcoderdataPython |
32819 | <gh_stars>1-10
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
# Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import errno as errno
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common.exception import ResourceDiskError
from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler
class OpenWRTResourceDiskHandler(ResourceDiskHandler):
    """Resource-disk handler for OpenWRT.

    Differs from the default handler in that it refreshes the partition
    table with hdparm and never probes for GPT, which OpenWRT does not
    support.
    """
    def __init__(self):
        super(OpenWRTResourceDiskHandler, self).__init__()
        # Fase File System (FFS) is UFS
        if self.fs == 'ufs' or self.fs == 'ufs2':
            self.fs = 'ffs'

    def reread_partition_table(self, device):
        """Ask the kernel to re-read the partition table of `device`."""
        ret, output = shellutil.run_get_output("hdparm -z {0}".format(device), chk_err=False)
        if ret != 0:
            logger.warn("Failed refresh the partition table.")

    def mount_resource_disk(self, mount_point):
        """Detect, format (if needed) and mount the resource disk.

        Returns the mount point path; raises ResourceDiskError when the
        disk cannot be detected, partitioned or mounted.
        """
        # fix: `sleep` was referenced in the wait loop below but never
        # imported at module level, which raised NameError at runtime.
        from time import sleep

        device = self.osutil.device_for_ide_port(1)
        if device is None:
            raise ResourceDiskError("unable to detect disk topology")
        logger.info('Resource disk device {0} found.', device)

        # 2. Get partition
        device = "/dev/{0}".format(device)
        partition = device + "1"
        logger.info('Resource disk partition {0} found.', partition)

        # 3. Mount partition
        mount_list = shellutil.run_get_output("mount")[1]
        existing = self.osutil.get_mount_point(mount_list, device)

        if existing:
            logger.info("Resource disk [{0}] is already mounted [{1}]",
                        partition,
                        existing)
            return existing

        try:
            fileutil.mkdir(mount_point, mode=0o755)
        except OSError as ose:
            msg = "Failed to create mount point " \
                  "directory [{0}]: {1}".format(mount_point, ose)
            logger.error(msg)
            raise ResourceDiskError(msg=msg, inner=ose)

        # mkfs.xfs spells its force flag lowercase; the others use -F.
        force_option = 'F'
        if self.fs == 'xfs':
            force_option = 'f'
        mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option)

        # Compare to the Default mount_resource_disk, we don't check for GPT that is not supported on OpenWRT
        ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device))
        ptype = ret[1].strip()
        if ptype == "7" and self.fs != "ntfs":
            logger.info("The partition is formatted with ntfs, updating "
                        "partition type to 83")
            self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device))
            self.reread_partition_table(device)
            logger.info("Format partition [{0}]", mkfs_string)
            shellutil.run(mkfs_string)
        else:
            logger.info("The partition type is {0}", ptype)

        mount_options = conf.get_resourcedisk_mountoptions()
        mount_string = self.get_mount_string(mount_options,
                                             partition,
                                             mount_point)
        # Wait up to ~25s for the partition device node to appear.
        attempts = 5
        while not os.path.exists(partition) and attempts > 0:
            logger.info("Waiting for partition [{0}], {1} attempts remaining",
                        partition,
                        attempts)
            sleep(5)
            attempts -= 1

        if not os.path.exists(partition):
            raise ResourceDiskError("Partition was not created [{0}]".format(partition))

        if os.path.ismount(mount_point):
            logger.warn("Disk is already mounted on {0}", mount_point)
        else:
            # Some kernels seem to issue an async partition re-read after a
            # command invocation. This causes mount to fail if the
            # partition re-read is not complete by the time mount is
            # attempted. Seen in CentOS 7.2. Force a sequential re-read of
            # the partition and try mounting.
            logger.info("Mounting after re-reading partition info.")

            self.reread_partition_table(device)

            logger.info("Mount resource disk [{0}]", mount_string)
            ret, output = shellutil.run_get_output(mount_string)
            if ret:
                logger.warn("Failed to mount resource disk. "
                            "Attempting to format and retry mount. [{0}]",
                            output)

                shellutil.run(mkfs_string)
                ret, output = shellutil.run_get_output(mount_string)
                if ret:
                    raise ResourceDiskError("Could not mount {0} "
                                            "after syncing partition table: "
                                            "[{1}] {2}".format(partition,
                                                               ret,
                                                               output))

        logger.info("Resource disk {0} is mounted at {1} with {2}",
                    device,
                    mount_point,
                    self.fs)
        return mount_point
| StarcoderdataPython |
1777886 | <gh_stars>0
# Librerias Django
from django.contrib.auth import views as auth_views
from django.urls import path
# Librerias en carpetas locales
from .subviews.pos import (
DeletePos, PosCreateView, PosDetailView, PosListView, PosUpdateView)
# URL routes for the point-of-sale (POS) CRUD views: list, create,
# detail, update, and delete (the delete route is a function view).
urlpatterns = [
    path('pos', PosListView.as_view(), name='pos'),
    path('pos/add/', PosCreateView.as_view(), name='pos-add'),
    path('pos/<int:pk>/', PosDetailView.as_view(), name='pos-detail'),
    path('pos/<int:pk>/update', PosUpdateView.as_view(), name='pos-update'),
    path('pos/<int:pk>/delete/', DeletePos, name='pos-delete'),
]
| StarcoderdataPython |
3290914 | <filename>intranet/org/management/commands/send_diary.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django.core.management.base import BaseCommand
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from django.contrib.sites.models import Site
from django.core.urlresolvers import set_script_prefix
from django.core.urlresolvers import clear_script_prefix
from intranet.org.models import Scratchpad, Diary, Lend, Event
class Command(BaseCommand):
args = "<date> in format dd.mm.yyyy"
help = "Sends daily email repot about intranet changes"
def handle(self, *args, **options):
if args:
interested_datetime = datetime.datetime.strptime(args[0], '%d.%m.%Y')
else:
# yesterday
interested_datetime = datetime.date.today() - datetime.timedelta(1)
subject = 'Kiberpipa, dnevno porocilo: %d. %d. %d' % (interested_datetime.day, interested_datetime.month, interested_datetime.year)
diaries = Diary.objects.filter(pub_date__year=interested_datetime.year, pub_date__month=interested_datetime.month, pub_date__day=interested_datetime.day)
try:
scratchpad = Scratchpad.objects.all()[0].content
except Scratchpad.DoesNotExist:
pass
lends = Lend.objects.filter(returned=False)
# warnings for events:
# today and tomorrow
events = Event.objects.get_date_events(
datetime.datetime(interested_datetime.year, interested_datetime.month, interested_datetime.day + 1, 0, 0),
datetime.datetime(interested_datetime.year, interested_datetime.month, interested_datetime.day + 3, 0, 0)
)
# no technician
no_tech = events.filter(require_technician__exact=True).filter(technician__isnull=True)
# no officers on duty
no_responsible = events.filter(require_officers_on_duty__exact=True).filter(officers_on_duty__isnull=True)
if diaries or no_tech or no_responsible:
pass
else:
print "nothing to send"
return
# this causes url handling to force absolute urls
url = "https://%s/" % Site.objects.get_current().domain
set_script_prefix(url)
try:
text = get_template('mail/diary_report.txt').render(Context(locals()))
html = get_template('mail/diary_report.html').render(Context(locals()))
email = EmailMultiAlternatives(subject, text, settings.DEFAULT_FROM_EMAIL, ['<EMAIL>'])
email.attach_alternative(html, 'text/html')
email.send()
print "email sent"
finally:
# set_script_prefix is global for current thread
clear_script_prefix()
| StarcoderdataPython |
3293343 | <reponame>soybean217/lora-python
#! /usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
import time
import socket
# import crc16
from binascii import unhexlify
from binascii import hexlify
import crcmod
import codecs
def doConnect(host, port):
    """Create a TCP socket and attempt to connect to (host, port).

    The socket is returned whether or not the connection succeeded; the
    caller detects a dead connection on the first send/recv and calls
    this again to reconnect.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
    except socket.error:
        # fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only connection errors should be tolerated here.
        pass
    return sock
# Shared packet-field dictionaries used by the proc* builders below.
header = {}
# Geomagnetic data packet:  0x20
# Host status packet:       0x21
header['type'] = b'\x20'
header['host_code_machine_id'] = 3
# Sale type codes: B = market demo; D = sales order; E = test project
header['host_code_sale_type'] = b'\x0d'
header['host_code_sale_year'] = 17
header['host_code_project_id'] = 1
sensorBody = {}
sensorBody['position_id'] = 1
sensorBody['sensor_state'] = 1
sensorBody['park_count'] = 1
sensorBody['voltage'] = 1
sensorBody['reserved_field'] = 0
heartbeatBody = {}
heartbeatBody['alarm'] = 0
heartbeatBody['voltage'] = 13000
heartbeatBody['reserved'] = 0
# CRC-16 (poly 0x8005, reflected, init 0xFFFF) appended to every packet.
crc16 = crcmod.mkCrcFun(0x18005, rev=True, initCrc=0xFFFF, xorOut=0x0000)
def procHeaderer():
    """Pack the common packet header: machine id (4B LE), project id (3B LE),
    sale year (2B BE), sale type (1B), current unix time (8B LE)."""
    return header['host_code_machine_id'].to_bytes(4, byteorder='little') + header['host_code_project_id'].to_bytes(3, byteorder='little') + header['host_code_sale_year'].to_bytes(2, byteorder='big') + header['host_code_sale_type'] + int(round(time.time())).to_bytes(8, byteorder='little')
def procSensor():
    """Build a geomagnetic sensor packet (type 0x20) with trailing CRC16."""
    tmpHeader = b'\x20' + procHeaderer()
    # NOTE(review): the first body byte is taken from heartbeatBody['alarm']
    # rather than a sensorBody field -- confirm against the protocol spec.
    tmpBody = heartbeatBody['alarm'].to_bytes(1, byteorder='little') + sensorBody['sensor_state'].to_bytes(
        4, byteorder='little') + sensorBody['park_count'].to_bytes(2, byteorder='little') + sensorBody['voltage'].to_bytes(1, byteorder='little') + sensorBody['reserved_field'].to_bytes(7, byteorder='little')
    crc = crc16(tmpHeader + tmpBody)
    return tmpHeader + tmpBody + crc.to_bytes(2, byteorder='big')
def procHeartbeat():
    """Build a host status/heartbeat packet (type 0x21) with trailing CRC16."""
    tmpHeader = b'\x21' + procHeaderer()
    # NOTE(review): the first body byte is sensorBody['position_id'], not a
    # heartbeatBody field -- confirm against the protocol spec.
    tmpBody = sensorBody['position_id'].to_bytes(1, byteorder='little') + heartbeatBody[
        'voltage'].to_bytes(2, byteorder='little') + heartbeatBody['reserved'].to_bytes(1, byteorder='big')
    crc = crc16(tmpHeader + tmpBody)
    # crc = crc16.crc16xmodem(tmpHeader)
    return tmpHeader + tmpBody + crc.to_bytes(2, byteorder='big')
def main():
    """Connect to the test server and send one packet per second, forever.

    On any socket error, waits 3 seconds and reconnects.
    """
    host, port = "testgeo.bolinparking.com", 12366
    sockLocal = doConnect(host, port)
    while True:
        try:
            msg = procHeartbeat()
            # NOTE(review): the line below immediately overwrites the
            # heartbeat, so only the sensor packet is ever sent -- confirm
            # whether both were meant to go out.
            msg = procSensor()
            sockLocal.send(msg)
            print("send msg ok : ", len(msg))
            print("send msg ok : ", hexlify(msg).decode())
            recv = sockLocal.recv(1024)
            print("recv data :", recv)
            print("recv data :", hexlify(recv).decode())
        except socket.error:
            print("socket error,do reconnect ")
            time.sleep(3)
            sockLocal = doConnect(host, port)
        # except:
        #     print('other error occur ')
        #     time.sleep(3)
        time.sleep(1)
# Run the sender loop when executed as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1612583 | <reponame>sburden-group/pareto_leg_hardware
from time import perf_counter
from motor_control_process import MotorControl
from leg_controllers.designs import Params
import yaml
import keyboard
import argparse
# Command-line interface: a design YAML and a motor-calibration YAML.
# NOTE(review): prog/description are placeholder text ("Foo Bar Baz").
parser = argparse.ArgumentParser(
    prog = "Foo Bar Baz",
    description = "Foo Bar Baz",
)
parser.add_argument('design', type=str, help="YAML file containing the design parameters")
parser.add_argument('motor_config', type=str, help="YAML file containing motor calibration data (calibration position, measurement, axis sign")
if __name__ == "__main__":
    # fix: defined before the try so the cleanup in the except clause
    # cannot itself raise NameError when setup fails early (e.g. bad args).
    motorcontrol = None
    try:
        args = parser.parse_args()

        # Load the leg design parameters from YAML.
        with open(args.design, "r") as file:
            leg_params = Params(*((yaml.load(file, yaml.Loader)).values()))

        # Load the motor calibration data from YAML.
        with open(args.motor_config) as file:
            motor_config = yaml.load(file, yaml.Loader)

        motorcontrol = MotorControl(leg_params, motor_config)
        keyboard.add_hotkey('alt+q', lambda: motorcontrol.stop())
        motorcontrol.start()
        start_time = perf_counter()
        # Relay messages and errors from the control process until it exits.
        while motorcontrol.is_alive():
            if motorcontrol.poll_err():
                print(motorcontrol.recv_err())
            elif motorcontrol.poll_msg():
                print(motorcontrol.recv_msg())
    except (Exception, KeyboardInterrupt):
        # fix: was a bare `except:`; KeyboardInterrupt is kept so Ctrl-C
        # still triggers a clean shutdown, but SystemExit (e.g. from
        # argparse) is no longer masked.
        print("Shutting down!")
        if motorcontrol is not None:
            motorcontrol.stop()
            motorcontrol.join(timeout=10.)
            motorcontrol.terminate()
3367397 | <reponame>messiaen/csv2md
import sys
import argparse
import csv
def csv_to_table(file, delimiter=',', quotechar='"'):
    """Read every row of the open file object `file` as CSV.

    Returns a list of rows (each a list of cell strings); on a CSV parse
    error, reports it and exits with status 1.
    """
    try:
        reader = csv.reader(file, delimiter=delimiter, quotechar=quotechar)
        return [row for row in reader]
    except csv.Error as e:
        print(e, file=sys.stderr)
        print('Something went wrong...')
        print('Exiting...')
        sys.exit(1)
def get_table_widths(table):
    """Return per-column widths: the length of the longest cell in each."""
    return [max(len(cell) for cell in column) for column in zip(*table)]
def table_to_md(table):
    """Render `table` as a Markdown table; the first row is the header."""
    widths = get_table_widths(table)

    def render_row(row):
        padded = [cell.ljust(width) for cell, width in zip(row, widths)]
        return '| ' + ' | '.join(padded) + ' |'

    lines = [render_row(row) for row in table]
    # Insert the header/body separator right under the first row.
    lines.insert(1, '| ' + ' | '.join('-' * width for width in widths) + ' |')
    return '\n'.join(lines)
def main():
    """CLI entry point: convert each given CSV file (or stdin) to Markdown."""
    parser = argparse.ArgumentParser(description='Parse CSV files into Markdown tables.')
    parser.add_argument('files', metavar='CSV_FILE', type=argparse.FileType('r'), nargs='*',
                        help='One or more CSV files to parse')
    parser.add_argument('-d', '--delimiter', metavar='DELIMITER', type=str, default=',',
                        help='delimiter character. Default is \',\'')
    parser.add_argument('-q', '--quotechar', metavar='QUOTECHAR', type=str, default='"',
                        help='quotation character. Default is \'"\'')
    args = parser.parse_args()

    if not args.files:
        # No file arguments: read CSV from standard input instead.
        print(table_to_md(csv_to_table(sys.stdin, delimiter=args.delimiter, quotechar=args.quotechar)))
    else:
        for file in args.files:
            print(table_to_md(csv_to_table(file, delimiter=args.delimiter, quotechar=args.quotechar)))
# Allow running as a script: `python csv2md.py file.csv ...`
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1683975 | <reponame>jiahuanluo/attention-is-all-you-need-pytorch
''' Translate input text with trained model. '''
import torch
import argparse
import dill as pickle
from tqdm import tqdm
import transformer.Constants as Constants
from torchtext.data import Dataset
from transformer.Models import Transformer
from transformer.Translator import Translator
from torch.utils.data import DataLoader
import utils
def prepare_mydataloaders(opt, device):
    """Load the pickled dataset, fill vocab/index fields on `opt`, and
    return (target dictionary, test DataLoader with batch size 1)."""
    data = pickle.load(open(opt.data_pkl, 'rb'))

    opt.max_token_seq_len = 140
    opt.src_pad_idx = data['dict']['src'].labelToIdx[Constants.PAD_WORD]
    opt.trg_pad_idx = data['dict']['tgt'].labelToIdx[Constants.PAD_WORD]
    opt.trg_bos_idx = data['dict']['tgt'].labelToIdx[Constants.BOS_WORD]
    opt.trg_eos_idx = data['dict']['tgt'].labelToIdx[Constants.EOS_WORD]
    # NOTE(review): hard-coded; assumed to match the vocabulary's UNK index.
    opt.unk_idx = 1
    opt.src_vocab_size = len(data['dict']['src'].labelToIdx)
    opt.trg_vocab_size = len(data['dict']['tgt'].labelToIdx)

    # ========= Preparing Model =========#
    # if opt.embs_share_weight:
    #     assert data['dict']['src'].labelToIdx == data['dict']['tgt'].labelToIdx, \
    #         'To sharing word embedding the src/trg word2idx table shall be the same.'

    testset = utils.BiDataset(data['test'])
    testloader = torch.utils.data.DataLoader(dataset=testset,
                                             batch_size=1,
                                             shuffle=False,
                                             collate_fn=utils.padding)
    return data['dict']['tgt'], testloader
def load_model(opt, device):
    """Rebuild a Transformer from a checkpoint and load its weights.

    The checkpoint stores both the training-time settings ('settings')
    and the state dict ('model'); the model is constructed on `device`.
    """
    checkpoint = torch.load(opt.model, map_location=device)
    model_opt = checkpoint['settings']

    model = Transformer(
        model_opt.src_vocab_size,
        model_opt.trg_vocab_size,
        model_opt.src_pad_idx,
        model_opt.trg_pad_idx,
        trg_emb_prj_weight_sharing=model_opt.proj_share_weight,
        emb_src_trg_weight_sharing=model_opt.embs_share_weight,
        d_k=model_opt.d_k,
        d_v=model_opt.d_v,
        d_model=model_opt.d_model,
        d_word_vec=model_opt.d_word_vec,
        d_inner=model_opt.d_inner_hid,
        n_layers=model_opt.n_layers,
        n_head=model_opt.n_head,
        dropout=model_opt.dropout).to(device)

    model.load_state_dict(checkpoint['model'])
    print('[Info] Trained model state loaded.')
    return model
def main():
    '''Main Function: translate the test set with a trained model and
    write one decoded sequence per line to the output file.'''
    parser = argparse.ArgumentParser(description='translate.py')
    parser.add_argument('-model', required=True,
                        help='Path to model weight file')
    parser.add_argument('-data_pkl', required=True,
                        help='Pickle file with both instances and vocabulary.')
    parser.add_argument('-output', default='pred.txt',
                        help="""Path to output the predictions (each line will
                        be the decoded sequence""")
    parser.add_argument('-beam_size', type=int, default=5)
    parser.add_argument('-max_seq_len', type=int, default=100)
    parser.add_argument('-no_cuda', action='store_true')

    # TODO: Translate bpe encoded files
    # parser.add_argument('-src', required=True,
    #                     help='Source sequence to decode (one line per sequence)')
    # parser.add_argument('-vocab', required=True,
    #                     help='Source sequence to decode (one line per sequence)')
    # TODO: Batch translation
    # parser.add_argument('-batch_size', type=int, default=30,
    #                     help='Batch size')
    # parser.add_argument('-n_best', type=int, default=1,
    #                     help="""If verbose is set, will output the n_best
    #                     decoded sentences""")

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda

    # data = pickle.load(open(opt.data_pkl, 'rb'))
    # SRC, TRG = data['vocab']['src'], data['vocab']['trg']
    # opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]
    # opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]
    # opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]
    # opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]
    # test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG})

    device = torch.device('cuda' if opt.cuda else 'cpu')
    # prepare_mydataloaders also fills the pad/bos/eos indices on `opt`.
    TRG, test_loader = prepare_mydataloaders(opt, device)
    translator = Translator(
        model=load_model(opt, device),
        beam_size=opt.beam_size,
        max_seq_len=opt.max_seq_len,
        src_pad_idx=opt.src_pad_idx,
        trg_pad_idx=opt.trg_pad_idx,
        trg_bos_idx=opt.trg_bos_idx,
        trg_eos_idx=opt.trg_eos_idx).to(device)

    with open(opt.output, 'w+', encoding='utf-8') as f:
        for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
            # Decode one source sequence at a time (batch size is 1).
            sec_seq = example[0].view(1, -1)
            pred_seq = translator.translate_sentence(sec_seq.to(device))
            pred_line = ' '.join(TRG.idxToLabel[idx] for idx in pred_seq)
            # Strip BOS and turn EOS into a newline so each prediction ends a line.
            pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '\n')
            f.write(pred_line)

    print('[Info] Finished.')
if __name__ == "__main__":
'''
Usage: python translate.py -model trained.chkpt -data multi30k.pt -no_cuda
'''
main()
| StarcoderdataPython |
29144 | # -*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
import xbmcaddon, xbmcgui, xbmc
"""System d'importation
from resources.lib.comaddon import addon, dialog, VSlog, xbmcgui, xbmc
"""
"""
from resources.lib.comaddon import addon
addons = addon() en haut de page.
utiliser une fonction comaddon ou xbmcaddon
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcaddon.html
addons.VSlang(30305)
addons.getLocalizedString(30305)
addons.openSettings()
utiliser la fonction avec un autre addon
addons2 = addon('plugin.video.youtube')
addons2.openSettings()
"""
class addon(xbmcaddon.Addon):
    """Thin wrapper around xbmcaddon.Addon adding vStream helper methods."""
    # def __init__(self, id='plugin.video.vstream'):
    #     xbmcaddon.__init__(id)
    #     pass

    def VSlang(self, lang):
        """Return the localized string for the given string id."""
        # NOTE(review): running a localized string through translatePath is
        # unusual -- presumably a workaround for the accent/encoding issue
        # mentioned below; confirm before changing.
        return xbmc.translatePath(self.getLocalizedString(lang))
        # xbmcaddon.Addon('plugin.video.vstream').getLocalizedString(lang))
        # Accent bug: xbmc.translatePath(xbmcaddon.Addon('plugin.video.vstream').getLocalizedString(lang)).decode('utf-8')

    # Deprecated: use addons.setSetting and addons.getSetting instead.
    def VSsetting(self, name, value = False):
        """Read (no value given) or write (value given) an addon setting."""
        # addons = addon()
        # use addons.VSsetting('name') to read a setting
        # use addons.VSsetting('name', 'value') to write a setting
        # NOTE(review): any falsy value (False, '', 0) triggers a *read*,
        # so a setting cannot be cleared through this helper.
        if value:
            return self.setSetting(name, value)
        else:
            return self.getSetting(name)
"""
from resources.lib.comaddon import dialog
ne peux pas utiliser les autres fonction que dialog
dialogs = dialog()
dialogs.VSinfo('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Dialog
"""
class dialog(xbmcgui.Dialog):
    """Wrapper around xbmcgui.Dialog exposing vStream convenience popups."""
    # def __init__(self):
    #     xbmcgui.__init__('')
    #     pass

    def VSok(self, desc, title = 'vStream'):
        """Modal "OK" box; returns once the user confirms."""
        dialog = self.ok(title, desc)
        return dialog

    def VSyesno(self, desc, title = 'vStream'):
        """Modal yes/no box; returns the user's choice."""
        dialog = self.yesno(title, desc)
        return dialog

    def VSselect(self, desc, title = 'vStream'):
        """Selection list; returns the chosen index (-1 when cancelled)."""
        ret = self.select(title, desc)
        return ret

    def VSselectqual(self, list_qual, list_url):
        """Let the user pick a quality label; return the matching URL.

        Returns '' when there is nothing to pick or the user cancels.
        """
        if len(list_url) == 0:
            return ''
        if len(list_url) == 1:
            # Single candidate: no dialog needed.
            return list_url[0]
        ret = self.select(addon().VSlang(30448), list_qual)
        if ret > -1:
            return list_url[ret]
        return ''

    def VSinfo(self, desc, title = 'vStream', iseconds = 0, sound = False):
        """Toast notification; iseconds is in seconds (0 means 1 second)."""
        if (iseconds == 0):
            iseconds = 1000
        else:
            # notification() expects milliseconds.
            iseconds = iseconds * 1000
        if (addon().getSetting('Block_Noti_sound') == 'true'):
            sound = True
        return self.notification(str(title), str(desc), xbmcgui.NOTIFICATION_INFO, iseconds, sound)

    def VSerror(self, e):
        """Show an error toast and log it; returns a (notification, log) tuple."""
        return self.notification('vStream', 'Erreur: ' + str(e), xbmcgui.NOTIFICATION_ERROR, 2000), VSlog('Erreur: ' + str(e))
"""
from resources.lib.comaddon import progress
progress_ = progress()
progress_.VScreate(SITE_NAME)
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
progress_.VSclose(progress_)
dialog = progress() non recommander
progress = progress() non recommander
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#DialogProgress
"""
# Shared progress-dialog state: COUNT is the number of processed items so
# far; DIALOG2 caches the single open progress dialog (None when closed).
COUNT = 0
DIALOG2 = None
class empty():
    """No-op stand-in for the progress dialog, returned when no real
    dialog may be shown (e.g. on the home screen)."""

    def VSupdate(self, dialog, total, text = '', search = False):
        """Ignore progress updates."""

    def iscanceled(self):
        """Never cancelled; implicitly returns None (falsy)."""

    def VSclose(self, dialog):
        """Nothing to close."""
class progress(xbmcgui.DialogProgress):
    """Progress dialog shared as a module-level singleton (DIALOG2/COUNT)."""

    def VScreate(self, title = 'vStream', desc = ''):
        """Create (or reuse) the shared progress dialog."""
        global DIALOG2
        currentWindow = xbmcgui.getCurrentWindowId()
        if currentWindow == 10000:
            # Home screen: no progress dialog wanted there, hand back a
            # no-op stand-in instead.
            return empty()
        if DIALOG2 == None:
            self.create(title, desc)
            VSlog('create dialog')
            DIALOG2 = self
            return self
        else:
            # A dialog is already open: reuse it.
            return DIALOG2

    def VSupdate(self, dialog, total, text = '', search = False):
        """Advance the shared counter and refresh the dialog display."""
        if not search and window(10101).getProperty('search') == 'true':
            # A search is in progress; leave its dialog alone.
            return
        global COUNT
        COUNT += 1
        iPercent = int(float(COUNT * 100) / total)
        dialog.update(iPercent, 'Loading: ' + str(COUNT) + '/' + str(total), text)

    def VSclose(self, dialog = ''):
        """Close the given dialog, or the shared one when none is given."""
        if not dialog and DIALOG2:
            dialog = DIALOG2
        if not dialog:
            return
        if window(10101).getProperty('search') == 'true':
            return
        dialog.close()
        VSlog('close dialog')
        del dialog
        # NOTE(review): DIALOG2 and COUNT are not reset here, so a later
        # VScreate() still returns the closed dialog -- confirm intended.
        return False
"""
from resources.lib.comaddon import window
window(10101).getProperty('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Window
"""
class window(xbmcgui.Window):
    """Wrapper kept so callers can do window(10101).getProperty(...)."""

    def __init__(self, id):
        # Intentionally does not call the base __init__ -- presumably the
        # C-level constructor already consumed the window id; confirm.
        pass
"""
from resources.lib.comaddon import listitem
listitem.setLabel('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem
"""
class listitem(xbmcgui.ListItem):
    """Wrapper providing a uniform import point for xbmcgui.ListItem."""
    # ListItem([label, label2, iconImage, thumbnailImage, path])

    def __init__(self, label = '', label2 = '', iconImage = '', thumbnailImage = '', path = ''):
        # Intentionally does not call the base __init__ -- presumably the
        # C-level constructor already consumed these arguments; confirm.
        pass
"""
from resources.lib.comaddon import VSlog
VSlog('testtttttttttttt')
ou
xbmc.log
"""
#xbmc des fonctions pas des class
def VSlog(e, level = xbmc.LOGDEBUG):
    """Write a message to the Kodi log, raising the level when debug is on."""
    # The addon id is given explicitly so this also works when called from
    # outside the addon.
    if (addon('plugin.video.vstream').getSetting('debug') == 'true'):
        level = xbmc.LOGNOTICE
    return xbmc.log('\t[PLUGIN] vStream: ' + str(e), level)
def VSupdate():
    """Refresh the current Kodi container (directory listing)."""
    return xbmc.executebuiltin('Container.Refresh')
def VSshow_busy():
    """Open Kodi's spinning "busy" dialog."""
    xbmc.executebuiltin('ActivateWindow(busydialog)')
def VShide_busy():
    """Close the busy dialog and block until it is really gone."""
    xbmc.executebuiltin('Dialog.Close(busydialog)')
    while xbmc.getCondVisibility('Window.IsActive(busydialog)'):
        xbmc.sleep(100)
def isKrypton():
    """Return True when the running Kodi is version 17 (Krypton) or newer.

    The build version label looks like '17.6 Git:...'; only the leading
    major number is compared.
    """
    try:
        version = xbmc.getInfoLabel('system.buildversion')
        # Compare the major version numerically: the previous string slice
        # comparison (version[0:2] >= '17') was lexicographic and wrongly
        # classified one-digit majors, e.g. '9.' >= '17' is True.
        major = int(version.split('.')[0].split(' ')[0])
        return major >= 17
    except Exception:
        # No usable version information: assume pre-Krypton.
        return False
def VSread(sHtmlContent):
    """Dump raw HTML to the addon's userdata folder for offline inspection."""
    import xbmcvfs
    dump_path = 'special://userdata/addon_data/plugin.video.vstream/html.txt'
    # Start from a clean slate: drop any previous dump first.
    if xbmcvfs.exists(dump_path):
        xbmcvfs.delete(dump_path)
    handle = xbmcvfs.File(dump_path, 'w')
    handle.write(sHtmlContent)
    handle.close()
#use cGui.showKeyBoard
def VSkeyboard(sDefaultText = ''):
    """Deprecated no-op kept for compatibility; use cGui.showKeyBoard.

    Always returns False.
    """
    return False
| StarcoderdataPython |
1656442 | <filename>python/ex8_anomaly_recommender/ano_rec_funcs/cofi_cost_func.py
import numpy
def cofi_cost_func(params, Y, R, num_users, num_movies, num_features, reg_lambda=0.0):
    """
    Collaborative filtering cost function and gradient.

    Parameters
    ----------
    params : array_like
        One-dimensional concatenation of the raveled movie-feature matrix X
        (num_movies x num_features) and user-parameter matrix Theta
        (num_users x num_features).
    Y : array_like
        (num_movies x num_users) matrix of user ratings of movies.
    R : array_like
        (num_movies x num_users) indicator matrix; R[i, j] = 1 iff the
        i-th movie was rated by the j-th user.
    num_users : int
        Total number of users.
    num_movies : int
        Total number of movies.
    num_features : int
        Number of features to learn.
    reg_lambda : float, optional
        The regularization coefficient.

    Returns
    -------
    cost_J : float
        The value of the cost function at the given params.
    grad : array_like
        Gradient of the cost w.r.t. params: the concatenation of the raveled
        X gradient and Theta gradient, shape (num_movies x num_users, 1).
    """
    # Unfold X and Theta from the flat parameter vector.
    prod_mov_feat = num_movies * num_features
    x_array = params[:prod_mov_feat].reshape(num_movies, num_features)
    Theta = params[prod_mov_feat:].reshape(num_users, num_features)

    # Prediction error, masked so only rated entries (R == 1) contribute.
    residual = (numpy.dot(x_array, numpy.transpose(Theta)) - Y) * R

    # Squared-error cost plus L2 regularization on both parameter sets.
    error = (1 / 2) * numpy.sum(numpy.square(residual))
    reg_term = (reg_lambda / 2) * (numpy.sum(numpy.square(x_array)) +
                                   numpy.sum(numpy.square(Theta)))
    cost_J = error + reg_term

    # Vectorized gradients. Because residual is zero wherever R == 0, these
    # matrix products are exactly equivalent to the former per-row /
    # per-column Python loops, just computed in one shot.
    x_grad = numpy.dot(residual, Theta) + reg_lambda * x_array
    Theta_grad = numpy.dot(residual.T, x_array) + reg_lambda * Theta

    grad = numpy.concatenate([x_grad.ravel(), Theta_grad.ravel()])
    return cost_J, grad
| StarcoderdataPython |
3279839 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.utils.py3 import httplib
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.compute.types import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.providers import get_driver
from libcloud.test import unittest
from libcloud.test.secrets import PROFIT_BRICKS_PARAMS
class ProfitBricksTests(unittest.TestCase):
    """Unit tests for the libcloud ProfitBricks compute driver.

    All HTTP traffic is routed to ProfitBricksMockHttp (see below), which
    serves canned XML fixtures, so no real API is ever contacted. The
    lightweight ``type('Node', (object,), dict(...))`` constructs build
    throwaway stand-ins carrying only the attributes the driver reads.
    """

    def setUp(self):
        # Wire the driver's connection class to the mock HTTP class below.
        ProfitBricks = get_driver(Provider.PROFIT_BRICKS)
        ProfitBricks.connectionCls.conn_classes = (None, ProfitBricksMockHttp)
        self.driver = ProfitBricks(*PROFIT_BRICKS_PARAMS)

    ''' Server Function Tests
    '''
    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 3)
        node = nodes[0]
        self.assertEqual(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246")
        self.assertEqual(node.name, "server001")
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.public_ips, ['192.168.127.12'])
        self.assertEqual(node.private_ips, ['10.10.108.12', '10.13.198.11'])
        self.assertEqual(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899")
        self.assertEqual(node.extra['datacenter_version'], "5")
        self.assertEqual(node.extra['provisioning_state'], NodeState.RUNNING)
        self.assertEqual(node.extra['creation_time'], "2014-07-14T20:52:20.839Z")
        self.assertEqual(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z")
        self.assertEqual(node.extra['os_type'], "LINUX")
        self.assertEqual(node.extra['availability_zone'], "ZONE_1")

    def test_ex_describe_node(self):
        # NOTE(review): despite the name, this exercises create_node and
        # checks the id parsed from the create_node fixture.
        image = type('NodeImage', (object,),
                     dict(id="cd59b162-0289-11e4-9f63-52540066fee9",
                          name="Debian-7-server-2014-07-01"))
        size = type('NodeSize', (object,),
                    dict(id="2",
                         name="Small Instance",
                         ram=2048,
                         disk=50,
                         extra={'cores': 1}))
        node = self.driver.create_node(name="SPC-Server",
                                       image=image,
                                       size=size)
        self.assertEqual(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750")

    def test_reboot_node(self):
        node = type('Node', (object,),
                    dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
        reboot = self.driver.reboot_node(node=node)
        self.assertTrue(reboot)

    def test_ex_stop_node(self):
        node = type('Node', (object,),
                    dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
        stop = self.driver.ex_stop_node(node=node)
        self.assertTrue(stop)

    def test_ex_start_node(self):
        node = type('Node', (object,),
                    dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
        start = self.driver.ex_start_node(node=node)
        self.assertTrue(start)

    def test_destroy_node(self):
        node = type('Node', (object,),
                    dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
        destroy = self.driver.destroy_node(node=node)
        self.assertTrue(destroy)

    def test_ex_update_node(self):
        node = type('Node', (object,),
                    dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
        zone = type('ExProfitBricksAvailabilityZone', (object,),
                    dict(name="ZONE_2"))
        update = self.driver.ex_update_node(node=node, ram=2048, cores=2, name="server002", availability_zone=zone)
        self.assertTrue(update)

    ''' Volume Function Tests
    '''
    def test_list_volumes(self):
        volumes = self.driver.list_volumes()
        self.assertEqual(len(volumes), 4)
        volume = volumes[0]
        self.assertEqual(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232")
        self.assertEqual(volume.name, "storage001")
        self.assertEqual(volume.size, 50)
        self.assertEqual(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29")
        self.assertEqual(volume.extra['provisioning_state'], NodeState.RUNNING)
        self.assertEqual(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z")
        self.assertEqual(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z")
        self.assertEqual(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9")
        self.assertEqual(volume.extra['image_name'], "CentOS-6-server-2014-07-01")
        self.assertEqual(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292")

    def test_create_volume(self):
        datacenter = type('Datacenter', (object,),
                          dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        image = type('NodeImage', (object,),
                     dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        create = self.driver.create_volume(name="StackPointCloudStorage001",
                                           size=50,
                                           ex_datacenter=datacenter,
                                           ex_image=image)
        self.assertTrue(create)

    def test_attach_volume_general(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        node = type('Node', (object,),
                    dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        attach = self.driver.attach_volume(node=node,
                                           volume=volume,
                                           device=None, ex_bus_type=None)
        self.assertTrue(attach)

    def test_attach_volume_device_defined(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        node = type('Node', (object,),
                    dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        attach = self.driver.attach_volume(node=node, volume=volume, device=1, ex_bus_type=None)
        self.assertTrue(attach)

    def test_attach_volume_bus_type_defined(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        node = type('Node', (object,),
                    dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        attach = self.driver.attach_volume(node=node,
                                           volume=volume,
                                           device=None,
                                           ex_bus_type="IDE")
        self.assertTrue(attach)

    def test_attach_volume_options_defined(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        node = type('Node', (object,),
                    dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        attach = self.driver.attach_volume(node=node, volume=volume,
                                           device=1, ex_bus_type="IDE")
        self.assertTrue(attach)

    def test_detach_volume(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476",
                           extra={'server_id': "cd59b162-0289-11e4-9f63-52540066fee9"}
                           ))
        attach = self.driver.detach_volume(volume=volume)
        self.assertTrue(attach)

    def test_destroy_volume(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        destroy = self.driver.destroy_volume(volume=volume)
        self.assertTrue(destroy)

    def test_update_volume(self):
        volume = type('StorageVolume', (object,),
                      dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        destroy = self.driver.ex_update_volume(volume=volume)
        self.assertTrue(destroy)

    def test_ex_describe_volume(self):
        describe = self.driver.ex_describe_volume(volume_id="8669a69f-2274-4520-b51e-dbdf3986a476")
        self.assertEqual(describe.id, "00d0b9e7-e016-456f-85a0-517aa9a34bf5")
        self.assertEqual(describe.size, 50)
        self.assertEqual(describe.name, "StackPointCloud-Volume")
        self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING)

    ''' Image Function Tests
    '''
    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 3)
        image = images[0]
        self.assertEqual(image.extra['cpu_hotpluggable'], "false")
        self.assertEqual(image.id, "03b6c3e7-f2ad-11e3-a036-52540066fee9")
        self.assertEqual(image.name, "windows-2012-r2-server-2014-06")
        self.assertEqual(image.extra['image_size'], "11264")
        self.assertEqual(image.extra['image_type'], "HDD")
        self.assertEqual(image.extra['memory_hotpluggable'], "false")
        self.assertEqual(image.extra['os_type'], "WINDOWS")
        self.assertEqual(image.extra['public'], "true")
        self.assertEqual(image.extra['location'], None)
        self.assertEqual(image.extra['writeable'], "true")

    ''' Datacenter Function Tests
    '''
    def test_ex_create_datacenter(self):
        datacenter = self.driver.ex_create_datacenter(name="StackPointCloud",
                                                      location="us/la")
        self.assertEqual(datacenter.id, '0c793dd1-d4cd-4141-86f3-8b1a24b2d604')
        self.assertEqual(datacenter.extra['location'], 'us/las')
        self.assertEqual(datacenter.version, '1')

    def test_ex_destroy_datacenter(self):
        datacenter = type('Datacenter', (object,),
                          dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        destroy = self.driver.ex_destroy_datacenter(datacenter=datacenter)
        self.assertTrue(destroy)

    def test_ex_describe_datacenter(self):
        datacenter = type('Datacenter', (object,),
                          dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d"))
        describe = self.driver.ex_describe_datacenter(datacenter_id=datacenter.id)
        self.assertEqual(describe.id, 'a3e6f83a-8982-4d6a-aebc-60baf5755ede')
        self.assertEqual(describe.name, 'StackPointCloud')
        self.assertEqual(describe.version, '1')
        self.assertEqual(describe.extra['location'], 'us/las')
        self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING)

    def test_ex_clear_datacenter(self):
        datacenter = type('Datacenter', (object,),
                          dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        clear = self.driver.ex_clear_datacenter(datacenter=datacenter)
        self.assertTrue(clear)

    def test_ex_list_datacenters(self):
        datacenters = self.driver.ex_list_datacenters()
        self.assertEqual(len(datacenters), 2)
        dc1 = datacenters[0]
        self.assertEqual(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede")
        self.assertEqual(dc1.name, "StackPointCloud")
        self.assertEqual(dc1.version, "1")

    def test_ex_rename_datacenter(self):
        datacenter = type('Datacenter', (object,),
                          dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d"))
        update = self.driver.ex_rename_datacenter(datacenter=datacenter,
                                                  name="StackPointCloud")
        self.assertTrue(update)

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(len(locations), 3)
        # Compare the sorted names so the fixture's ordering doesn't matter.
        locationNamesResult = sorted(list(a.name for a in locations))
        locationNamesExpected = ['de/fkb', 'de/fra', 'us/las']
        self.assertEqual(locationNamesResult, locationNamesExpected)

    ''' Availability Zone Tests
    '''
    def test_ex_list_availability_zones(self):
        zones = self.driver.ex_list_availability_zones()
        self.assertEqual(len(zones), 3)
        zoneNamesResult = sorted(list(a.name for a in zones))
        zoneNamesExpected = ['AUTO', 'ZONE_1', 'ZONE_2']
        self.assertEqual(zoneNamesResult, zoneNamesExpected)

    ''' Interface Tests
    '''
    def test_ex_list_interfaces(self):
        interfaces = self.driver.ex_list_network_interfaces()
        self.assertEqual(len(interfaces), 3)
        interface = interfaces[0]
        self.assertEqual(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727")
        self.assertEqual(interface.name, "StackPointCloud")
        self.assertEqual(interface.state, NodeState.RUNNING)
        self.assertEqual(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116")
        self.assertEqual(interface.extra['lan_id'], '3')
        self.assertEqual(interface.extra['internet_access'], 'false')
        self.assertEqual(interface.extra['mac_address'], "02:01:40:47:90:04")
        self.assertEqual(interface.extra['dhcp_active'], "true")
        self.assertEqual(interface.extra['gateway_ip'], None)
        self.assertEqual(interface.extra['ips'], ['10.14.96.11', '172.16.31.10', '172.16.58.3'])

    def test_ex_create_network_interface(self):
        node = type('Node', (object,),
                    dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
        interface = self.driver.ex_create_network_interface(node=node)
        self.assertEqual(interface.id, '6b38a4f3-b851-4614-9e3a-5ddff4727727')

    def test_ex_destroy_network_interface(self):
        network_interface = type('ProfitBricksNetworkInterface', (object,),
                                 dict(
                                     id="cd59b162-0289-11e4-9f63-52540066fee9"))
        destroy = self.driver.ex_destroy_network_interface(
            network_interface=network_interface)
        self.assertTrue(destroy)

    def test_ex_update_network_interface(self):
        network_interface = type('ProfitBricksNetworkInterface', (object,),
                                 dict(
                                     id="cd59b162-0289-11e4-9f63-52540066fee9"))
        create = self.driver.ex_update_network_interface(
            network_interface=network_interface)
        self.assertTrue(create)

    def test_ex_describe_network_interface(self):
        network_interface = type('ProfitBricksNetworkInterface', (object,),
                                 dict(
                                     id="cd59b162-0289-11e4-9f63-52540066fee9"))
        describe = self.driver.ex_describe_network_interface(network_interface=network_interface)
        self.assertEqual(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3")
        self.assertEqual(describe.name, None)
        self.assertEqual(describe.state, NodeState.RUNNING)
        self.assertEqual(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee")
        self.assertEqual(describe.extra['datacenter_version'], "6")
        self.assertEqual(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408")
        self.assertEqual(describe.extra['lan_id'], "1")
        self.assertEqual(describe.extra['internet_access'], "false")
        self.assertEqual(describe.extra['mac_address'], "02:01:96:d7:60:e0")
        self.assertEqual(describe.extra['dhcp_active'], "true")
        self.assertEqual(describe.extra['gateway_ip'], None)
        self.assertEqual(describe.extra['ips'], ['10.10.38.12'])

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 7)
class ProfitBricksMockHttp(MockHttp):
    """Mock HTTP layer: each ``_1_3_<action>`` method answers the matching
    ProfitBricks API call with a canned XML fixture and HTTP 200."""

    # Fixture files live under the 'profitbricks' compute fixtures directory.
    fixtures = ComputeFileFixtures('profitbricks')

    def _1_3_clearDataCenter(self, method, url, body, headers):
        body = self.fixtures.load('ex_clear_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_createDataCenter(self, method, url, body, headers):
        body = self.fixtures.load('ex_create_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_deleteDataCenter(self, method, url, body, headers):
        body = self.fixtures.load('ex_destroy_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getDataCenter(self, method, url, body, headers):
        body = self.fixtures.load('ex_describe_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getAllDataCenters(self, method, url, body, headers):
        body = self.fixtures.load('ex_list_datacenters.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_updateDataCenter(self, method, url, body, headers):
        body = self.fixtures.load('ex_update_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getAllImages(self, method, url, body, headers):
        body = self.fixtures.load('list_images.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getAllServers(self, method, url, body, headers):
        body = self.fixtures.load('list_nodes.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_resetServer(self, method, url, body, headers):
        body = self.fixtures.load('reboot_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_stopServer(self, method, url, body, headers):
        body = self.fixtures.load('ex_stop_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_startServer(self, method, url, body, headers):
        body = self.fixtures.load('ex_start_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_deleteServer(self, method, url, body, headers):
        body = self.fixtures.load('destroy_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getAllStorages(self, method, url, body, headers):
        body = self.fixtures.load('list_volumes.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_createStorage(self, method, url, body, headers):
        body = self.fixtures.load('create_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_connectStorageToServer(self, method, url, body, headers):
        body = self.fixtures.load('attach_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_disconnectStorageFromServer(self, method, url, body, headers):
        body = self.fixtures.load('detach_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_deleteStorage(self, method, url, body, headers):
        body = self.fixtures.load('destroy_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_updateStorage(self, method, url, body, headers):
        body = self.fixtures.load('ex_update_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_updateServer(self, method, url, body, headers):
        body = self.fixtures.load('ex_update_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getNic(self, method, url, body, headers):
        body = self.fixtures.load('ex_describe_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getAllNic(self, method, url, body, headers):
        body = self.fixtures.load('ex_list_network_interfaces.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_createNic(self, method, url, body, headers):
        # NOTE(review): reuses the list fixture rather than a dedicated
        # 'create' one -- presumably the response shape is identical.
        body = self.fixtures.load('ex_list_network_interfaces.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_deleteNic(self, method, url, body, headers):
        body = self.fixtures.load('ex_destroy_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_updateNic(self, method, url, body, headers):
        body = self.fixtures.load('ex_update_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getServer(self, method, url, body, headers):
        body = self.fixtures.load('ex_describe_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_getStorage(self, method, url, body, headers):
        body = self.fixtures.load('ex_describe_volume.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _1_3_createServer(self, method, url, body, headers):
        body = self.fixtures.load('create_node.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Run this module's tests directly; the exit status reflects the result.
if __name__ == '__main__':
    sys.exit(unittest.main())
| StarcoderdataPython |
5119 | import sys
sys.setrecursionlimit(10000)
def dfs(r, c):
    """Flood-fill: mark every cell 4-connected to (r, c) as visited.

    Relies on module-level N, M, board and visit.
    """
    global visit
    visit[r][c] = True
    for dr, dc in ((-1, 0), (0, -1), (1, 0), (0, 1)):
        nr, nc = r + dr, c + dc
        in_bounds = 0 <= nr < N and 0 <= nc < M
        if in_bounds and not visit[nr][nc] and board[nr][nc] == 1:
            dfs(nr, nc)
# Count connected components of 1-cells (4-connectivity) per test case.
T = int(input())
for _ in range(T):
    M, N, K = map(int, input().split())
    board = [[0] * M for _ in range(N)]
    for _ in range(K):
        # Input gives (column, row) pairs.
        c, r = map(int, input().split())
        board[r][c] = 1
    visit = [[False] * M for _ in range(N)]
    cnt = 0
    for r in range(N):
        for c in range(M):
            if not visit[r][c] and board[r][c] == 1:
                cnt += 1
                dfs(r, c)
    # Only the component count belongs to the output: the previous debug
    # dump of the visit matrix (plus a blank line) corrupted the expected
    # output and would fail an online judge.
    print(cnt)
1648492 | <reponame>kit-tm/seed
from pox.core import core
from pox.lib.packet.arp import arp
from pox.lib.revent import EventMixin
log = core.getLogger()
class host(EventMixin):
    """Tracks which edge (switch, port) each host IP was last seen on.

    Mappings are learned passively from ARP packets seen in OpenFlow
    PacketIn events on edge ports.
    """

    # Maps ARP protocol source address (IP) -> (dpid, inport).
    # NOTE(review): class attribute, so the map is shared by all instances.
    host_map = {}

    def __init__(self):
        core.listen_to_dependencies(self)

    def _handle_openflow_PacketIn(self, event):
        """Learn the sender's location from ARP traffic on edge ports."""
        dpid = event.connection.dpid
        inport = event.port
        packet = event.parsed
        if not packet.parsed:
            log.warning("%i: ignoring unparsed packet", dpid)
            return
        if not core.openflow_discovery.is_edge_port(dpid, inport):
            # Inter-switch links do not reveal where a host lives.
            log.debug("ignore packetIn at switch port: %i %i", dpid, inport)
            return
        a = packet.find('arp')
        if not a:
            return
        # Only record well-formed IPv4-over-Ethernet ARP with a real source.
        if (a.prototype == arp.PROTO_TYPE_IP and
            a.hwtype == arp.HW_TYPE_ETHERNET and
            a.protosrc != 0):
            self.host_map[a.protosrc] = (dpid, inport)

    def query(self, protosrc):
        """Return (dpid, inport) for the given IP, or None if unknown."""
        return self.host_map.get(protosrc)
def launch():
    """POX entry point: register a single shared host tracker on core."""
    core.registerNew(host)
| StarcoderdataPython |
3306459 | <filename>src/dorime-bot.py
# Discord Stuff
import discord
# Spotify Stuff
# import spotipy
# from spotipy.oauth2 import SpotifyClientCredentials
# Utility Imports
from distutils.util import strtobool
from dotenv import dotenv_values
import requests
import json
import re
# See Sample.env for example of what the .env file should look like
# See Sample.env for example of what the .env file should look like
config = dotenv_values(".env")

# Regex Definitions
# Captures the track id out of an open.spotify.com track URL (case-insensitive).
SPOTIFY_URL_REGEX = re.compile(r"(?i)open\.spotify\.com/track/([a-zA-Z0-9]*)\?.*")

# Update the .env file to swap the DEBUG Value
DEBUG = bool(strtobool(config["DEBUG"])) if "DEBUG" in config else False

# Get Watched Spotify Watched Channel
SPOTIFY_WATCHED_CHANNEL = int(config["WATCHED_SPOTIFY_CHANNEL"])

# Double check that the program is extracting the .env configurations correctly
if DEBUG:
    for key in config:
        print(f"{key}: {config[key]}")
# Random Helper Functions
def get_quote() -> str:
    """Fetch a random inspirational quote formatted as '"text" - author'."""
    payload = requests.get("https://zenquotes.io/api/random").json()
    quote = payload[0]
    return f"\"{quote['q']}\" - {quote['a']}"
# Dorime Bot Definitions
class DorimeBot(discord.Client):
    """Discord client that watches a channel for Spotify links and answers
    a few '$'-prefixed commands."""

    async def on_ready(self) -> None:
        """Will display which user we are connecting to when the discord bot connects to the server"""
        print(f"We Have Logged in as {self.user}")

    async def on_message(self, message: discord.Message) -> None:
        """
        Describes how to handle messages that the bot sees

        :param discord.Message message: The incoming message
        :return None
        """
        # Ignore the bot's own messages so it never answers itself.
        if message.author == self.user:
            return

        author: discord.User = message.author
        channel: discord.TextChannel = message.channel

        if DEBUG:
            print(
                f"channel.id ({channel.id}) == SPOTIFY_WATCHED_CHANNEL ({SPOTIFY_WATCHED_CHANNEL}): {channel.id == SPOTIFY_WATCHED_CHANNEL}"
            )

        if channel.id == SPOTIFY_WATCHED_CHANNEL:
            if DEBUG:
                print("Message sent in Spotify Watch Channel")
            spotify_track_ids = re.findall(SPOTIFY_URL_REGEX, message.content)
            # Fixed: the early return used to be gated on DEBUG, so with
            # DEBUG off the bot replied "Found ... []" to every message in
            # the watched channel. Always bail when no track URL matched.
            if not spotify_track_ids:
                if DEBUG:
                    print("No Spotify Track URL found in Message")
                return
            await message.channel.send(
                f"Found the Spotify URL with the Track id(s): {spotify_track_ids}"
            )
            # TODO: Add Track id to Spotify Playlist
        elif message.content.startswith("$hello"):
            # Fixed: this f-string was garbled (stray '}' -> SyntaxError);
            # greet the author with a standard discord mention instead.
            msg = f"Hello {author.mention}!"
            await message.channel.send(msg)
        elif message.content.startswith("$inspire"):
            quote = get_quote()
            await message.channel.send(quote)
        elif message.content.startswith("$here"):
            print(message)
def main():
    """Build the bot and run it with the token from the .env file."""
    bot = DorimeBot()
    bot.run(config["DISCORD_TOKEN"])
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1620857 | # Program to find Juggler Sequence in Python
# Juggler Sequence: https://en.wikipedia.org/wiki/Juggler_sequence
# The juggler_sequence function takes in a starting number and prints all juggler
# numbers starting from that number until it reaches 1
# Keep in mind that the juggler sequence has been conjectured to reach 1 eventually
# but this fact has not yet been proved
def juggler_sequence(n):
    """Return the Juggler sequence starting at n, ending at 1 (inclusive).

    Each even term k maps to floor(sqrt(k)); each odd term to
    floor(k ** 1.5). Termination at 1 is conjectured but unproven, hence
    the open-ended loop.
    """
    from math import isqrt  # exact integer sqrt; float ** 0.5 is lossy for big k

    seq = [n]
    while seq[-1] != 1:
        k = seq[-1]
        if k % 2 == 0:
            # floor(sqrt(k)) computed exactly -- int(k ** 0.5) can be off
            # by one for large k because of float rounding.
            seq.append(isqrt(k))
        else:
            # floor(k ** 1.5) == isqrt(k ** 3), exact in integer arithmetic.
            seq.append(isqrt(k ** 3))
    return seq
# Simple CLI: ask for a start value and print its Juggler sequence.
if __name__ == "__main__":
    x = int(input("Enter a number for Juggler Sequence: "))
    print(juggler_sequence(x))
| StarcoderdataPython |
3376468 | <filename>src/lesson.py
# -*- coding: utf-8 -*-
import random
import word
import dictionary
import lesson_words
class Practice:
    """One exercise on a single word within a lesson.

    Only the *first* answer counts toward statistics; further attempts
    just keep checking until a correct one ends the exercise.
    """

    def __init__(self, lesson, word, type_pr):
        self.lesson = lesson
        self.word = word
        self.type_pr = type_pr
        self.result = None      # outcome of the first answer (None = unanswered)
        self.is_answer = False  # True once a correct answer was given

    def question_data(self):
        """Question payload for this practice's translation direction."""
        return self.word.question_data(self.type_pr)

    def is_new(self):
        return self.word.is_new(self.type_pr)

    def is_rur(self):
        """True when the exercise asks to write the English form."""
        return self.type_pr == word.ru_to_en_write

    def get_source_info(self):
        return self.word.get_source_info()

    def update_stat(self, right_answer_percent, wrong_answer_percent):
        """Fold the recorded first-answer result into the word's statistics."""
        if self.result is None:
            return
        delta = right_answer_percent if self.result else -wrong_answer_percent
        self.word.update_stat(self.result, delta, self.type_pr)

    def is_end(self):
        return self.is_answer

    def check(self, user_answer):
        """Check an answer; only the first attempt updates lesson stats."""
        ok, right_answer = self.word.check(user_answer, self.type_pr)
        self.is_answer = ok
        if self.result is None:
            self.result = ok
            self.lesson.update_stat(ok)
        return ok, right_answer

    def last_result(self):
        """Success flag plus question data for the opposite direction."""
        if self.type_pr == word.en_to_ru_write:
            opposite = word.ru_to_en_write
        else:
            opposite = word.en_to_ru_write
        return self.is_answer, self.word.question_data(opposite)
class Lesson:
    """A single study session.

    Picks one translation direction at random for the whole session,
    loads the dictionary and per-word statistics from disk, hands out
    Practice items, and persists updated statistics at the end.
    """
    def __init__(self, cfg):
        # cfg: dict-like configuration; the keys read below are required.
        random.seed()
        # One direction (EN->RU or RU->EN written) for the entire lesson.
        self.type_pr = random.choice([word.en_to_ru_write, word.ru_to_en_write])
        self.dict = dictionary.Dict(cfg)
        self.right_answer_percent = cfg["right_answer_percent"]
        self.wrong_answer_percent = cfg["wrong_answer_percent"]
        self.max_success = cfg["words_per_lesson"]  # successes needed to finish
        self.cnt_success = 0
        self.cnt_error = 0
        self.path_to_stat = cfg["path_to_stat"]
        self.practice_list = []  # every Practice handed out this session
        self.dict.reload_dict(cfg["path_to_dict"])
        self.dict.reload_stat(self.path_to_stat)
        words = self.dict.words_for_lesson(cfg["CntStudyWords"], self.type_pr)
        self.lsn_words = lesson_words.LessonWords(words)
    def get_dict(self):
        return self.dict
    def update_stat(self, is_success):
        # Called by Practice.check for every answer given.
        if is_success:
            self.cnt_success += 1
        else:
            self.cnt_error += 1
    def end_lesson(self):
        # Re-read statistics from disk before merging — presumably so
        # changes written by another session are not clobbered (confirm),
        # then fold in each practice outcome and save.
        self.dict.reload_stat(self.path_to_stat)
        for it in self.practice_list:
            it.update_stat(self.right_answer_percent, self.wrong_answer_percent)
        self.dict.save_stat(self.path_to_stat)
    def get_next_practice(self):
        # Hand out a new Practice for a word chosen by LessonWords.
        pr = Practice(self, self.lsn_words.get_any_word(), self.type_pr)
        self.practice_list.append(pr)
        return pr
    def get_lesson_stat(self):
        # (successes so far, successes required, errors so far)
        return (self.cnt_success, self.max_success, self.cnt_error)
    def is_end_lesson(self):
        return self.max_success == self.cnt_success
| StarcoderdataPython |
3255982 | <reponame>qihuilyu/P2T
import main_ct as denoise
import math
from subprocess import run, CalledProcessError
import numpy as np
import numpy.random as nprand
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError, InvalidArgumentError
import argparse
# Command-line options for the random hyper-parameter search driven below.
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=1, help='first trial index for logging')
parser.add_argument('--trials', type=int, default=50, help='number of trials to run')
def randrange(start, stop, size=None):
    """Uniform random sample(s) from the half-open interval [start, stop).

    Returns a scalar when ``size`` is None, otherwise an ndarray of that
    shape (same semantics as ``numpy.random.random_sample``).
    """
    width = stop - start
    return width * nprand.random_sample(size) + start
if __name__ == "__main__":
    # Random hyper-parameter search: each trial samples (nfilters, depth,
    # lr), trains via the imported `denoise` module, and halves the batch
    # size on GPU OOM until training fits in memory.
    args = parser.parse_args()
    # NOTE(review): this range runs args.trials + 1 iterations — confirm
    # the extra trial is intentional.
    for trial in range(args.start, args.start+args.trials+1):
        tf.compat.v1.reset_default_graph()
        # Re-parse with no argv so every trial starts from clean defaults.
        denoise.args = denoise.parser.parse_args([])
        # NOTE(review): nprand.random_integers is deprecated (inclusive
        # bounds); nprand.randint(low, high + 1) is the replacement.
        nfilters = nprand.random_integers(4, 128)
        depth = nprand.random_integers(1,7)
        lr = randrange(1e-3, 5e-3)
        memo = "trial{:03d}_nfilters{:d}_depth{:d}_lr{:0.3e}".format(trial, nfilters, depth, lr)
        print("BEGINNING TRIAL: {}".format(memo))
        # NOTE(review): run_args is built but never used — dead code?
        run_args = [
            'python main_ct.py'
        ]
        denoise.args.phase = 'train'
        denoise.args.gpu = '0,1,2,3'
        denoise.args.weightedloss = True
        denoise.args.geometry = True
        denoise.args.epoch = 40
        denoise.args.traindata_dir = "/home/ryan/projects/MCDoseDenoiser/model_data_2.5mm_crop_rotate"
        denoise.args.log_dir = "/home/ryan/projects/MCDoseDenoiser/logs"
        denoise.args.checkpoint_dir = "/home/ryan/projects/MCDoseDenoiser/checkpoints/{}".format(memo)
        denoise.args.memo = memo
        denoise.args.nfilters = nfilters
        denoise.args.depth = depth
        denoise.args.lr = lr
        denoise.args.batch_size = 100*4
        denoise.args.noeval = False
        denoise.args.nogamma = True
        # Retry loop: halve batch_size on ResourceExhaustedError; a plain
        # retry on InvalidArgumentError (can loop if the error persists).
        success = False
        while not success and denoise.args.batch_size>1:
            try:
                denoise.run()
                success = True
            except ResourceExhaustedError as e:
                print(str(e))
                denoise.args.batch_size = math.ceil(denoise.args.batch_size / 2)
                print("reducing batch_size to: {}".format(denoise.args.batch_size))
            except InvalidArgumentError as e:
                print(str(e))
                print("Retrying")
| StarcoderdataPython |
3220910 | <reponame>XiXL/stereo-py-cv
# filename: camera_configs.py
import cv2
import numpy as np
# Intrinsics (3x3 camera matrix) and distortion coefficients of the left
# camera, from a prior stereo calibration.
left_camera_matrix = np.array([[427.794765373576, 0., 345.316879362880],
                               [0., 427.523078909675,248.042320744550],
                               [0., 0., 1.]])
left_distortion = np.array([[0.1203, -0.3813, 0.0, 0.0, 0.00000]])
# Intrinsics and distortion coefficients of the right camera.
right_camera_matrix = np.array([[428.460031706320, 0., 345.577140366428],
                                [0., 428.182342124927, 253.893691879974],
                                [0., 0., 1.]])
right_distortion = np.array([[0.1146, -0.3773, 0.0, 0.00, 0.00000]])
om = np.array([0.01911, 0.03125, -0.00960])  # rotation vector (Rodrigues form); NOTE(review): unused below
R = np.array([[0.999951195950026, 0.00365550158381214, -0.00917839998493453],
              [-0.00363800747921088, 0.999991535409192, 0.00192198115182608],
              [0.00918534809867894, -0.00188849626356949, 0.999956030514426]])
T = np.array([-59.9526388908344, -0.295725876726042, 0.398543708588111])  # translation vector between the cameras
size = (640, 480)  # image size
# Stereo rectification: compute the rotations/projections that bring both
# image planes onto a common plane with row-aligned epipolar lines.
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
                                                                  right_camera_matrix, right_distortion, size, R,
                                                                  T)
# Compute the per-pixel undistort/rectify remap tables for both cameras.
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
| StarcoderdataPython |
58601 | from dagster import check
from dagster.core.host_representation.external_data import ExternalPartitionData
from dagster.core.host_representation.handle import RepositoryHandle
from .utils import execute_unary_api_cli_command
def sync_get_external_partition(repository_handle, partition_set_name, partition_name):
    """Fetch ExternalPartitionData for one partition by invoking the
    repository's executable through the unary CLI API."""
    from dagster.cli.api import PartitionApiCommandArgs

    check.inst_param(repository_handle, 'repository_handle', RepositoryHandle)
    check.str_param(partition_set_name, 'partition_set_name')
    check.str_param(partition_name, 'partition_name')

    origin = repository_handle.get_origin()
    command_args = PartitionApiCommandArgs(
        repository_origin=origin,
        partition_set_name=partition_set_name,
        partition_name=partition_name,
    )
    result = execute_unary_api_cli_command(
        origin.executable_path, 'partition', command_args
    )
    # Guarantee the subprocess returned the expected payload type.
    return check.inst(result, ExternalPartitionData)
| StarcoderdataPython |
1767131 | <gh_stars>10-100
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''Layers for ranking model.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
class DotInteract(tf.layers.Layer):
  r'''DLRM: Deep Learning Recommendation Model for Personalization and
  Recommendation Systems.
  See https://github.com/facebookresearch/dlrm for more information.
  '''
  def call(self, x):
    r'''Call the DLRM dot interact layer.

    Args:
      x: stacked per-field feature vectors; the matmul/reshape below
        implies shape (batch, num_fields, dim) — TODO confirm at caller.

    Returns:
      The strictly-lower-triangular entries of the pairwise dot-product
      matrix, flattened to (batch, num_fields * (num_fields - 1) // 2).
    '''
    # Pairwise dot products between all field vectors: (batch, F, F).
    x2 = tf.matmul(x, x, transpose_b=True)
    x2_dim = x2.shape[-1]
    x2_ones = tf.ones_like(x2)
    # band_part(ones, 0, -1) keeps the upper triangle incl. the diagonal;
    # ones - that mask selects the strictly-lower triangle (each unordered
    # pair once, self-interactions excluded).
    x2_mask = tf.linalg.band_part(x2_ones, 0, -1)
    y = tf.boolean_mask(x2, x2_ones - x2_mask)
    y = tf.reshape(y, [-1, x2_dim * (x2_dim - 1) // 2])
    return y
class Cross(tf.layers.Layer):
  r'''DCN V2: Improved Deep & Cross Network and Practical Lessons for Web-scale
  Learning to Rank Systems.
  See https://arxiv.org/abs/2008.13535 for more information.
  '''
  def call(self, x):
    r'''Call the DCN cross layer.

    Args:
      x: feature tensor; the x.shape[1] * x.shape[2] reshape below implies
        rank-3 input (batch, num_fields, dim).

    Returns:
      A (batch, num_fields * dim) tensor.

    NOTE(review): this deviates from the paper's cross layer
    (x0 * (W x + b) + x): here a ReLU dense transform of x is used and
    there is no x0 from a previous layer — confirm this is intentional.
    '''
    x2 = tf.layers.dense(
        x, x.shape[-1],
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(),
        bias_initializer=tf.zeros_initializer())
    # Element-wise cross term plus the residual connection.
    y = x * x2 + x
    y = tf.reshape(y, [-1, x.shape[1] * x.shape[2]])
    return y
class Ranking(tf.layers.Layer):
  r'''A simple ranking model.

  Pipeline: dense values -> bottom MLP -> concat with sparse-feature
  embeddings -> feature interaction -> top MLP -> sigmoid output.
  '''
  def __init__(
      self,
      embedding_columns,
      bottom_mlp=None,
      top_mlp=None,
      feature_interaction=None,
      **kwargs):
    r'''Constructor.
    Args:
      embedding_columns: List of embedding columns; all must share one
        embedding dimension.
      bottom_mlp: List of bottom MLP dimensions.
      top_mlp: List of top MLP dimensions; the last entry is the output
        width.
      feature_interaction: Feature interaction layer class
        (defaults to DotInteract).
      **kwargs: keyword named properties.
    '''
    super().__init__(**kwargs)
    if bottom_mlp is None:
      bottom_mlp = [512, 256, 64]
    self.bottom_mlp = bottom_mlp
    if top_mlp is None:
      top_mlp = [1024, 1024, 512, 256, 1]
    self.top_mlp = top_mlp
    if feature_interaction is None:
      feature_interaction = DotInteract
    self.feature_interaction = feature_interaction
    self.embedding_columns = embedding_columns
    # All columns must agree on one dimension so embeddings can be stacked
    # with the bottom-MLP output into a single (batch, fields, dim) tensor.
    dimensions = {c.dimension for c in embedding_columns}
    if len(dimensions) > 1:
      raise ValueError('Only one dimension supported')
    self.dimension = list(dimensions)[0]
  def call(self, values, embeddings):
    r'''Call the dlrm model.

    Args:
      values: dense (continuous) features; log1p-transformed below, so
        values are assumed > -1 — TODO confirm.
      embeddings: list of sparse-feature embedding tensors, presumably
        each (batch, self.dimension) — verify against caller.

    Returns:
      The top-MLP output (sigmoid), shape (batch, self.top_mlp[-1]).
    '''
    with tf.name_scope('bottom_mlp'):
      # log(x + 1) squashes the typically long-tailed dense features.
      bot_mlp_input = tf.math.log(values + 1.)
      for i, d in enumerate(self.bottom_mlp):
        bot_mlp_input = tf.layers.dense(
          bot_mlp_input, d,
          activation=tf.nn.relu,
          kernel_initializer=tf.glorot_normal_initializer(),
          bias_initializer=tf.random_normal_initializer(
            mean=0.0,
            stddev=math.sqrt(1.0 / d)),
          name=f'bottom_mlp_{i}')
      # Project to the embedding dimension so the dense branch can join
      # the embeddings in the interaction step.
      bot_mlp_output = tf.layers.dense(
        bot_mlp_input, self.dimension,
        activation=tf.nn.relu,
        kernel_initializer=tf.glorot_normal_initializer(),
        bias_initializer=tf.random_normal_initializer(
          mean=0.0,
          stddev=math.sqrt(1.0 / self.dimension)),
        name='bottom_mlp_output')
    with tf.name_scope('feature_interaction'):
      # Stack the dense branch + all embeddings: (batch, 1 + F, dim).
      feat_interact_input = tf.concat([bot_mlp_output] + embeddings, axis=-1)
      feat_interact_input = tf.reshape(
        feat_interact_input,
        [-1, 1 + len(embeddings), self.dimension])
      feat_interact_output = self.feature_interaction()(feat_interact_input)
    with tf.name_scope('top_mlp'):
      top_mlp_input = tf.concat([bot_mlp_output, feat_interact_output], axis=1)
      # prev_d tracks the fan-in for Xavier-like stddevs:
      # C(num_fields + 1, 2) interaction terms + the dense branch width.
      # (Float under Python 3 division — harmless, it only feeds sqrt.)
      num_fields = len(self.embedding_columns)
      prev_d = (num_fields * (num_fields + 1)) / 2 + self.dimension
      for i, d in enumerate(self.top_mlp[:-1]):
        top_mlp_input = tf.layers.dense(
          top_mlp_input, d,
          activation=tf.nn.relu,
          kernel_initializer=tf.random_normal_initializer(
            mean=0.0,
            stddev=math.sqrt(2.0 / (prev_d + d))),
          bias_initializer=tf.random_normal_initializer(
            mean=0.0,
            stddev=math.sqrt(1.0 / d)),
          name=f'top_mlp_{i}')
        prev_d = d
      # Final layer: sigmoid squashes to (0, 1) — a CTR-style score.
      top_mlp_output = tf.layers.dense(
        top_mlp_input, self.top_mlp[-1],
        activation=tf.nn.sigmoid,
        kernel_initializer=tf.random_normal_initializer(
          mean=0.0,
          stddev=math.sqrt(2.0 / (prev_d + self.top_mlp[-1]))),
        bias_initializer=tf.random_normal_initializer(
          mean=0.0,
          stddev=math.sqrt(1.0 / self.top_mlp[-1])),
        name=f'top_mlp_{len(self.top_mlp) - 1}')
    return top_mlp_output
| StarcoderdataPython |
1669227 | <gh_stars>100-1000
from sanic import Sanic
from sanic import response
from sanic.exceptions import NotFound
from sanic_cors import CORS
import segment,project
# Sanic application with permissive CORS so the web front-end can call it.
app = Sanic(name=__name__)
CORS(app)
# camera = Camera()
# @app.route('/')
# def handle_request(request):
#     return response.html('<p>Hello world!</p><img src="/camera-stream/">')
@app.route('/face', methods=['POST'])
async def handle_face_count(request):
    """POST /face: count faces in a base64-encoded image.

    Expects a JSON body {"base64": <image>}; responds {"count": <int>}.
    An empty/missing body yields a count of 0.
    """
    payload = request.json
    count = segment.face_count(payload['base64']) if payload else 0
    return response.json({'count': count})
@app.route('/segment', methods=['POST'])
async def handle_segment(request):
    """POST /segment: run segmentation on a base64-encoded image.

    Expects a JSON body {"base64": <image>}; responds {"base64": <result>}.
    An empty/missing body yields an empty string.
    """
    payload = request.json
    result = segment.run(payload['base64']) if payload else ''
    return response.json({'base64': result})
@app.route('/project', methods=['POST'])
async def handle_project(request):
    """POST /project: project the posted view through `project.run`.

    Responds {"x": str, "y": str}; falls back to the centre (0.5, 0.5)
    when the body is missing a 'view' or projection fails (best-effort).
    """
    payload = request.json
    res = {"x": '0.5', "y": '0.5'}  # default: centre of the view
    try:
        res = project.run(payload['view'])
        # sanic's json dumps mishandles these ints, so convert to str first
        res['x'] = str(res['x'])
        res['y'] = str(res['y'])
    except Exception as exc:
        # Was a bare `except:` printing "-": keep the best-effort fallback
        # but narrow the catch (don't mask KeyboardInterrupt/SystemExit)
        # and surface the actual error for debugging.
        print("projection failed:", exc)
    return response.json(res)
# @app.route('/depth_estimation')
# async def handle_depth_estimation(request):
# return await response.file_stream(camera.depth_estimation_file)
# @app.route('/camera-stream/')
# async def camera_stream(request):
# return response.stream(
# camera.stream,
# content_type='multipart/x-mixed-replace; boundary=frame'
# )
if __name__ == '__main__':
    # Return an empty 404 body instead of Sanic's default error page.
    app.error_handler.add(
        NotFound,
        lambda r, e: response.empty(status=404)
    )
    app.run(host='0.0.0.0', port=8891,workers=1)
    # workers must be 1, otherwise the app fails to run; root cause not yet
    # identified — possibly PaddleHub does not support multiple workers.
126990 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shorturl
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
readme = open('README.rst').read()
setup(
name='django-shorturl',
packages=['shorturl'],
version='0.1.0',
description='A django short URL app.',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/lefterisnik/django-shorturl',
include_package_data=True,
license="BSD",
zip_safe=False,
keywords='django-shorturl',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
install_requires=[
'Django>=1.8,<1.9',
'python-social-auth',
],
)
| StarcoderdataPython |
3345683 | # <NAME> <<EMAIL>>
import copy
import logging
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.loop import EventsInDatasetReader, EventLoop
##__________________________________________________________________||
@pytest.fixture()
def eventLoopRunner():
    # Mocked runner; tests inspect its begin/run_multiple/end calls.
    return mock.Mock(name='eventLoopRunner')
@pytest.fixture()
def reader():
    ret = mock.Mock(name='reader')
    # Mock(name=...) only names the mock itself; configure_mock is needed
    # to actually set the `.name` attribute the code under test copies.
    ret.configure_mock(name='reader')
    return ret
@pytest.fixture()
def collector():
    return mock.Mock(name='collector')
@pytest.fixture()
def split_into_build_events():
    return mock.Mock(name='split_into_build_events')
@pytest.fixture()
def obj(eventLoopRunner, reader, collector, split_into_build_events):
    # The object under test, wired with the mocks above.
    return EventsInDatasetReader(eventLoopRunner, reader, collector,
                                 split_into_build_events)
##__________________________________________________________________||
def test_deprecated(eventLoopRunner, reader, collector, split_into_build_events, caplog):
    # Constructing EventsInDatasetReader must emit exactly one deprecation
    # warning from its own logger.
    with caplog.at_level(logging.WARNING):
        obj = EventsInDatasetReader(
            eventLoopRunner, reader, collector,
            split_into_build_events
        )
    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == 'WARNING'
    assert 'EventsInDatasetReader' in caplog.records[0].name
    assert 'deprecated' in caplog.records[0].msg
##__________________________________________________________________||
def test_repr(obj):
    # Smoke test: repr must not raise.
    repr(obj)
def test_begin(obj, eventLoopRunner):
    # begin() delegates exactly once to the runner's begin().
    assert 0 == eventLoopRunner.begin.call_count
    obj.begin()
    assert 1 == eventLoopRunner.begin.call_count
def test_begin_end(obj, eventLoopRunner, collector):
    # With no datasets read, end() collects an empty list and returns
    # whatever the collector produces.
    obj.begin()
    eventLoopRunner.end.return_value = [ ]
    end = obj.end()
    assert [mock.call([ ])] == collector.collect.call_args_list
    assert collector.collect() == end
def test_standard(obj, eventLoopRunner, reader, collector,
                  split_into_build_events, caplog):
    """End-to-end: three datasets with 3/0/1 event builders each. One
    EventLoop (with a fresh copy of the reader) must be created per
    builder, and end() must collect per-dataset tuples of readers."""
    ## begin
    obj.begin()
    ## create data sets
    # dataset1 - 3 event builders
    build_events1 = mock.Mock(name='build_events1')
    build_events2 = mock.Mock(name='build_events2')
    build_events3 = mock.Mock(name='build_events3')
    dataset1 = mock.Mock(name='dataset1', build_events=[build_events1, build_events2, build_events3])
    dataset1.configure_mock(name='dataset1')
    # dataset2 - no event builder
    dataset2 = mock.Mock(name='dataset2', build_events=[ ])
    dataset2.configure_mock(name='dataset2')
    # dataset3 - 1 event builder
    build_events4 = mock.Mock(name='build_events4')
    dataset3 = mock.Mock(name='dataset3', build_events=[build_events4])
    dataset3.configure_mock(name='dataset3')
    # The splitter simply hands back each dataset's builder list.
    split_into_build_events.side_effect = lambda dataset: dataset.build_events
    ## read
    obj.read(dataset1)
    obj.read(dataset2)
    obj.read(dataset3)
    # One run_multiple call per dataset; the plain run() is never used.
    assert 0 == eventLoopRunner.run.call_count
    assert 3 == eventLoopRunner.run_multiple.call_count
    # Each EventLoop wraps one builder and a *copy* of the reader.
    call1 = eventLoopRunner.run_multiple.call_args_list[0]
    eventLoop1 = call1[0][0][0]
    assert isinstance(eventLoop1, EventLoop)
    assert build_events1 is eventLoop1.build_events
    assert reader is not eventLoop1.reader
    assert 'reader' == eventLoop1.reader.name
    eventLoop2 = call1[0][0][1]
    assert isinstance(eventLoop2, EventLoop)
    assert build_events2 is eventLoop2.build_events
    assert reader is not eventLoop2.reader
    assert 'reader' == eventLoop2.reader.name
    eventLoop3 = call1[0][0][2]
    assert isinstance(eventLoop3, EventLoop)
    assert build_events3 is eventLoop3.build_events
    assert reader is not eventLoop3.reader
    assert 'reader' == eventLoop3.reader.name
    call2 = eventLoopRunner.run_multiple.call_args_list[1]
    assert mock.call([ ]) == call2
    call3 = eventLoopRunner.run_multiple.call_args_list[2]
    eventLoop4 = call3[0][0][0]
    assert isinstance(eventLoop4, EventLoop)
    assert build_events4 is eventLoop4.build_events
    assert reader is not eventLoop4.reader
    assert 'reader' == eventLoop4.reader.name
    ## end
    # end() regroups the flat reader list by dataset and collects it.
    eventLoopRunner.end.return_value = [
        eventLoop1.reader, eventLoop2.reader, eventLoop3.reader, eventLoop4.reader
    ]
    collector.collect.side_effect = lambda x: x
    assert 0 == eventLoopRunner.end.call_count
    assert 0 == collector.collect.call_count
    results = obj.end()
    assert 1 == eventLoopRunner.end.call_count
    assert 1 == collector.collect.call_count
    expected = [
        ('dataset1', (eventLoop1.reader, eventLoop2.reader, eventLoop3.reader), ),
        ('dataset2', ( )),
        ('dataset3', (eventLoop4.reader, )),
    ]
    assert expected == results
    assert [mock.call(expected)] == collector.collect.call_args_list
assert [mock.call(expected)] == collector.collect.call_args_list
def test_wrong_number_of_results(obj, eventLoopRunner, reader,
                                 collector, split_into_build_events,
                                 caplog):
    # If the runner returns more readers than dataset_nreaders accounts
    # for, end() must warn and return None instead of collecting.
    reader1 = mock.Mock(name='reader1')
    reader2 = mock.Mock(name='reader2')
    eventLoopRunner.end.return_value = [reader1, reader2]
    dataset1 = mock.Mock(name='dataset1')
    obj.dataset_nreaders[:] = [(dataset1, 1)]
    with caplog.at_level(logging.WARNING, logger = 'alphatwirl'):
        results = obj.end()
    assert results is None
    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == 'WARNING'
    assert 'EventsInDatasetReader' in caplog.records[0].name
    assert 'the same number of' in caplog.records[0].msg
##__________________________________________________________________||
| StarcoderdataPython |
66424 | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class MaSTr1325(CustomDataset):
    """MaSTr1325 maritime semantic-segmentation dataset.

    Three classes are annotated: land, water and sky. ``img_suffix`` is
    fixed to '.jpg', ``seg_map_suffix`` to '.png', and
    ``reduce_zero_label`` to False.

    NOTE(review): PALETTE lists four colours for three CLASSES — confirm
    whether a background class/entry is missing from CLASSES.

    Args:
        split (str): Split txt file for MaSTr1325.
    """
    CLASSES = ('land','water','sky')
    PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0]]
    def __init__(self, split, **kwargs):
        super(MaSTr1325, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=False,
            #att_metrics=['PRE', 'REC', 'F-measure', 'F-max', 'FPR', 'FNR'],
            **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None
| StarcoderdataPython |
3349531 | from __future__ import with_statement
import py
import sys
from rpython.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from rpython.rlib.parsing.codebuilder import Codebuilder
from rpython.rlib.objectmodel import we_are_translated
class BacktrackException(Exception):
    """Raised by generated packrat parsers when a parse attempt fails.

    ``error`` carries an ErrorInformation (position + expected tokens)
    or None.
    """
    def __init__(self, error=None):
        self.error = error
        # Under RPython translation Exception.__init__ is skipped; it is
        # only called here to get readable untranslated tracebacks.
        if not we_are_translated():
            Exception.__init__(self, error)
class TreeOptimizer(RPythonVisitor):
    """Simplify the grammar parse tree before parser code generation."""
    def visit_or(self, t):
        # A one-alternative `or` is redundant: replace it with its child.
        if len(t.children) == 1:
            return self.dispatch(t.children[0])
        return self.general_nonterminal_visit(t)
    visit_commands = visit_or  # same collapsing rule for command lists
    def visit_negation(self, t):
        # Double negation !!x is a positive lookahead: it matches x
        # without consuming input, so relabel the node.
        child = self.dispatch(t.children[0])
        if child.symbol == "negation":
            child.symbol = "lookahead"
            return child
        t.children[0] = child
        return t
    def general_nonterminal_visit(self, t):
        # Recurse into all children, rewriting them in place.
        for i in range(len(t.children)):
            t.children[i] = self.dispatch(t.children[i])
        return t
    def general_visit(self, t):
        # Leaves (symbols) pass through unchanged.
        return t
syntax = r"""
NAME:
`[a-zA-Z_][a-zA-Z0-9_]*`;
SPACE:
' ';
COMMENT:
`( *#[^\n]*\n)+`;
IGNORE:
`(#[^\n]*\n)|\n|\t| `;
newline:
COMMENT
| `( *\n *)*`;
REGEX:
r = `\`[^\\\`]*(\\.[^\\\`]*)*\``
return {Symbol('REGEX', r, None)};
QUOTE:
r = `'[^\']*'`
return {Symbol('QUOTE', r, None)};
PYTHONCODE:
r = `\{[^\n\}]*\}`
return {Symbol('PYTHONCODE', r, None)};
EOF:
!__any__;
file:
IGNORE*
list
[EOF];
list:
content = production+
return {Nonterminal('list', content)};
production:
name = NAME
SPACE*
args = productionargs
':'
IGNORE*
what = or_
IGNORE*
';'
IGNORE*
return {Nonterminal('production', [name, args, what])};
productionargs:
'('
IGNORE*
args = (
NAME
[
IGNORE*
','
IGNORE*
]
)*
arg = NAME
IGNORE*
')'
IGNORE*
return {Nonterminal('productionargs', args + [arg])}
| return {Nonterminal('productionargs', [])};
or_:
l = (commands ['|' IGNORE*])+
last = commands
return {Nonterminal('or', l + [last])}
| commands;
commands:
cmd = command
newline
cmds = (command [newline])+
return {Nonterminal('commands', [cmd] + cmds)}
| command;
command:
simplecommand;
simplecommand:
return_
| if_
| named_command
| repetition
| choose
| negation;
return_:
'return'
SPACE*
code = PYTHONCODE
IGNORE*
return {Nonterminal('return', [code])};
if_:
'do'
newline
cmd = command
SPACE*
'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [cmd, condition])}
| 'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [condition])};
choose:
'choose'
SPACE*
name = NAME
SPACE*
'in'
SPACE*
expr = PYTHONCODE
IGNORE*
cmds = commands
return {Nonterminal('choose', [name, expr, cmds])};
commandchain:
result = simplecommand+
return {Nonterminal('commands', result)};
named_command:
name = NAME
SPACE*
'='
SPACE*
cmd = command
return {Nonterminal('named_command', [name, cmd])};
repetition:
what = enclosed
SPACE* '?' IGNORE*
return {Nonterminal('maybe', [what])}
| what = enclosed
SPACE*
repetition = ('*' | '+')
IGNORE*
return {Nonterminal('repetition', [repetition, what])};
negation:
'!'
SPACE*
what = negation
IGNORE*
return {Nonterminal('negation', [what])}
| enclosed;
enclosed:
'<'
IGNORE*
what = primary
IGNORE*
'>'
IGNORE*
return {Nonterminal('exclusive', [what])}
| '['
IGNORE*
what = or_
IGNORE*
']'
IGNORE*
return {Nonterminal('ignore', [what])}
| ['(' IGNORE*] or_ [')' IGNORE*]
| primary;
primary:
call | REGEX [IGNORE*] | QUOTE [IGNORE*];
call:
x = NAME
args = arguments
IGNORE*
return {Nonterminal("call", [x, args])};
arguments:
'('
IGNORE*
args = (
PYTHONCODE
[IGNORE* ',' IGNORE*]
)*
last = PYTHONCODE
')'
IGNORE*
return {Nonterminal("args", args + [last])}
| return {Nonterminal("args", [])};
"""
class ErrorInformation(object):
    """Parse-failure details: input position plus the expected tokens.

    Attached to BacktrackException and rendered by nice_error_message().
    """
    def __init__(self, pos, expected=None):
        if expected is None:
            expected = []
        self.expected = expected
        self.pos = pos

    def __str__(self):
        return "ErrorInformation(%s, %s)" % (self.pos, self.expected)

    def get_line_column(self, source):
        """Return the (0-based line, column) of self.pos within source."""
        pos = self.pos
        assert pos >= 0
        uptoerror = source[:pos]
        lineno = uptoerror.count("\n")
        columnno = pos - uptoerror.rfind("\n")
        return lineno, columnno

    def nice_error_message(self, filename='<filename>', source=""):
        """Render a human-readable error, with a source excerpt if given."""
        # BUG FIX: `result` used to be created only inside the `if source:`
        # branch, so calling this without source raised NameError on the
        # append in the else branch. Initialize it up front.
        result = []
        if source:
            lineno, columnno = self.get_line_column(source)
            result.append(" File %s, line %s" % (filename, lineno + 1))
            result.append(source.split("\n")[lineno])
            result.append(" " * columnno + "^")
        else:
            result.append("<couldn't get source>")
        if self.expected:
            failure_reasons = self.expected
            if len(failure_reasons) > 1:
                all_but_one = failure_reasons[:-1]
                last = failure_reasons[-1]
                expected = "%s or '%s'" % (
                    ", ".join(["'%s'" % e for e in all_but_one]), last)
            else:
                expected = failure_reasons[0]
            result.append("ParseError: expected %s" % (expected, ))
        else:
            result.append("ParseError")
        return "\n".join(result)
class Status(object):
    """Memoization cell for one (position, production) of a packrat parser."""

    # Status codes for a memoized parse attempt.
    NORMAL = 0         # finished successfully
    ERROR = 1          # finished with a parse error
    INPROGRESS = 2     # currently being computed
    LEFTRECURSION = 3  # re-entered while in progress (left-recursion seed)
    SOMESOLUTIONS = 4  # left recursion produced at least one solution

    def __init__(self):
        self.pos = 0
        self.error = None
        self.status = self.INPROGRESS
        self.result = None

    def __repr__(self):
        fields = (self.pos, self.result, self.error, self.status)
        return "Status(%s, %s, %s, %s)" % fields
class ParserBuilder(RPythonVisitor, Codebuilder):
    def __init__(self):
        Codebuilder.__init__(self)
        self.initcode = []   # lines emitted into the generated Parser.__init__
        self.names = {}      # production names seen so far (duplicate check)
        self.matchers = {}   # regex text -> generated DFA matcher source
    def make_parser(self):
        # Compile the accumulated generated source (Python 2 exec-in-dict)
        # and return the resulting Parser class.
        m = {'Status': Status,
             'Nonterminal': Nonterminal,
             'Symbol': Symbol,}
        exec py.code.Source(self.get_code()).compile() in m
        return m['Parser']
    def memoize_header(self, name, args):
        # Emit the packrat-memoization prologue for production `name`:
        # the generated code looks up a Status cell keyed by (position,
        # extra args) and either returns the cached result, re-raises the
        # cached error, or handles an in-progress / left-recursive call.
        dictname = "_dict_%s" % (name, )
        self.emit_initcode("self.%s = {}" % (dictname, ))
        if args:
            self.emit("_key = (self._pos, %s)" % (", ".join(args)))
        else:
            self.emit("_key = self._pos")
        self.emit("_status = self.%s.get(_key, None)" % (dictname, ))
        with self.block("if _status is None:"):
            self.emit("_status = self.%s[_key] = Status()" % (
                dictname, ))
        with self.block("else:"):
            self.emit("_statusstatus = _status.status")
            with self.block("if _statusstatus == _status.NORMAL:"):
                self.emit("self._pos = _status.pos")
                self.emit("return _status")
            with self.block("elif _statusstatus == _status.ERROR:"):
                self.emit("raise BacktrackException(_status.error)")
            if self.have_call:
                # Left-recursion handling is only emitted when this
                # production can call other productions.
                with self.block(
                    "elif (_statusstatus == _status.INPROGRESS or\n"
                    "      _statusstatus == _status.LEFTRECURSION):"):
                    self.emit("_status.status = _status.LEFTRECURSION")
                    with self.block("if _status.result is not None:"):
                        self.emit("self._pos = _status.pos")
                        self.emit("return _status")
                    with self.block("else:"):
                        self.emit("raise BacktrackException(None)")
                with self.block(
                    "elif _statusstatus == _status.SOMESOLUTIONS:"):
                    self.emit("_status.status = _status.INPROGRESS")
        self.emit("_startingpos = self._pos")
        self.start_block("try:")
        self.emit("_result = None")
        self.emit("_error = None")
    def memoize_footer(self, name, args):
        # Emit the memoization epilogue: on success, either grow a
        # left-recursion seed by re-invoking the production, or store the
        # NORMAL result; on BacktrackException, cache the ERROR status.
        dictname = "_dict_%s" % (name, )
        if self.have_call:
            with self.block(
                    "if _status.status == _status.LEFTRECURSION:"):
                with self.block("if _status.result is not None:"):
                    with self.block("if _status.pos >= self._pos:"):
                        self.emit("_status.status = _status.NORMAL")
                        self.emit("self._pos = _status.pos")
                        self.emit("return _status")
                self.emit("_status.pos = self._pos")
                self.emit("_status.status = _status.SOMESOLUTIONS")
                self.emit("_status.result = %s" % (self.resultname, ))
                self.emit("_status.error = _error")
                self.emit("self._pos = _startingpos")
                self.emit("return self._%s(%s)" % (name, ', '.join(args)))
        else:
            self.emit("assert _status.status != _status.LEFTRECURSION")
        self.emit("_status.status = _status.NORMAL")
        self.emit("_status.pos = self._pos")
        self.emit("_status.result = %s" % (self.resultname, ))
        self.emit("_status.error = _error")
        self.emit("return _status")
        self.end_block("try")
        with self.block("except BacktrackException, _exc:"):
            self.emit("_status.pos = -1")
            self.emit("_status.result = None")
            self.combine_error('_exc.error')
            self.emit("_status.error = _error")
            self.emit("_status.status = _status.ERROR")
            self.emit("raise BacktrackException(_error)")
    def choice_point(self, name=None):
        # Save the current input position into a fresh generated local so
        # a failed alternative can restore it later (see revert()).
        var = "_choice%s" % (self.namecount, )
        self.namecount += 1
        self.emit("%s = self._pos" % (var, ))
        return var
    def revert(self, var):
        # Restore the input position saved by choice_point().
        self.emit("self._pos = %s" % (var, ))
    def visit_list(self, t):
        # Grammar root: generate one Parser class with a method pair per
        # production, an __init__ from the collected initcode, and (if any
        # regexes were used) the embedded DFA matchers.
        self.start_block("class Parser(object):")
        for elt in t.children:
            self.dispatch(elt)
        with self.block("def __init__(self, inputstream):"):
            for line in self.initcode:
                self.emit(line)
            self.emit("self._pos = 0")
            self.emit("self._inputstream = inputstream")
        if self.matchers:
            self.emit_regex_code()
        self.end_block("class")
    def emit_regex_code(self):
        # For every regex used by the grammar, emit a _regex<hash> method
        # that runs the corresponding DFA over the input and returns the
        # matched slice (or backtracks on no match), plus a _Runner class
        # holding the generated recognize_<hash> DFA functions.
        for regex, matcher in self.matchers.iteritems():
            with self.block(
                    "def _regex%s(self):" % (abs(hash(regex)), )):
                c = self.choice_point()
                self.emit("_runner = self._Runner(self._inputstream, self._pos)")
                self.emit("_i = _runner.recognize_%s(self._pos)" % (
                    abs(hash(regex)), ))
                self.start_block("if _runner.last_matched_state == -1:")
                self.revert(c)
                self.emit("raise BacktrackException")
                self.end_block("if")
                self.emit("_upto = _runner.last_matched_index + 1")
                self.emit("_pos = self._pos")
                self.emit("assert _pos >= 0")
                self.emit("assert _upto >= 0")
                self.emit("_result = self._inputstream[_pos: _upto]")
                self.emit("self._pos = _upto")
                self.emit("return _result")
        with self.block("class _Runner(object):"):
            with self.block("def __init__(self, text, pos):"):
                self.emit("self.text = text")
                self.emit("self.pos = pos")
                self.emit("self.last_matched_state = -1")
                self.emit("self.last_matched_index = -1")
                self.emit("self.state = -1")
            for regex, matcher in self.matchers.iteritems():
                # Rename the generic `recognize` entry point so several
                # matchers can coexist inside one _Runner class.
                matcher = str(matcher).replace(
                    "def recognize(runner, i)",
                    "def recognize_%s(runner, i)" % (abs(hash(regex)), ))
                self.emit(str(matcher))
    def visit_production(self, t):
        # One grammar production -> two generated methods: a public
        # wrapper `name(...)` returning the result, and `_name(...)`
        # containing the memoized parsing body.
        name = t.children[0]
        if name in self.names:
            raise Exception("name %s appears twice" % (name, ))
        self.names[name] = True
        otherargs = t.children[1].children
        argswithself = ", ".join(["self"] + otherargs)
        argswithoutself = ", ".join(otherargs)
        with self.block("def %s(%s):" % (name, argswithself)):
            self.emit("return self._%s(%s).result" % (name, argswithoutself))
        self.start_block("def _%s(%s):" % (name, argswithself, ))
        # Per-production code-generation state.
        self.namecount = 0
        self.resultname = "_result"
        self.have_call = False
        self.created_error = False
        # Generate the body first (so have_call is known), then wrap it
        # with the memoization header/footer.
        allother = self.store_code_away()
        self.dispatch(t.children[-1])
        subsequent = self.restore_code(allother)
        self.memoize_header(name, otherargs)
        self.add_code(subsequent)
        self.memoize_footer(name, otherargs)
        self.end_block("def")
    def visit_or(self, t, first=False):
        # Ordered choice: each alternative is tried inside a generated
        # try/except; a failure restores the position and falls through to
        # the next alternative, the last failure re-raises.
        possibilities = t.children
        if len(possibilities) > 1:
            self.start_block("while 1:")
        for i, p in enumerate(possibilities):
            c = self.choice_point()
            with self.block("try:"):
                self.dispatch(p)
                self.emit("break")
            with self.block("except BacktrackException, _exc:"):
                self.combine_error('_exc.error')
                self.revert(c)
                if i == len(possibilities) - 1:
                    self.emit("raise BacktrackException(_error)")
        self.dispatch(possibilities[-1])
        if len(possibilities) > 1:
            self.emit("break")
            self.end_block("while")
    def visit_commands(self, t):
        # A sequence of commands: generate each in order.
        for elt in t.children:
            self.dispatch(elt)
    def visit_maybe(self, t):
        # Optional element (`?`): try it, and on failure just restore the
        # position — the overall parse continues.
        c = self.choice_point()
        with self.block("try:"):
            self.dispatch(t.children[0])
        with self.block("except BacktrackException:"):
            self.revert(c)
    def visit_repetition(self, t):
        # `*` / `+`: accumulate results into a generated list; `+` emits
        # one mandatory match before the open-ended loop.
        name = "_all%s" % (self.namecount, )
        self.namecount += 1
        self.emit("%s = []" % (name, ))
        if t.children[0] == '+':
            self.dispatch(t.children[1])
            self.emit("%s.append(_result)" % (name, ))
        with self.block("while 1:"):
            c = self.choice_point()
            with self.block("try:"):
                self.dispatch(t.children[1])
                self.emit("%s.append(_result)" % (name, ))
            with self.block("except BacktrackException, _exc:"):
                self.combine_error('_exc.error')
                self.revert(c)
                self.emit("break")
        self.emit("_result = %s" % (name, ))
    def visit_exclusive(self, t):
        # `<...>`: the enclosed element's result becomes the production's
        # result (tracked via resultname for the memoize footer).
        self.resultname = "_enclosed"
        self.dispatch(t.children[0])
        self.emit("_enclosed = _result")
    def visit_ignore(self, t):
        # `[...]`: parse the element but discard its result, restoring the
        # previous _result afterwards.
        resultname = "_before_discard%i" % (self.namecount, )
        self.namecount += 1
        self.emit("%s = _result" % (resultname, ))
        self.dispatch(t.children[0])
        self.emit("_result = %s" % (resultname, ))
    def visit_negation(self, t):
        # `!x`: succeed (without consuming) only if x fails; if x matches,
        # raise to fail the negation.
        c = self.choice_point()
        resultname = "_stored_result%i" % (self.namecount, )
        self.namecount += 1
        child = t.children[0]
        self.emit("%s = _result" % (resultname, ))
        with self.block("try:"):
            self.dispatch(child)
        with self.block("except BacktrackException:"):
            self.revert(c)
            self.emit("_result = %s" % (resultname, ))
        with self.block("else:"):
            # heuristic to get nice error messages sometimes
            if isinstance(child, Symbol) and child.symbol == "QUOTE":
                error = "self._ErrorInformation(%s, ['NOT %s'])" % (
                    c, child.additional_info[1:-1], )
            else:
                error = "None"
            self.emit("raise BacktrackException(%s)" % (error, ))
    def visit_lookahead(self, t):
        # Positive lookahead (from !!x): match x, then restore position
        # and the previous _result.
        resultname = "_stored_result%i" % (self.namecount, )
        self.emit("%s = _result" % (resultname, ))
        c = self.choice_point()
        self.dispatch(t.children[0])
        self.revert(c)
        self.emit("_result = %s" % (resultname, ))
    def visit_named_command(self, t):
        # `name = cmd`: bind the command's result to a generated local.
        name = t.children[0]
        self.dispatch(t.children[1])
        self.emit("%s = _result" % (name, ))
    def visit_return(self, t):
        # `return {expr}`: splice the user's Python expression verbatim.
        self.emit("_result = (%s)" % (t.children[0].additional_info[1:-1], ))
    def visit_if(self, t):
        # 'if {cond}' guard (optionally preceded by a command node):
        # emit code that backtracks when the inline condition is false.
        if len(t.children) == 2:
            self.dispatch(t.children[0])
        with self.block("if not (%s):" % (
                t.children[-1].additional_info[1:-1], )):
            self.emit("raise BacktrackException(")
            self.emit("    self._ErrorInformation(")
            self.emit("        _startingpos, ['condition not met']))")
    def visit_choose(self, t):
        # 'choose var in {options}': emit a for-loop that tries the body
        # with each candidate value, breaking on the first success; the
        # generated for/else raises when every candidate backtracks.
        with self.block("for %s in (%s):" % (
                t.children[0], t.children[1].additional_info[1:-1], )):
            with self.block("try:"):
                self.dispatch(t.children[2])
                self.emit("break")
            with self.block("except BacktrackException, _exc:"):
                self.combine_error('_exc.error')
        with self.block("else:"):
            self.emit("raise BacktrackException(_error)")
    def visit_call(self, t):
        # Emit a call to another rule (or a builtin helper). Names
        # starting with '_' are treated as raw helpers returning the
        # result directly; ordinary rule names are prefixed with '_' and
        # return a status object whose result/error must be unpacked.
        self.have_call = True
        args = ", ".join(['(%s)' % (arg.additional_info[1:-1], )
                          for arg in t.children[1].children])
        if t.children[0].startswith("_"):
            callname = t.children[0]
            self.emit("_result = self.%s(%s)" % (callname, args))
        else:
            callname = "_" + t.children[0]
            self.emit("_call_status = self.%s(%s)" % (callname, args))
            self.emit("_result = _call_status.result")
            self.combine_error('_call_status.error')
    def visit_REGEX(self, t):
        # Strip the surrounding backticks, unescape embedded backticks,
        # and emit a call to the per-regex matcher method. get_regex is
        # invoked for its side effect of registering the compiled matcher.
        r = t.additional_info[1:-1].replace('\\`', '`')
        matcher = self.get_regex(r)
        self.emit("_result = self._regex%s()" % (abs(hash(r)), ))
    def visit_QUOTE(self, t):
        # Literal token: emit a match of the exact characters between the
        # quotes via the runtime __chars__ helper.
        self.emit("_result = self.__chars__(%r)" % (
            str(t.additional_info[1:-1]), ))
def get_regex(self, r):
from rpython.rlib.parsing.regexparse import parse_regex
if r in self.matchers:
return self.matchers[r]
regex = parse_regex(r)
if regex is None:
raise ValueError(
"%s is not a valid regular expression" % regextext)
automaton = regex.make_automaton().make_deterministic()
automaton.optimize()
matcher = automaton.make_lexing_code()
self.matchers[r] = py.code.Source(matcher)
return matcher
    def combine_error(self, newerror):
        # Emit code that folds *newerror* into the running _error local.
        # The first time, a plain assignment suffices; afterwards, merge
        # via the runtime _combine_errors helper.
        if self.created_error:
            self.emit(
                "_error = self._combine_errors(_error, %s)" % (newerror, ))
        else:
            self.emit("_error = %s" % (newerror, ))
            self.created_error = True
class MetaPackratParser(type):
    """Metaclass that compiles a class' docstring (a packrat grammar)
    into parser methods and grafts them onto the newly created class.

    Classes without a docstring are created unchanged. Parse errors in
    the grammar are reported with a best-effort pointer back into the
    defining source file before being re-raised.
    """
    def __new__(cls, name_, bases, dct):
        if '__doc__' not in dct or dct['__doc__'] is None:
            # No grammar to compile; behave like a plain type().
            return type.__new__(cls, name_, bases, dct)
        from pypackrat import PyPackratSyntaxParser
        import sys, new, inspect
        # Frame of the class definition site; used to resolve globals
        # for the generated functions and for error reporting.
        frame = sys._getframe(1)
        source = dct['__doc__']
        p = PyPackratSyntaxParser(source)
        try:
            t = p.file()
        except BacktrackException as exc:
            print exc.error.nice_error_message("<docstring>", source)
            lineno, _ = exc.error.get_line_column(source)
            errorline = source.split("\n")[lineno]
            try:
                # Heuristically locate the offending grammar line inside
                # the original source file of the defining class.
                code = frame.f_code
                source = inspect.getsource(code)
                lineno_in_orig = source.split("\n").index(errorline)
                if lineno_in_orig >= 0:
                    print "probable error position:"
                    print "file:", code.co_filename
                    print "line:", lineno_in_orig + code.co_firstlineno + 1
            except (IOError, ValueError):
                pass
            raise exc
        # Optimize the grammar tree, then generate a parser class.
        t = t.visit(TreeOptimizer())
        visitor = ParserBuilder()
        t.visit(visitor)
        pcls = visitor.make_parser()
        # Attributes that must never be copied onto the result class.
        forbidden = dict.fromkeys(("__weakref__ __doc__ "
                                   "__dict__ __module__").split())
        initthere = "__init__" in dct  # NOTE(review): apparently unused here
        #XXX XXX XXX
        if 'BacktrackException' not in frame.f_globals:
            raise Exception("must import BacktrackException")
        if 'Status' not in frame.f_globals:
            raise Exception("must import Status")
        result = type.__new__(cls, name_, bases, dct)
        for key, value in pcls.__dict__.iteritems():
            if isinstance(value, type):
                value.__module__ = result.__module__ #XXX help the annotator
            if isinstance(value, type(lambda: None)):
                # Rebind generated functions to the defining module's
                # globals so names like BacktrackException resolve.
                value = new.function(value.func_code, frame.f_globals)
            if not hasattr(result, key) and key not in forbidden:
                setattr(result, key, value)
        if result.__init__ == object.__init__:
            result.__init__ = pcls.__dict__['__init__']
        # Always expose the generated __init__ as init_parser, and keep
        # the generated source for debugging.
        result.init_parser = pcls.__dict__['__init__']
        result._code = visitor.get_code()
        return result
class PackratParser(object):
    """Base class for generated packrat parsers.

    MetaPackratParser compiles a subclass' docstring (the grammar) into
    parsing methods; the helpers below are the shared runtime primitives
    those generated methods call.
    """
    __metaclass__ = MetaPackratParser

    _ErrorInformation = ErrorInformation
    _BacktrackException = BacktrackException

    def __chars__(self, chars):
        # Match the literal string *chars* at the current position,
        # advancing past it on success; backtrack on mismatch or when
        # the input ends early (IndexError).
        #print '__chars__(%s)' % (chars, ), self._pos
        try:
            for i in range(len(chars)):
                if self._inputstream[self._pos + i] != chars[i]:
                    raise BacktrackException(
                        self._ErrorInformation(self._pos, [chars]))
            self._pos += len(chars)
            return chars
        except IndexError:
            raise BacktrackException(
                self._ErrorInformation(self._pos, [chars]))

    def __any__(self):
        # Consume and return a single character; fail at end of input.
        try:
            result = self._inputstream[self._pos]
            self._pos += 1
            return result
        except IndexError:
            raise BacktrackException(
                self._ErrorInformation(self._pos, ['anything']))

    def _combine_errors(self, error1, error2):
        # Merge two backtrack errors: prefer the one that got further
        # into the input; on a tie, union the expected-token lists while
        # preserving order and dropping duplicates.
        if error1 is None:
            return error2
        if (error2 is None or error1.pos > error2.pos or
                len(error2.expected) == 0):
            return error1
        elif error2.pos > error1.pos or len(error1.expected) == 0:
            return error2
        expected = []
        already_there = {}
        for ep in [error1.expected, error2.expected]:
            for reason in ep:
                if reason not in already_there:
                    already_there[reason] = True
                    expected.append(reason)
        return ErrorInformation(error1.pos, expected)
def test_generate():
    # Bootstrap step: parse the module-level `syntax` grammar with the
    # current pypackrat parser, regenerate the parser source, and write
    # it back to pypackrat.py next to this file.
    f = py.path.local(__file__).dirpath().join("pypackrat.py")
    from pypackrat import PyPackratSyntaxParser
    p = PyPackratSyntaxParser(syntax)
    t = p.file()
    t = t.visit(TreeOptimizer())
    visitor = ParserBuilder()
    t.visit(visitor)
    code = visitor.get_code()
    # Template for the regenerated module; %s receives the generated
    # Parser class source. The trailing lines copy the generated methods
    # onto PyPackratSyntaxParser at import time.
    content = """
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
%s
class PyPackratSyntaxParser(PackratParser):
    def __init__(self, stream):
        self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
""" % (code, )
    print content
    f.write(content)
| StarcoderdataPython |
3281468 | import os
import time
import numpy as np
import collections
import scipy
import scipy.sparse
import scipy.sparse.linalg
import scikits.sparse.cholmod
import sklearn.preprocessing
import hashlib
import types
import marshal
import pyublas
import cPickle as pickle
from collections import OrderedDict
from sigvisa.treegp.gp import GP, GPCov
from sigvisa.treegp.features import featurizer_from_string, recover_featurizer
from sigvisa.treegp.cover_tree import VectorTree
from sigvisa.models.spatial_regression.baseline_models import ParamModel
from sigvisa.utils.fileutils import mkdir_p
class LocalGPEnsemble(ParamModel):
    """Spatial regression model built from independent local GPs.

    Training points are partitioned by nearest cluster center; each
    cluster gets its own GP (own covariance and noise variance). An
    optional global parametric mean (basis features with a Gaussian
    prior on the weights) is fit jointly on top, following the low-rank
    treatment in Rasmussen & Williams sec. 2.7.
    """

    def _build_local_gps(self, X, y, yvars, **kwargs):
        # Assign each training point to its nearest cluster center and
        # fit one independent GP per cluster. Returns the per-cluster
        # GPs plus the training data re-sorted into cluster order (so
        # block-diagonal matrices below line up with self.X/self.y).
        cluster_distances = self.cluster_tree.kernel_matrix(pyublas.why_not(X),
                                                            pyublas.why_not(self.cluster_centers), True)
        closest_cluster = np.argmin(cluster_distances, axis=1)
        local_gps = []
        sorted_X = []
        sorted_y = []
        sorted_yvars = []
        for i, ctr in enumerate(self.cluster_centers):
            cov = self.cluster_covs[i]
            noise_var = self.cluster_noise_vars[i]
            cluster_idxs = (closest_cluster == i)
            cluster_X = X[cluster_idxs]
            cluster_y = y[cluster_idxs]
            cluster_yvars = yvars[cluster_idxs]
            if len(cluster_y) == 0:
                # Empty cluster: insert one dummy observation with huge
                # variance so the GP machinery still has data to hold.
                cluster_X = np.zeros((1, 5,))
                cluster_y = np.zeros((1,))
                cluster_yvars = np.ones((1,)) * 1e20
            sorted_X.append(cluster_X)
            sorted_y.append(cluster_y)
            sorted_yvars.append(cluster_yvars)
            lgp = GP(X=cluster_X, y=cluster_y, y_obs_variances=cluster_yvars,
                     cov_main=cov, noise_var=noise_var,
                     sort_events=False,
                     sparse_invert=False,
                     **kwargs)
            local_gps.append(lgp)
        sorted_X = np.vstack(sorted_X)
        sorted_y = np.concatenate(sorted_y)
        sorted_yvars = np.concatenate(sorted_yvars)
        return local_gps, sorted_X, sorted_y, sorted_yvars

    def __init__(self, X, y,
                 cluster_centers,
                 cluster_covs,
                 cluster_noise_vars,
                 yvars=None,
                 basis=None,
                 extract_dim=None,
                 prior_mean=None,
                 prior_cov=None,
                 featurizer_recovery=None,
                 **kwargs):
        # X, y: training inputs/targets. yvars: per-point observation
        # variances (defaults to zero). basis/prior_mean/prior_cov
        # define the optional parametric mean component.
        ParamModel.__init__(self, **kwargs)

        self.cluster_centers = cluster_centers
        self.cluster_covs = cluster_covs
        self.cluster_noise_vars = cluster_noise_vars
        # Fixed metric used only to assign points to clusters.
        self.cluster_metric = GPCov(wfn_str="se", dfn_str="lld", dfn_params=(1.0, 1.0), wfn_params=(1.0,))
        self.cluster_tree = VectorTree(cluster_centers, 1, *self.cluster_metric.tree_params())

        if yvars is None:
            yvars = np.zeros(y.shape)
        # Training data is re-sorted into cluster order here.
        self.local_gps, self.X, self.y, self.yvars = self._build_local_gps(X, y, yvars,
                                                                           compute_ll=(basis is None))
        self.local_gp_cache = None
        self.n = len(self.y)

        self.basis = basis
        self.extract_dim = extract_dim
        self.prior_mean = prior_mean
        self.prior_cov = prior_cov
        self.featurizer_recovery = featurizer_recovery

        # setup parametric features if needed
        H = None
        self.featurizer = None
        self.featurizer_recovery = None
        if featurizer_recovery is None:
            if basis is not None:
                H, self.featurizer, self.featurizer_recovery = featurizer_from_string(self.X, basis, extract_dim=extract_dim, transpose=True)
        else:
            self.featurizer, self.featurizer_recovery = recover_featurizer(basis, featurizer_recovery, transpose=True)
            H = self.featurizer(self.X)

        # Block-diagonal assembly of per-cluster precision / Cholesky
        # factors; valid because self.X/self.y are in cluster order.
        self.Kinv = scipy.sparse.block_diag([gp.Kinv for gp in self.local_gps])
        self.L = scipy.sparse.block_diag([gp.L for gp in self.local_gps])
        self.alpha = np.concatenate([gp.alpha_r.flatten() for gp in self.local_gps])
        # Cumulative point counts: offsets of each cluster's block.
        self.local_cumpts = np.cumsum([lgp.n for lgp in self.local_gps])

        def build_low_rank_model(alpha, Kinv_sp, H, b, Binv):
            """
            let n be the training size; we'll use an additional rank-m approximation.
            the notation here follows section 2.7 in Rasmussen & Williams. For
            simplicity, K refers to the observation covariance matrix rather than the
            underlying function covariance (i.e. it might really be K+noise_var*I, or
            K+diag(y_obs_variances), etc.)

            takes:
            alpha: n x 1, equal to K^-1 y
            Kinv_sp: n x n sparse matrix, equal to K^-1
            H: n x m features of training data (this is Qfu for FIC)
            b: m x 1 prior mean on feature weights (this is 0 for FIC)
            B: m x m prior covariance on feature weights (this is Quu for FIC)

            returns:
            invc = inv(chol(M)), where M = (B^-1 + H K^-1 H^T)^-1 is the
            posterior covariance matrix on feature weights
            beta_bar = M (HK^-1y + B^-1 b) gives the weights for the correction
            of the low-rank component to the mean prediction
            HKinv = HK^-1 comes up in the marginal likelihood computation, so we
            go ahead and remember the value we compute now.
            """
            # tmp = H * K^-1 * y + B^-1 * b
            tmp = np.reshape(np.asarray(np.dot(H, alpha)), (-1,))
            tmp += np.dot(Binv, b)
            HKinv = H * Kinv_sp
            M_inv = Binv + np.dot(HKinv, H.T)
            c = scipy.linalg.cholesky(M_inv, lower=True)
            beta_bar = scipy.linalg.cho_solve((c, True), tmp)
            invc = scipy.linalg.inv(c)
            return c, invc, beta_bar, HKinv

        if self.basis is None:
            self.n_features = 0
        else:
            self.n_features = len(self.prior_mean)
            b = self.prior_mean
            Binv = np.linalg.inv(self.prior_cov)
            self.c, self.invc, self.beta_bar, self.HKinv = build_low_rank_model(self.alpha,
                                                                               self.Kinv,
                                                                               H, b, Binv)
            # Residual of the prior parametric mean against the targets.
            self.z = np.dot(H.T, b) - self.y

    def _x_to_cluster(self, X1):
        # Index of the nearest cluster center for each row of X1.
        dists = self.cluster_tree.kernel_matrix(X1, self.cluster_centers, True)
        return np.argmin(dists, axis=1)

    def param_mean(self):
        # Posterior mean of the basis weights (empty if no basis).
        try:
            return self.beta_bar
        except:
            return np.zeros((0,))

    def param_covariance(self, chol=False):
        # Posterior covariance of the basis weights; chol=True returns
        # the inverse Cholesky factor instead.
        if chol:
            return self.invc
        else:
            return np.dot(self.invc.T, self.invc)

    def get_data_features(self, X):
        # compute the full set of features for a matrix X of test points
        features = np.zeros((self.n_features, X.shape[0]))
        i = 0
        if self.featurizer is not None:
            F = self.featurizer(X)
            i = F.shape[0]
            features[:i, :] = F
        return features

    def sample(self, cond, include_obs=True, **kwargs):
        # Draw one Gaussian sample from the predictive distribution.
        mean = self.predict(cond)
        variance = self.variance(cond, include_obs=include_obs)
        return np.random.randn() * np.sqrt(variance) + mean

    def log_p(self, x, cond, include_obs=True, **kwargs):
        # Gaussian predictive log-density of scalar observation x.
        y = float(x)
        mean = float(self.predict(cond))
        variance = float(self.variance(cond, include_obs=include_obs))
        return - .5 * ((y - mean) ** 2 / variance + np.log(2 * np.pi * variance))

    def force_load_localgps(self):
        # Ensure every lazily-loaded local GP is resident in memory.
        self.cache_capacity = len(self.local_gps)
        for i in range(len(self.local_gps)):
            self.local_gps[i] = self.get_local_gp(i)

    def get_local_gp(self, idx):
        # note there's no real reason for a separate self.local_gps
        # and self.local_gp_cache, so self.local_gps could probably be
        # eliminated.
        idx = int(idx)
        if self.local_gps[idx] is None:
            # Lazily load this cluster's GP from disk.
            fname = os.path.join(self.lazyload_localgp_dir, "local%03d.gp" % idx)
            lgp = GP(fname=fname, sparse_invert=True)
            self.local_gps[idx] = lgp
            if self.local_gp_cache is not None:
                # if needed, evict oldest from cache, and delete from self.local_gps
                if len(self.local_gp_cache) >= self.cache_capacity:
                    k, v = self.local_gp_cache.popitem(last=False)
                    self.local_gps[k] = None
                nloaded = len(self.local_gp_cache)
                #print "loaded lgp %s, total loaded %d" % (fname, nloaded)
        if self.local_gp_cache is not None:
            # LRU bookkeeping: move idx to the most-recent position.
            if idx in self.local_gp_cache:
                _ = self.local_gp_cache.pop(idx)
            self.local_gp_cache[idx] = self.local_gps[idx]
        return self.local_gps[idx]

    def predict(self, cond, **kwargs):
        # TODO: cache features and R between predict and variance calls...
        X1 = self.standardize_input_array(cond).astype(np.float)
        cluster_idx = self._x_to_cluster(X1)
        lgp = self.get_local_gp(cluster_idx)
        gp_pred = float(lgp.predict(X1))
        if self.n_features > 0:
            # Add the low-rank parametric correction R' * beta_bar,
            # restricting HKinv to this cluster's block of columns.
            query_K = lgp.get_query_K(X1, no_R=True)
            H = self.get_data_features(X1)
            k = self.local_cumpts[cluster_idx - 1] if cluster_idx > 0 else 0
            local_HKinv = np.matrix(self.HKinv[:, k:k + lgp.n])
            R = H - local_HKinv * query_K
            gp_pred += float(np.dot(R.T, self.beta_bar))
        return gp_pred

    def variance(self, cond, **kwargs):
        # Predictive variance at a single test point (X1 must be 1 x d).
        X1 = self.standardize_input_array(cond).astype(np.float)
        assert(X1.shape[0] == 1)
        cluster_idx = self._x_to_cluster(X1)
        lgp = self.get_local_gp(cluster_idx)
        gp_variance = float(lgp.variance(X1, **kwargs))
        if self.n_features > 0:
            # Extra uncertainty contributed by the basis-weight posterior.
            query_K = lgp.get_query_K(X1, no_R=True)
            H = self.get_data_features(X1)
            k = self.local_cumpts[cluster_idx - 1] if cluster_idx > 0 else 0
            local_HKinv = np.matrix(self.HKinv[:, k:k + lgp.n])
            R = H - local_HKinv * query_K
            tmp = np.dot(self.invc, R)
            mean_cov = np.dot(tmp.T, tmp)
            gp_variance += float(mean_cov)
        return gp_variance

    def log_likelihood(self):
        # Marginal log-likelihood of the training data. Without a basis
        # this is just the sum of the independent local GP likelihoods.
        self.force_load_localgps()
        if self.n_features == 0:
            ll = np.sum([gp.log_likelihood() for gp in self.local_gps])
            return ll

        Kinv = self.Kinv
        z = self.z
        tmp1 = Kinv * z
        term1 = np.dot(z.T, tmp1)

        tmp2 = np.dot(self.HKinv, z)
        tmp3 = np.dot(self.invc, tmp2)
        term2 = np.dot(tmp3.T, tmp3)

        # following eqn 2.43 in R&W, we want to compute
        # log det(K + H.T * B * H). using the matrix inversion
        # lemma, we instead compute
        # log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)

        # to compute log(det(K)), we use the trick that the
        # determinant of a symmetric pos. def. matrix is the
        # product of squares of the diagonal elements of the
        # Cholesky factor
        ldiag = self.L.diagonal()
        ld2_K = np.log(ldiag).sum()

        ld2 = np.log(np.diag(self.c)).sum()  # det( B^-1 - H * K^-1 * H.T )
        ld_B = np.linalg.slogdet(self.prior_cov)[1]

        # eqn 2.43 in R&W, using the matrix inv lemma
        ll = -.5 * (term1 - term2 + self.n * np.log(2 * np.pi) + ld_B) - ld2_K - ld2
        return ll

    def log_likelihood_gradient(self):
        # Gradient of the marginal likelihood w.r.t. each cluster's
        # (noise_var + covariance) hyperparameters, concatenated in
        # cluster order.
        nparams = np.sum([len(c.flatten()) + 1 for c in self.cluster_covs])
        grad = np.zeros((nparams,))

        if self.n_features > 0:
            # Effective precision including the parametric component.
            tmp = np.dot(self.invc, self.HKinv)
            K_HBH_inv = self.Kinv - np.dot(tmp.T, tmp)
            alpha = np.matrix(np.reshape(np.dot(K_HBH_inv, self.z), (-1, 1)))
            M = np.matrix(K_HBH_inv)
        else:
            M = self.Kinv.todense()
            alpha = self.alpha

        npts = 0
        nparams = 0
        self.force_load_localgps()
        for k, lgp in enumerate(self.local_gps):
            lgp.distance_cache_XX = lgp.predict_tree.kernel_matrix(lgp.X, lgp.X, True)
            n_main_params = len(lgp.cov_main.flatten())
            # Slice out this cluster's block of alpha and M.
            local_alpha = alpha[npts:npts + lgp.n]
            local_M = M[npts:npts + lgp.n][:, npts:npts + lgp.n]
            npts += lgp.n
            for i in range(n_main_params + 1):
                dKdi = lgp.get_dKdi_dense(i, n_main_params, 0)
                dlldi = .5 * np.dot(local_alpha.T, np.dot(dKdi, local_alpha))
                # here we use the fact:
                # trace(AB) = sum_{ij} A_ij * B_ij
                dlldi -= .5 * np.sum(np.sum(np.multiply(local_M.T, dKdi)))
                grad[nparams] = dlldi
                nparams += 1
        return grad

    def get_flat_params(self):
        # Flat list: for each cluster, noise_var followed by its
        # covariance parameters.
        params = []
        self.force_load_localgps()
        for lgp in self.local_gps:
            params.append(lgp.noise_var)
            params += list(lgp.cov_main.flatten())
        return params

    def __getstate__(self):
        # Drop unpicklable / bulky members; local GPs are saved
        # separately by save_trained_model and lazily reloaded.
        # NOTE(review): Kinv and L are deleted here but never rebuilt in
        # __setstate__, so likelihood methods may fail after unpickling
        # -- confirm intended usage.
        d = self.__dict__.copy()
        del d["cluster_tree"]
        n = len(self.local_gps)
        d['local_gps'] = [None, ] * n
        try:
            del d["Kinv"]
        except:
            pass
        try:
            del d["L"]
        except:
            pass
        try:
            del d["featurizer"]
        except:
            pass
        return d

    def __setstate__(self, d):
        # Rebuild the cluster tree and featurizer; local GPs are loaded
        # lazily through get_local_gp, with an LRU cache.
        self.__dict__ = d
        self.local_gp_cache = OrderedDict()
        self.cluster_tree = VectorTree(self.cluster_centers, 1, *self.cluster_metric.tree_params())
        if self.basis is not None:
            self.featurizer, self.featurizer_recovery = recover_featurizer(self.basis, self.featurizer_recovery, transpose=True)
        else:
            self.featurizer = None
            self.featurizer_recovery = None

    def save_trained_model(self, fname):
        # *fname* is a directory: each local GP goes in localNNN.gp and
        # the (GP-stripped) ensemble itself in main.pkl.
        mkdir_p(fname)
        for i, lgp in enumerate(self.local_gps):
            local_fname = os.path.join(fname, "local%03d.gp" % i)
            lgp.save_trained_model(local_fname, tight=True)
        with open(os.path.join(fname, "main.pkl"), "wb") as f:
            pickle.dump(self, f)
def load_lgp_ensemble(fname, cache_capacity=15):
    # Load a pickled LocalGPEnsemble from directory *fname* (written by
    # save_trained_model), configuring lazy per-cluster GP loading with
    # an LRU cache holding at most *cache_capacity* GPs.
    with open(os.path.join(fname, "main.pkl"), "rb") as f:
        lgp = pickle.load(f)
    lgp.lazyload_localgp_dir = fname
    lgp.cache_capacity = cache_capacity
    #lgp.force_load_localgps()
    return lgp
def optimize_localgp_hyperparams(noise_prior=None,
                                 cov_main=None,
                                 cluster_centers=None,
                                 y_list=None,
                                 yvars_list=None,
                                 force_unit_var=False,
                                 **kwargs):
    # Build the pieces needed to optimize per-cluster hyperparameters of
    # a LocalGPEnsemble: returns (nllgrad, x0, build_gp, covs_from_vector)
    # where nllgrad maps a log-space parameter vector to (neg. log
    # likelihood, gradient), x0 is a log-space starting point, and
    # build_gp constructs ensembles from an optimized vector.
    # force_unit_var re-parameterizes each cluster as
    # (10*noise/signal ratio, lscale_horiz, lscale_depth) with total
    # variance fixed to 1.
    n_clusters = len(cluster_centers)
    n_wfn = len(cov_main.wfn_params)
    n_dfn = len(cov_main.dfn_params)
    nparams = 1 + n_wfn + n_dfn
    nparams *= n_clusters  # NOTE(review): apparently unused below

    # Allow a single y / yvars to be passed through **kwargs.
    if y_list is None:
        y_list = [kwargs["y"], ]
        del kwargs["y"]
    if yvars_list is None:
        if "yvars" in kwargs:
            yvars_list = [kwargs["yvars"]]
            del kwargs["yvars"]
        else:
            yvars_list = [None, ] * len(y_list)

    def expand_reduced_params(rparams):
        # given a set of params that includes only the signal/noise
        # ratio, expand to the full parameterization assuming unit
        # total variance.
        # standard param order:
        # noise var, signal var, lscale horiz, lscale depth
        params = []
        for i in range(0, len(rparams), 3):
            # ratio = nv/sv = nv / (1-nv)
            ratio10 = rparams[i]
            ratio = ratio10 / 10.0
            nv = ratio / (1. + ratio)
            # Clamp away from the degenerate endpoints.
            if nv == 1.0:
                nv = 1. - 1e-10
            elif nv == 0.0:
                nv = 1e-10
            sv = 1.0 - nv
            lscale_horiz = rparams[i + 1]
            lscale_depth = rparams[i + 2]
            params.append(nv)
            params.append(sv)
            params.append(lscale_horiz)
            params.append(lscale_depth)
        return np.array(params)

    def reduce_params(params):
        # Inverse of expand_reduced_params: full 4-per-cluster params to
        # the reduced 3-per-cluster form.
        rparams = []
        for i in range(0, len(params), 4):
            # ratio = nv/sv = nv / (1-nv)
            nv = params[i]
            sv = params[i + 1]
            ratio = nv / sv
            ratio10 = ratio * 10
            lscale_horiz = params[i + 2]
            lscale_depth = params[i + 3]
            rparams.append(ratio10)
            rparams.append(lscale_horiz)
            rparams.append(lscale_depth)
        return np.array(rparams)

    def grad_reduced_params(gfull, params):
        # Chain rule mapping a gradient in the full parameterization to
        # the reduced (ratio-based) parameterization.
        rgrad = []
        for i in range(0, len(gfull), 4):
            d_nv = gfull[i]
            d_sv = gfull[i + 1]
            d_lhoriz = gfull[i + 2]
            d_ldepth = gfull[i + 3]
            nv = params[i]
            sv = params[i + 1]
            ratio = nv / sv
            # dll_dratio = dll_dnv dnv_dratio + dll_dsv dsv_dratio
            d_ratio = d_nv * 1. / (ratio + 1.) ** 2 + d_sv * -1. / (ratio + 1.) ** 2
            d_ratio10 = d_ratio / 10.0
            rgrad.append(d_ratio10)
            rgrad.append(d_lhoriz)
            rgrad.append(d_ldepth)
        return np.array(rgrad)

    def covs_from_vector(params):
        # Unpack the flat (full-parameterization) vector into one GPCov
        # plus noise variance per cluster.
        covs = []
        noise_vars = []
        k = 0
        for c in cluster_centers:
            new_cov = cov_main.copy()
            nv = params[k]
            k += 1
            new_cov.wfn_params = np.array(params[k:k + n_wfn])
            k += n_wfn
            new_cov.dfn_params = np.array(params[k:k + n_dfn])
            k += n_dfn
            covs.append(new_cov)
            noise_vars.append(nv)
        return covs, noise_vars

    def nllgrad(v):
        # Negative log posterior and gradient at log-space params v.
        if not np.all(np.isfinite(v)):
            return np.float('inf'), np.zeros(v.shape)
        try:
            expv = np.exp(v)
            if force_unit_var:
                expv = expand_reduced_params(expv)
            cluster_covs, cluster_noise_vars = covs_from_vector(expv)
            grad_expv = np.zeros(expv.shape)
            ll = 0.0
            # Sum the likelihood over all target vectors.
            for i, (y, yvars) in enumerate(zip(y_list, yvars_list)):
                lgps = LocalGPEnsemble(cluster_centers=cluster_centers,
                                       cluster_covs=cluster_covs,
                                       cluster_noise_vars=cluster_noise_vars,
                                       y=y, yvars=yvars, **kwargs)
                param_ll = lgps.log_likelihood()
                ll += param_ll
                grad_expv += lgps.log_likelihood_gradient()
                del lgps
            # Hyperparameter priors.
            prior_grad = []
            priorll = 0.0
            for i, cc in enumerate(cluster_centers):
                priorll += noise_prior.log_p(cluster_noise_vars[i])
                priorll += cluster_covs[i].prior_logp()
                prior_grad.append(noise_prior.deriv_log_p(cluster_noise_vars[i]))
                prior_grad += list(cluster_covs[i].prior_grad())
            prior_grad = np.array(prior_grad)
            grad_expv += prior_grad
            ll += priorll
            if force_unit_var:
                grad_expv = grad_reduced_params(grad_expv, expv)
            # Chain rule for the log-space parameterization.
            grad_v = grad_expv * np.exp(v)
            #print "expv", expv, "ll", ll
            if np.isinf(ll):
                import pdb; pdb.set_trace()
            if np.isinf(np.sum(grad_v)):
                import pdb; pdb.set_trace()
            if np.isnan(grad_v).any():
                print "warning: nans in gradient", grad_v
                grad_v[np.isnan(grad_v)] = 0.0
                # NOTE(review): min() forces ll to -1e100 here; if the
                # intent was merely to clamp -inf, max() would be the
                # correct call -- confirm.
                ll = min(ll, -1e100)
        except FloatingPointError as e:
            print "warning: floating point error (%s) in likelihood computation, returning likelihood -inf" % str(e)
            ll = np.float("-inf")
            grad_v = np.zeros((len(v),))
        except np.linalg.linalg.LinAlgError as e:
            print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
            ll = np.float("-inf")
            grad_v = np.zeros((len(v),))
        except scikits.sparse.cholmod.CholmodError as e:
            print "warning: cholmod error (%s) in likelihood computation, returning likelihood -inf" % str(e)
            ll = np.float("-inf")
            grad_v = np.zeros((len(v),))
        #except ValueError as e:
        #    print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
        #    ll = np.float("-inf")
        #    grad = np.zeros((len(v),))
        #print "hyperparams", v, "ll", ll, 'grad', grad
        return -1 * ll, (-1 * grad_v if grad_v is not None else None)

    def build_gp(v, **kwargs2):
        # Construct ensembles (one per y in y_list) from the optimized
        # log-space parameter vector v.
        expv = np.exp(v)
        if force_unit_var:
            expv = expand_reduced_params(expv)
        cluster_covs, cluster_noise_vars = covs_from_vector(expv)
        kw = dict(kwargs.items() + kwargs2.items())
        gps = []
        for (y, yvars) in zip(y_list, yvars_list):
            gp = LocalGPEnsemble(cluster_centers=cluster_centers,
                                 cluster_noise_vars=cluster_noise_vars,
                                 cluster_covs=cluster_covs,
                                 y=y, yvars=yvars, **kw)
            gps.append(gp)
        if len(gps) == 1:
            return gp
        else:
            return gps

    # Initial point: prior-predicted noise variance (or a fixed 0.4/0.6
    # noise/signal split under force_unit_var), in log space.
    noise_var_default = noise_prior.predict()
    if force_unit_var:
        x0 = np.concatenate([[0.4, 0.6, ] + list(cov_main.flatten())[1:] for i in range(n_clusters)])
        x0 = reduce_params(x0)
    else:
        x0 = np.concatenate([[noise_var_default, ] + list(cov_main.flatten()) for i in range(n_clusters)])
    x0 = np.log(x0)
    return nllgrad, x0, build_gp, covs_from_vector
| StarcoderdataPython |
1696422 | <filename>reporting/hardware_management.py
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import subprocess
import sys
import sensors #only needed for ubuntu
root_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(root_path[0:root_path.find("/thirtybirds")])
from thirtybirds3.reporting.exceptions import capture_exceptions
<EMAIL>
class Hardware_Management():
    """Queries host hardware/OS health (core temperature, CPU load,
    memory, disk, wifi) via /proc, libsensors and platform tools, and
    exposes restart/shutdown controls.

    NOTE: the constructor parameter is named ``os`` for interface
    compatibility; it shadows the ``os`` module inside ``__init__`` only.
    """

    def __init__(self, os):
        # ``os`` is a dict like {"name": "ubuntu", "version": "20.04"}.
        self.os_name = os["name"]
        self.os_version = os["version"]

    def get_core_temp(self):
        """Return the hottest core temperature in degrees C.

        Uses libsensors on Ubuntu and vcgencmd on Raspbian; returns
        None for any other os_name.
        """
        if self.os_name == "ubuntu":
            max_temp = 0
            sensors.init()  # was called twice before; once is enough
            try:
                for chip in sensors.iter_detected_chips():
                    for feature in chip:
                        if "temp" in feature.label:
                            core_temp = int(feature.get_value())
                            if core_temp > max_temp:
                                max_temp = core_temp
            finally:
                # Always release libsensors resources.
                sensors.cleanup()
            return max_temp
        if self.os_name == "raspbian":
            process = subprocess.run("/opt/vc/bin/vcgencmd measure_temp", shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
            # Output looks like "temp=47.2'C\n". The old code indexed
            # process.stdout[1] (a single character) before slicing,
            # which always produced an empty string.
            return float(process.stdout.strip()[5:-2])

    def get_wifi_strength(self):
        """Return the iwconfig link quality as an int, or -1 if absent."""
        process = subprocess.run('iwconfig', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
        for line in process.stdout.split("\n"):
            if "Link Quality=" in line:
                try:
                    # First two digits after "Link Quality=" (e.g. "57/70").
                    return int(line.split("Link Quality=")[1][:2])
                except (IndexError, ValueError):
                    pass
        return -1

    def get_core_voltage(self):
        """Return the Pi core voltage in volts, or False on failure.

        The previous implementation referenced the undefined name
        ``commands`` (a removed Python 2 module) and therefore always
        returned False under Python 3.
        """
        try:
            process = subprocess.run("/opt/vc/bin/vcgencmd measure_volts core", shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
            # Output looks like "volt=1.2000V\n".
            return float(process.stdout.strip()[5:-1])
        except Exception:
            return False

    def get_system_cpu(self):
        """Return the 15-minute load average as a percentage of cores."""
        return os.getloadavg()[-1] / os.cpu_count() * 100

    def get_system_uptime(self):
        """Return the boot timestamp string from `uptime -s`."""
        process = subprocess.run('uptime -s', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
        return process.stdout.strip()

    def get_system_disk(self):
        """Return [free_bytes, total_bytes] for the root filesystem."""
        disk_usage = shutil.disk_usage("/")
        return [disk_usage.free, disk_usage.total]

    def get_memory_free(self):
        """Return [free, total] system memory in bytes.

        Parsed from /proc/meminfo, which reports kB; entries are None if
        the expected lines are missing (the old code raised NameError).
        """
        process = subprocess.run('cat /proc/meminfo', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
        free_bytes = None
        total_bytes = None
        for line in process.stdout.split("\n"):
            if line.startswith("MemFree:"):
                free_bytes = float(line.split()[1]) * 1000.0
            elif line.startswith("MemTotal:"):
                total_bytes = float(line.split()[1]) * 1000.0
        return [free_bytes, total_bytes]

    def get_system_status(self):
        """Collect all health metrics into a single report dict."""
        report = {
            "system_uptime": self.get_system_uptime(),
            "system_cpu": self.get_system_cpu(),
            "memory_free": self.get_memory_free(),
            "system_disk": self.get_system_disk(),
            "core_temp": self.get_core_temp(),
            "os_version": [self.os_name, self.os_version],
            "wifi_strength": 0  # self.get_wifi_strength()
        }
        return report

    def restart(self):
        """Reboot the machine immediately (requires sudo)."""
        subprocess.run('/usr/bin/sudo /sbin/shutdown -r now', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
        return True

    def shutdown(self):
        """Power off the machine immediately (requires sudo)."""
        subprocess.run('/usr/bin/sudo /sbin/shutdown now', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
        return True
| StarcoderdataPython |
110740 | <gh_stars>10-100
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import logging
import pickle
import re
from collections import Counter, defaultdict
from pyrocov.align import AlignDB
from pyrocov.util import open_tqdm
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(relativeCreated) 9d %(message)s", level=logging.INFO)
def process_row(
    id_to_lineage,
    mutation_counts,
    status_counts,
    accession_id,
    row,
):
    """Tally one nextclade row into the shared accumulators.

    Rows failing QC map their accession to None; good rows record
    (lineage, lineages, clade, clades) and count each amino-acid
    substitution/deletion, plus every ordered within-gene pair.
    """
    lineage = row["lineage"]
    qc_status = row["qc.overallStatus"]
    status_counts[lineage][qc_status] += 1
    if qc_status != "good":
        # Mark the sequence as rejected and skip mutation counting.
        id_to_lineage[accession_id] = None
        return

    id_to_lineage[accession_id] = (
        lineage,
        row["lineages"],
        row["clade"],
        row["clades"],
    )
    counts = mutation_counts[lineage]
    counts[None] += 1  # hack: the None key counts samples per lineage

    def sort_key(mut):
        # Order by position within the gene, breaking ties alphabetically.
        return (int(re.search(r"\d+", mut).group(0)), mut)

    for column in ("aaSubstitutions", "aaDeletions"):
        raw = row[column]
        if not raw:
            continue
        muts = raw.split(",")
        counts.update(muts)

        # Group mutations by gene, then count within-gene pairs.
        per_gene = defaultdict(list)
        for token in muts:
            gene, mut = token.split(":")
            per_gene[gene].append(mut)
        for gene, gene_muts in per_gene.items():
            gene_muts.sort(key=sort_key)
            for i, m1 in enumerate(gene_muts):
                for m2 in gene_muts[i + 1:]:
                    counts[f"{gene}:{m1},{m2}"] += 1
def main(args):
    """Featurize GISAID sequences with nextclade-derived mutations.

    Pipeline: load filtered accession ids -> align/annotate each
    sequence (batched via AlignDB, accumulating into process_row) ->
    log QC summaries -> rewrite the columns pickle with usher lineages
    -> save aggregate single-mutation counts.
    """
    # Load the filtered accession ids.
    logger.info(f"Loading {args.columns_file_in}")
    with open(args.columns_file_in, "rb") as f:
        columns = pickle.load(f)
    id_to_lineage = {aid: None for aid in columns["accession_id"]}
    del columns

    # Count mutations via nextclade.
    # This is batched and cached under the hood.
    logger.info(f"Loading {args.gisaid_file_in}")
    mutation_counts = defaultdict(Counter)
    status_counts = defaultdict(Counter)
    db = AlignDB()
    for i, line in enumerate(open_tqdm(args.gisaid_file_in, "rt")):
        if args.truncate and args.truncate <= i:
            break
        datum = json.loads(line)
        # Filter to desired sequences.
        accession_id = datum["covv_accession_id"]
        if accession_id not in id_to_lineage:
            continue
        # Schedule sequence for alignment.
        seq = datum["sequence"].replace("\n", "")
        db.schedule(
            seq,
            process_row,
            id_to_lineage,
            mutation_counts,
            status_counts,
            accession_id,
        )
    db.wait()

    # Log an aggregate QC-status summary across all lineages.
    message = ["Total quality:"]
    counts = Counter()
    for c in status_counts.values():
        counts.update(c)
    for s, c in counts.most_common():
        message.append(f"{s}: {c}")
    logger.info("\n\t".join(message))

    # Log the 20 lineages with the fewest QC-passing samples.
    message = ["Lineages with fewest good samples:"]
    for c, l in sorted((c["good"], l) for l, c in status_counts.items())[:20]:
        message.append(f"{l}: {c}")
    logger.info("\n\t".join(message))

    # Update columns with usher-computed lineages.
    with open(args.columns_file_in, "rb") as f:
        old_columns = pickle.load(f)
    del old_columns["lineage"]
    columns = defaultdict(list)
    for row in zip(*old_columns.values()):
        row = dict(zip(old_columns, row))
        llcc = id_to_lineage.get(row["accession_id"])
        if llcc is None:
            continue  # drop the row
        lineage, lineages, clade, clades = llcc
        columns["clade"].append(clade)
        columns["clades"].append(clades)
        columns["lineage"].append(lineage)
        columns["lineages"].append(lineages)
        for k, v in row.items():
            columns[k].append(v)
    del old_columns
    columns = dict(columns)
    with open(args.columns_file_out, "wb") as f:
        pickle.dump(columns, f)

    # Collect a set of all single mutations observed in this subsample.
    # Keys containing "," are within-gene pairs; None is the per-lineage
    # sample counter -- both are excluded here.
    agg_counts = Counter()
    for ms in mutation_counts.values():
        for m, count in ms.items():
            if m is not None and "," not in m:
                agg_counts[m] += count
    logger.info(f"saving {args.counts_file_out}")
    with open(args.counts_file_out, "wb") as f:
        pickle.dump(dict(agg_counts), f)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Featurize nextclade mutations")
parser.add_argument("--gisaid-file-in", default="results/gisaid.json")
parser.add_argument("--columns-file-in", default="results/gisaid.columns.pkl")
parser.add_argument("--columns-file-out", default="results/usher.columns.pkl")
parser.add_argument("--counts-file-out", default="results/nextclade.counts.pkl")
parser.add_argument("--truncate", type=int)
args = parser.parse_args()
main(args)
| StarcoderdataPython |
3303617 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a single project with specified service accounts and APIs enabled."""
import copy
import sys
import random
def GenerateConfig(context):
    """Generate the Deployment Manager resources for a single project.

    Creates the project under an organization or folder, attaches billing,
    and optionally enables APIs, creates service accounts, grants owners,
    and configures a compute usage-export bucket.
    """
    # Project id: an explicit "project-name" property wins over the
    # deployment resource name.
    project_id = ""
    if "project-name" in context.properties and context.properties["project-name"] is not None and context.properties["project-name"] != "":
        project_id = context.properties["project-name"]
    else:
        project_id = context.env["name"]
    billing_name = 'billing_' + project_id
    if not IsProjectParentValid(context.properties):
        sys.exit('Invalid [organization-id, parent-folder-id], must specify exactly one.')
    # NOTE(review): the error message above says "parent-folder-id" but the
    # code below reads "folder-id" -- confirm which property name is intended.
    # Resolve the parent container; organization takes precedence.
    parent_type = ''
    parent_id = ''
    if "organization-id" in context.properties and context.properties["organization-id"] is not None and context.properties["organization-id"] != "":
        parent_type = "organization"
        parent_id = context.properties["organization-id"]
    else:
        parent_type = "folder"
        parent_id = context.properties["folder-id"]
    # Base resources: the project itself plus its billing attachment.
    resources = [
        {
            'name': project_id,
            'type': 'cloudresourcemanager.v1.project',
            'properties': {
                'name': project_id,
                'projectId': project_id,
                'parent': {
                    'type': parent_type,
                    'id': parent_id
                }
            },
            'accessControl': {
                # Keep the acting DM service account as an owner so later
                # deployment steps retain access.
                'gcpIamPolicy': MergeCallingServiceAccountWithOwnerPermissionsIntoBindings(context.env, context.properties)
            }
        },
        {
            'name': billing_name,
            'type': 'deploymentmanager.v2.virtual.projectBillingInfo',
            'properties': {
                'name': 'projects/' + project_id,
                'billingAccountName': "billingAccounts/" + context.properties['billing']
            },
            'metadata': {
                'dependsOn': [
                    project_id
                ]
            }
        }
    ]
    # Optionally enable APIs through the api.py sub-template.
    if "apis" in context.properties and context.properties["apis"] is not None and len(context.properties["apis"]) > 0:
        resources.append(
            {
                'name': 'apis',
                'type': 'api.py',
                'properties': {
                    'project': project_id,
                    'billing': billing_name,
                    'apis': context.properties['apis'],
                    'concurrent-api-activation': context.properties['concurrent-api-activation']
                },
                'metadata': {
                    'dependsOn': [
                        project_id
                    ]
                }
            }
        )
    # Optionally create service accounts through the sa.py sub-template.
    if "service-accounts" in context.properties and context.properties["service-accounts"] is not None and len(context.properties["service-accounts"]) > 0:
        sas = []
        for sa in context.properties["service-accounts"]:
            sas.append({
                "name" : sa,
                "displayName": sa,
                "createKey": False
            })
        resources.append(
            {
                'name': 'service-accounts',
                'type': 'sa.py',
                'properties': {
                    'project': project_id,
                    'service-accounts': sas
                },
                'metadata': {
                    'dependsOn': [
                        project_id
                    ]
                }
            }
        )
    # Optionally grant an external account ownership: read the live IAM
    # policy, then patch an owner binding in (re-run on every update).
    if "project-owner" in context.properties and context.properties["project-owner"] is not None and context.properties["project-owner"] != "":
        # Get the current IAM policy and then add the specified account as the owner
        get_iam_policy_name = 'get-iam-policy'
        resources.extend(
            [
                {
                    "name": get_iam_policy_name,
                    "action": "gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.getIamPolicy",
                    "properties": {
                        "resource": project_id
                    },
                    "metadata": {
                        "dependsOn" : [
                            project_id,
                            ApiResourceName(project_id, "deploymentmanager.googleapis.com")
                        ],
                        "runtimePolicy": [
                            "UPDATE_ALWAYS"
                        ]
                    }
                },
                {
                    "name": "patch-iam-policy",
                    "action": "gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.setIamPolicy",
                    "properties": {
                        "resource": project_id,
                        # Reference makes this run after getIamPolicy.
                        "policy": "$(ref." + get_iam_policy_name + ")",
                        "gcpIamPolicyPatch": {
                            "add": [{
                                "role": "roles/owner",
                                "members": [
                                    context.properties["project-owner"]
                                ]
                            }]
                        }
                    }
                }
            ])
    # Optionally make the host project's cloudservices service account an
    # owner of the new project via the same get/patch mechanism.
    if "set-dm-service-account-as-owner" in context.properties and context.properties["set-dm-service-account-as-owner"] is not None and context.properties["set-dm-service-account-as-owner"] == True:
        # Get the current IAM policy and then add the specified account as the owner
        get_iam_policy_name_dm = 'get-iam-policy-dm'
        set_iam_policy_name_dm = 'patch-iam-policy-dm'
        resources.extend(
            [
                {
                    "name": get_iam_policy_name_dm,
                    "action": "gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.getIamPolicy",
                    "properties": {
                        "resource": project_id
                    },
                    "metadata": {
                        "dependsOn": [
                            project_id,
                        ],
                        "runtimePolicy": [
                            "UPDATE_ALWAYS"
                        ]
                    }
                },
                {
                    "name": set_iam_policy_name_dm,
                    "action": "gcp-types/cloudresourcemanager-v1:cloudresourcemanager.projects.setIamPolicy",
                    "properties": {
                        "resource": project_id,
                        "policy": "$(ref." + get_iam_policy_name_dm + ")",
                        "gcpIamPolicyPatch": {
                            "add": [{
                                "role": "roles/owner",
                                "members": [
                                    "serviceAccount:" + context.env["project_number"] + "@cloudservices.gserviceaccount.com"
                                ]
                            }]
                        }
                    }
                }
            ])
    # Optionally configure a compute usage-export bucket, creating the
    # bucket first when requested.
    if 'bucket-export-settings' in context.properties and context.properties["bucket-export-settings"] is not None:
        bucket_name = None
        action_dependency = [project_id, ApiResourceName(project_id, 'compute.googleapis.com')]
        if context.properties['bucket-export-settings'].get('create-bucket'):
            bucket_name = project_id + '-export-bucket'
            resources.append(
                {
                    'name': bucket_name,
                    'type': 'gcp-types/storage-v1:buckets',
                    'properties': {
                        'project': project_id,
                        'name': bucket_name
                    },
                    'metadata': {
                        'dependsOn': [
                            project_id,
                            ApiResourceName(project_id, 'storage-component.googleapis.com')
                        ]
                    }
                }
            )
            action_dependency.append(bucket_name)
        else:
            # Use a pre-existing bucket supplied by the caller.
            bucket_name = context.properties['bucket-export-settings']['bucket-name']
        resources.append(
            {
                'name': 'set-export-bucket',
                'action': 'gcp-types/compute-v1:compute.projects.setUsageExportBucket',
                'properties': {
                    'project': project_id,
                    'bucketName': 'gs://' + bucket_name
                },
                'metadata': {
                    'dependsOn': action_dependency
                }
            }
        )
    return {'resources': resources}
def MergeCallingServiceAccountWithOwnerPermissionsIntoBindings(env, properties):
    """Merge the acting Deployment Manager service account into the policy.

    Ensures the Google-managed "cloudservices" service account of the host
    project (which executes the deployment) is an owner of the project being
    created, so subsequent deployment steps keep their access.

    Args:
        env: deployment environment dict; "project_number" is read.
        properties: deployment properties; an optional "iam-policy" entry
            supplies user-provided bindings to merge into.

    Returns:
        A gcpIamPolicy dict whose "bindings" list includes the acting
        service account with roles/owner. ``properties`` is never mutated.
    """
    # Bug fix: this literal had been mangled to
    # 'service<EMAIL>:{<EMAIL>'; restored to the cloudservices member
    # format also used by "set-dm-service-account-as-owner" above.
    service_account = (
        'serviceAccount:{}@cloudservices.gserviceaccount.com'.format(
            env['project_number']))
    set_creator_sa_as_owner = {
        'role': 'roles/owner',
        'members': [
            service_account,
        ]
    }
    if 'iam-policy' not in properties:
        # No user-supplied policy: the owner binding is the whole policy.
        return {
            'bindings': [
                set_creator_sa_as_owner,
            ]
        }
    # Deep-copy so the caller's properties are left untouched.
    iam_policy = copy.deepcopy(properties['iam-policy'])
    bindings = []
    if 'bindings' in iam_policy:
        bindings = iam_policy['bindings']
    else:
        iam_policy['bindings'] = bindings
    # Merge into an existing roles/owner binding when present...
    merged = False
    for binding in bindings:
        if binding['role'] == 'roles/owner':
            merged = True
            if service_account not in binding['members']:
                binding['members'].append(service_account)
            break
    # ...otherwise append a fresh owner binding.
    if not merged:
        bindings.append(set_creator_sa_as_owner)
    return iam_policy
def ApiResourceName(project_id, api_name):
    """Return the deployment resource name used for enabling *api_name*."""
    return '{0}-{1}'.format(project_id, api_name)
def IsProjectParentValid(properties):
    """Validate that exactly one non-empty project parent is specified.

    The project must be created under either an organization or a folder,
    but not both and not neither.

    Args:
        properties: the deployment properties dict.

    Returns:
        True when exactly one of "organization-id" / "folder-id" carries a
        non-empty value, False otherwise.
    """
    def _has_value(key):
        # A parent is usable only when the key exists with a non-empty value.
        value = properties.get(key)
        return value is not None and value != ""

    has_org = _has_value("organization-id")
    has_folder = _has_value("folder-id")
    # Bug fix: previously a single key present with an empty value was
    # accepted as valid, which later caused a KeyError (or an empty parent)
    # in GenerateConfig. Require exactly one non-empty parent.
    return has_org != has_folder
52275 | import re
from django.urls import path
from render_static.tests.views import TestView
class Unrecognized:
    # Mimics a URL pattern object whose ``regex`` attribute is a plain
    # compiled pattern the static renderer cannot introspect.
    regex = re.compile('Im not normal')


class NotAPattern:
    # Intentionally has none of the attributes a URL pattern provides.
    pass


# Test fixture: a url conf containing one valid path and one object that is
# not a pattern at all, presumably to exercise error handling -- confirm
# against the render_static test suite.
urlpatterns = [
    path('test/simple/', TestView.as_view(), name='bad'),
    NotAPattern()
]
# Replace the valid path's pattern with the unrecognized one so reversal of
# the named url 'bad' cannot succeed.
urlpatterns[0].pattern = Unrecognized()
| StarcoderdataPython |
1702473 | <filename>pipeline.py<gh_stars>1-10
from typing import Dict, Iterable, List, Tuple, Any, Union
from PyMongoWrapper import MongoResultSet
from concurrent.futures import ThreadPoolExecutor
from models import Paragraph
class PipelineStage:
    """A single stage of the processing pipeline.

    ``resolve`` should likewise return a paragraph record (it does not have
    to match the one stored in the database). Note that paragraphs may be
    processed concurrently, so stages should be safe to call from multiple
    threads.
    """

    def resolve(self, p: Paragraph) -> Paragraph:
        # Map step: transform (or drop, by returning a falsy value) one
        # paragraph. Default implementation passes it through unchanged.
        return p

    def summarize(self, r) -> Any:
        # Reduce step: fold the previous stage's summary value. Default
        # implementation passes it through unchanged.
        return r
class Pipeline:
    """Runs an ordered list of PipelineStage objects over paragraphs."""

    # Registry mapping stage names to stage classes; must be assigned before
    # name-based stage construction can work.
    pipeline_ctx = None

    def __init__(self, stages : List[Union[Tuple[str, Dict], List, Dict, PipelineStage]], concurrent : int, resume_next : bool):
        """
        Args:
            stages: each stage expressed as Tuple[<name>, <config>],
                List[<name>, <config>], a dict of the form
                {$<name>: <config>}, or an already-initialized
                PipelineStage instance.
            concurrent (int): number of paragraphs processed concurrently.
            resume_next (bool): whether to continue when a stage raises.
        """
        self.stages = []
        if stages:
            for stage in stages:
                if isinstance(stage, dict):
                    # {"$Name": kwargs} (or {"Name": kwargs}) -> ("Name", kwargs)
                    (name, kwargs), = stage.items()
                    if name.startswith('$'): name = name[1:]
                    stage = (name, kwargs)
                if isinstance(stage, (tuple, list)) and len(stage) == 2 and Pipeline.pipeline_ctx:
                    # Look the stage class up by name and instantiate it.
                    name, kwargs = stage
                    stage = Pipeline.pipeline_ctx[name](**kwargs)
                self.stages.append(stage)
        self.concurrent = concurrent
        self.resume_next = resume_next

    def apply(self, p : Paragraph):
        # Run every stage on one paragraph; returns None when the paragraph
        # is dropped by a stage or a stage fails with resume_next enabled.
        if not self.stages:
            return p
        for stage in self.stages:
            # Attach a logger tagged with the stage class name.
            # NOTE(review): self.logger is not defined in this class --
            # presumably injected by the caller; confirm.
            stage.logger = lambda *args: self.logger(stage.__class__.__name__, *args)
            try:
                p = stage.resolve(p)
                if not p: return
            except Exception as ex:
                if not self.resume_next:
                    raise ex
        return p

    def applyParagraphs(self, rs : Union[MongoResultSet, Iterable[Paragraph]]):
        """
        Process paragraphs.

        Args:
            rs (MongoResultSet | Iterable[Paragraph]): the Paragraph objects
                to process.
        """
        if not self.stages:
            return rs
        if self.concurrent > 1:
            # Concurrent path: fan paragraphs out to a thread pool and drop
            # the ones a stage filtered away (falsy results).
            def _update_and_do(p):
                p = self.apply(p)
                return p
            with ThreadPoolExecutor(max_workers=self.concurrent) as te:
                r = te.map(_update_and_do, rs)
                return [_ for _ in r if _]
        else:
            # Sequential path: lazy generator of surviving paragraphs.
            def _seq():
                for p in rs:
                    p = self.apply(p)
                    if p: yield p
            return _seq()

    def summarize(self):
        """
        Reduce phase: fold each stage's summarize() over the previous result.
        """
        returned = None
        for stage in self.stages:
            returned = stage.summarize(returned)
        return returned
| StarcoderdataPython |
4811809 | from setuptools import find_packages, setup
# Standard src-layout package metadata; packages are discovered
# automatically so new sub-packages need no edits here.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Implementing DL models for ranking, with evaluation benchmarks',
    author='ajea',
    license='MIT',
)
| StarcoderdataPython |
137815 | <reponame>tws0002/anima
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2018, <NAME>
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
from anima import logger
from anima.ui.lib import QtGui, QtCore
def set_item_color(item, color):
    """Paint the foreground (text) brush of *item* with *color*.

    :param item: the QStandardItem whose text color should change
    :param color: the QColor to apply
    """
    brush = item.foreground()
    brush.setColor(color)
    item.setForeground(brush)
class VersionItem(QtGui.QStandardItem):
    """Implements the Version as a QStandardItem

    Children (the versions this version references via ``inputs``) are
    populated lazily through canFetchMore()/fetchMore().
    """

    def __init__(self, *args, **kwargs):
        QtGui.QStandardItem.__init__(self, *args, **kwargs)
        logger.debug(
            'VersionItem.__init__() is started for item: %s' % self.text()
        )
        # Lazy-loading state and back-references into the model.
        self.loaded = False
        self.version = None
        self.parent = None
        self.pseudo_model = None
        self.fetched_all = False
        self.setEditable(False)
        logger.debug(
            'VersionItem.__init__() is finished for item: %s' % self.text()
        )

    def clone(self):
        """returns a copy of this item
        """
        logger.debug(
            'VersionItem.clone() is started for item: %s' % self.text()
        )
        new_item = VersionItem()
        new_item.version = self.version
        new_item.parent = self.parent
        new_item.fetched_all = self.fetched_all
        logger.debug(
            'VersionItem.clone() is finished for item: %s' % self.text()
        )
        return new_item

    def canFetchMore(self):
        # More rows are available while the version has inputs that have not
        # been expanded into child rows yet.
        logger.debug(
            'VersionItem.canFetchMore() is started for item: %s' % self.text()
        )
        if self.version and not self.fetched_all:
            return_value = bool(self.version.inputs)
        else:
            return_value = False
        logger.debug(
            'VersionItem.canFetchMore() is finished for item: %s' % self.text()
        )
        return return_value

    @classmethod
    def generate_version_row(cls, parent, pseudo_model, version):
        """Generates a new version row

        Builds the list of QStandardItems (one per column) describing
        *version*, color-coded by the action the reference resolution
        assigns to it (update=orange, create=red, up-to-date=green).

        :return: list of QStandardItem columns for one row
        """
        # column 0
        version_item = VersionItem(0, 0)
        version_item.parent = parent
        version_item.pseudo_model = pseudo_model
        version_item.version = version
        version_item.setEditable(False)
        reference_resolution = pseudo_model.reference_resolution
        if version in reference_resolution['update']:
            action = 'update'
            font_color = QtGui.QColor(192, 128, 0)
            # Root versions get a checkbox so the user can opt in/out.
            if version in reference_resolution['root']:
                version_item.setCheckable(True)
                version_item.setCheckState(QtCore.Qt.Checked)
        elif version in reference_resolution['create']:
            action = 'create'
            font_color = QtGui.QColor(192, 0, 0)
            if version in reference_resolution['root']:
                version_item.setCheckable(True)
                version_item.setCheckState(QtCore.Qt.Checked)
        else:
            font_color = QtGui.QColor(0, 192, 0)
            action = ''
        version_item.action = action
        set_item_color(version_item, font_color)

        # thumbnail
        thumbnail_item = QtGui.QStandardItem()
        thumbnail_item.setEditable(False)
        # thumbnail_item.setText('no thumbnail')
        thumbnail_item.version = version
        thumbnail_item.action = action
        set_item_color(thumbnail_item, font_color)

        # Nice Name, e.g. "my_task_v003"
        nice_name_item = QtGui.QStandardItem()
        nice_name_item.toolTip()
        nice_name_item.setText(
            '%s_v%s' % (
                version.nice_name,
                ('%s' % version.version_number).zfill(3)
            )
        )
        nice_name_item.setEditable(False)
        nice_name_item.version = version
        nice_name_item.action = action
        set_item_color(nice_name_item, font_color)

        # Take
        take_item = QtGui.QStandardItem()
        take_item.setEditable(False)
        take_item.setText(version.take_name)
        take_item.version = version
        take_item.action = action
        set_item_color(take_item, font_color)

        # Current version number
        current_version_item = QtGui.QStandardItem()
        current_version_item.setText('%s' % version.version_number)
        current_version_item.setEditable(False)
        current_version_item.version = version
        current_version_item.action = action
        set_item_color(current_version_item, font_color)

        # Latest published version number (if any)
        latest_published_version = version.latest_published_version
        latest_published_version_item = QtGui.QStandardItem()
        latest_published_version_item.version = version
        latest_published_version_item.action = action
        latest_published_version_item.setEditable(False)
        latest_published_version_text = 'No Published Version'
        if latest_published_version:
            latest_published_version_text = '%s' % \
                latest_published_version.version_number
        latest_published_version_item.setText(
            latest_published_version_text
        )
        set_item_color(latest_published_version_item, font_color)

        # Action ('update' / 'create' / '')
        action_item = QtGui.QStandardItem()
        action_item.setEditable(False)
        action_item.setText(action)
        action_item.version = version
        action_item.action = action
        set_item_color(action_item, font_color)

        # Updated By
        updated_by_item = QtGui.QStandardItem()
        updated_by_item.setEditable(False)
        updated_by_text = ''
        # NOTE(review): unlike the columns above, this access is not guarded
        # against latest_published_version being None -- it would raise
        # AttributeError when there is no published version; confirm.
        if latest_published_version.updated_by:
            updated_by_text = latest_published_version.updated_by.name
        updated_by_item.setText(updated_by_text)
        updated_by_item.version = version
        updated_by_item.action = action
        set_item_color(updated_by_item, font_color)

        # Description of the latest published version
        description_item = QtGui.QStandardItem()
        if latest_published_version:
            description_item.setText(latest_published_version.description)
        description_item.setEditable(False)
        description_item.version = version
        description_item.action = action
        set_item_color(description_item, font_color)

        # # Path
        # path_item = QtGui.QStandardItem()
        # if latest_published_version:
        #     path_item.setText(version.absolute_full_path)
        #     path_item.setEditable(True)
        #     set_item_color(path_item, font_color)

        return [version_item, thumbnail_item, nice_name_item, take_item,
                current_version_item, latest_published_version_item,
                action_item, updated_by_item, description_item]

    def fetchMore(self):
        # Expand this version's inputs into child rows, once.
        logger.debug(
            'VersionItem.fetchMore() is started for item: %s' % self.text()
        )
        if self.canFetchMore():
            # model = self.model() # This will cause a SEGFAULT
            versions = sorted(self.version.inputs, key=lambda x: x.full_path)
            for version in versions:
                self.appendRow(
                    self.generate_version_row(self, self.pseudo_model, version)
                )
            self.fetched_all = True
        logger.debug(
            'VersionItem.fetchMore() is finished for item: %s' % self.text()
        )

    def hasChildren(self):
        # An item has children exactly when its version references others.
        logger.debug(
            'VersionItem.hasChildren() is started for item: %s' % self.text()
        )
        if self.version:
            return_value = bool(self.version.inputs)
        else:
            return_value = False
        logger.debug(
            'VersionItem.hasChildren() is finished for item: %s' % self.text()
        )
        return return_value

    def type(self, *args, **kwargs):
        """Custom item type id, distinguishing this class from plain items.
        """
        return QtGui.QStandardItem.UserType + 2
class VersionTreeModel(QtGui.QStandardItemModel):
    """Implements the model view for the version hierarchy

    Delegates lazy child loading to the VersionItem instances; in flat view
    the hierarchy is suppressed and only root versions are shown.
    """

    def __init__(self, flat_view=False, *args, **kwargs):
        QtGui.QStandardItemModel.__init__(self, *args, **kwargs)
        logger.debug('VersionTreeModel.__init__() is started')
        self.root = None
        self.root_versions = []
        # Shared with every VersionItem row to color/checkbox it.
        self.reference_resolution = None
        self.flat_view = flat_view
        logger.debug('VersionTreeModel.__init__() is finished')

    def populateTree(self, versions):
        """populates tree with root versions
        """
        logger.debug('VersionTreeModel.populateTree() is started')
        self.setColumnCount(7)
        self.setHorizontalHeaderLabels(
            ['Do Update?', 'Thumbnail', 'Task', 'Take', 'Current', 'Latest',
             'Action', 'Updated By', 'Notes']
        )
        self.root_versions = versions
        for version in versions:
            self.appendRow(
                VersionItem.generate_version_row(None, self, version)
            )
        logger.debug('VersionTreeModel.populateTree() is finished')

    def canFetchMore(self, index):
        # Delegate to the item; the invisible root never lazy-loads.
        logger.debug(
            'VersionTreeModel.canFetchMore() is started for index: %s' % index
        )
        if not index.isValid():
            return_value = False
        else:
            item = self.itemFromIndex(index)
            return_value = item.canFetchMore()
        logger.debug(
            'VersionTreeModel.canFetchMore() is finished for index: %s' % index
        )
        return return_value

    def fetchMore(self, index):
        """fetches more elements
        """
        logger.debug(
            'VersionTreeModel.canFetchMore() is started for index: %s' % index
        )
        if index.isValid():
            item = self.itemFromIndex(index)
            item.fetchMore()
        logger.debug(
            'VersionTreeModel.canFetchMore() is finished for index: %s' % index
        )

    def hasChildren(self, index):
        """returns True or False depending on to the index and the item on the
        index
        """
        logger.debug(
            'VersionTreeModel.hasChildren() is started for index: %s' % index
        )
        if not index.isValid():
            # Invisible root: has children when any root versions exist.
            return_value = len(self.root_versions) > 0
        else:
            if self.flat_view:
                # Flat view: never expand beyond the root rows.
                return False
            else:
                item = self.itemFromIndex(index)
                return_value = False
                if item:
                    return_value = item.hasChildren()
        logger.debug(
            'VersionTreeModel.hasChildren() is finished for index: %s' % index
        )
        return return_value
| StarcoderdataPython |
1705453 | <filename>swift/common/middleware/slo.py<gh_stars>0
# -*- coding: utf-8 -*-
from urllib import quote
from cStringIO import StringIO
from datetime import datetime
import mimetypes
from webob import Request
from urllib import unquote
from webob.exc import HTTPBadRequest, HTTPServerError, \
HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired
from swift.common.mx_swob import wsgify
from swift.common.utils import json, get_logger, config_true_value
from swift.common.middleware.bulk import get_response_body, \
ACCEPTABLE_FORMATS, Bulk
from swift.common.utils import split_path
from swift.common.env_utils import *
from swift.common.bufferedhttp import jresponse
def parse_input(raw_data):
    """Parse and validate a static-large-object manifest body.

    Returns the decoded segment list. Raises HTTPBadRequest when the body
    is not valid JSON, or when any segment entry does not carry exactly the
    keys path/etag/size_bytes with a path of the form container/object.
    """
    try:
        segments = json.loads(raw_data)
    except ValueError:
        raise HTTPBadRequest("Manifest must be valid json.")

    required_keys = set(['path', 'etag', 'size_bytes'])
    try:
        for segment in segments:
            exact_keys = set(segment.keys()) == required_keys
            if not exact_keys or '/' not in segment['path'].lstrip('/'):
                raise HTTPBadRequest('Invalid SLO Manifest File')
    except (AttributeError, TypeError):
        # Non-dict segments or a non-iterable top level.
        raise HTTPBadRequest('Invalid SLO Manifest File')
    return segments
class StaticLargeObject(object):
    """WSGI middleware implementing Static Large Object (SLO) support:
    manifest PUTs (?multipart-manifest=put) and recursive DELETEs
    (?multipart-manifest=delete)."""

    def __init__(self, app, conf):
        self.conf = conf
        self.app = app
        self.logger = get_logger(conf, log_route='slo')
        # Operational limits, all overridable through the filter config.
        self.max_manifest_segments = int(self.conf.get('max_manifest_segments',
                                         1000))
        self.max_manifest_size = int(self.conf.get('max_manifest_size',
                                     1024 * 1024 * 2))
        self.min_segment_size = int(self.conf.get('min_segment_size',
                                    1024 * 1024))
        # Reused for deleting every segment referenced by a manifest.
        self.bulk_deleter = Bulk(
            app, {'max_deletes_per_request': self.max_manifest_segments})

    def handle_multipart_put(self, req):
        # Validate the manifest body: each listed segment must exist and
        # match the declared size and etag; then pass the manifest on to be
        # stored as the object body with X-Static-Large-Object set.
        try:
            vrs, account, container, obj = split_path(req.path, 1, 4, True)
        except ValueError:
            return self.app
        if req.content_length > self.max_manifest_size:
            raise HTTPRequestEntityTooLarge(
                "Manifest File > %d bytes" % self.max_manifest_size)
        if req.headers.get('X-Copy-From') or req.headers.get('Destination'):
            raise HTTPMethodNotAllowed(
                'Multipart Manifest PUTs cannot be Copy requests')
        if req.content_length is None and \
                req.headers.get('transfer-encoding', '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        parsed_data = parse_input(req.environ['wsgi.input'].read(self.max_manifest_size))
        problem_segments = []
        if len(parsed_data) > self.max_manifest_segments:
            raise HTTPRequestEntityTooLarge(
                'Number segments must be <= %d' % self.max_manifest_segments)
        total_size = 0
        # NOTE(review): out_content_type is unconditionally set just above,
        # so the fallback branch below is dead code -- confirm intent.
        out_content_type = 'application/json'
        if not out_content_type:
            out_content_type = 'text/plain'
        data_for_storage = []
        for index, seg_dict in enumerate(parsed_data):
            obj_path = '/'.join(
                ['', vrs, account, seg_dict['path'].lstrip('/')])
            try:
                seg_size = int(seg_dict['size_bytes'])
            except (ValueError, TypeError):
                raise HTTPBadRequest('Invalid Manifest File')
            # HEAD each segment through the rest of the pipeline to verify
            # its existence, size and etag.
            new_env = req.environ.copy()
            if isinstance(obj_path, unicode):
                obj_path = obj_path.encode('utf-8')
            new_env['PATH_INFO'] = obj_path
            new_env['REQUEST_METHOD'] = 'HEAD'
            new_env['swift.source'] = 'SLO'
            del(new_env['wsgi.input'])
            del(new_env['QUERY_STRING'])
            new_env['CONTENT_LENGTH'] = 0
            new_env['HTTP_USER_AGENT'] = \
                '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
            head_seg_resp = \
                Request.blank(obj_path, new_env).get_response(self.app)
            if head_seg_resp.status_int // 100 == 2:
                total_size += seg_size
                if seg_size != head_seg_resp.content_length:
                    problem_segments.append([quote(obj_path), 'Size Mismatch'])
                if seg_dict['etag'] != head_seg_resp.etag:
                    problem_segments.append([quote(obj_path), 'Etag Mismatch'])
                data_for_storage.append(
                    {'name': '/' + seg_dict['path'].lstrip('/'),
                     'bytes': seg_size,
                     'hash': seg_dict['etag']})
            else:
                problem_segments.append([quote(obj_path),
                                         head_seg_resp.status])
        if problem_segments:
            resp_body = get_response_body(
                out_content_type, {}, problem_segments)
            raise jresponse('-1', 'badrequest', req, 400, param=resp_body)
        # Re-point the request body at the normalized manifest JSON and let
        # the rest of the pipeline store it.
        env = req.environ
        env['swift.content_type_overriden'] = True
        env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True'
        json_data = json.dumps(data_for_storage)
        env['CONTENT_LENGTH'] = str(len(json_data))
        env['wsgi.input'] = StringIO(json_data)
        return self.app

    def handle_multipart_delete(self, req):
        # Fetch the manifest, bulk-delete every segment it references, then
        # let the pipeline delete the manifest object itself.
        new_env = req.environ.copy()
        new_env['REQUEST_METHOD'] = 'GET'
        del(new_env['wsgi.input'])
        new_env['QUERY_STRING'] = 'multipart-manifest=get'
        new_env['CONTENT_LENGTH'] = 0
        new_env['HTTP_USER_AGENT'] = \
            '%s MultipartDELETE' % req.environ.get('HTTP_USER_AGENT')
        new_env['swift.source'] = 'SLO'
        get_man_resp = \
            Request.blank('', new_env).get_response(self.app)
        if get_man_resp.status_int // 100 == 2:
            if not config_true_value(
                    get_man_resp.headers.get('X-Static-Large-Object')):
                raise HTTPBadRequest('Not an SLO manifest')
            try:
                manifest = json.loads(get_man_resp.body)
            except ValueError:
                raise HTTPServerError('Invalid manifest file')
            delete_resp = self.bulk_deleter.handle_delete(
                req,
                objs_to_delete=[o['name'].encode('utf-8') for o in manifest],
                user_agent='MultipartDELETE', swift_source='SLO')
            if delete_resp.status_int // 100 == 2:
                # delete the manifest file itself
                return self.app
            else:
                return delete_resp
        return get_man_resp

    @wsgify
    def __call__(self, req):
        """
        WSGI entry point: intercept SLO-specific object requests and pass
        everything else straight through to the pipeline.
        """
        try:
            vrs, account, container, obj = split_path(req.path, 1, 4, True)
        except ValueError:
            return self.app
        if obj:
            if req.method == 'PUT' and \
                    req.GET.get('multipart-manifest') == 'put':
                return self.handle_multipart_put(req)
            if req.method == 'DELETE' and \
                    req.GET.get('multipart-manifest') == 'delete':
                return self.handle_multipart_delete(req)
            # Reject client attempts to set the internal marker header.
            if 'X-Static-Large-Object' in req.headers:
                raise HTTPBadRequest(
                    request=req,
                    body='X-Static-Large-Object is a reserved header. '
                         'To create a static large object add query param '
                         'multipart-manifest=put.')
        return self.app
def filter_factory(global_conf, **local_conf):
    """Standard paste filter factory returning an SLO middleware wrapper."""
    conf = dict(global_conf)
    conf.update(local_conf)

    def slo_filter(app):
        return StaticLargeObject(app, conf)
    return slo_filter
| StarcoderdataPython |
24315 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import signal
import abc
import logging
from time import sleep
from multiprocessing import Process
from argparse import ArgumentParser
from levitas.lib.modificationmonitor import ModificationMonitor
from .settings import SettingMissing
log = logging.getLogger("levitas.lib.daemonize")
def cli(daemon_class, daemon_args=[], daemon_kwargs={}, umask=0):
    """
    Command-line interface to control a daemon.
    @param daemon_class: Subclass of L{AbstractDaemon}.
    @param daemon_args: Arguments to instantiate the daemon.
    @param daemon_kwargs: Named arguments to instantiate the daemon.
    @param umask: file mode creation mask.
    @return: True on success, False on failure (None when settings missing).
    """
    # NOTE(review): daemon_args/daemon_kwargs are mutable defaults shared
    # across calls -- callers must not mutate them.
    name = os.path.basename(sys.argv[0])
    options = CLIOptions(name)
    try:
        options.parse_args()
    except CLIOptionError as err:
        sys.stderr.write(str(err))
        sys.exit(1)
    sys.stdout.write("%s %s: " % (options.action or "start", name))
    # With --RELOADER the initial process only starts the file monitor;
    # the monitor re-runs us with MODIFICATIONMONITOR_STARTED in the env.
    if options.reloader and "MODIFICATIONMONITOR_STARTED" not in os.environ:
        sys.stdout.write("Start ModificationMonitor\n")
        ModificationMonitor()
        sys.exit(0)
    try:
        dz = Daemonizer(daemon_class,
                        chdir=os.getcwd(),
                        umask=umask,
                        daemon_args=daemon_args,
                        daemon_kwargs=daemon_kwargs)
        if dz.do_action(options.action, options.pidfile):
            sys.stdout.write("done\n")
            return True
        else:
            sys.stdout.write("failed\n")
            return False
    except SettingMissing as err:
        sys.stderr.write(err)
class AbstractDaemon:
    """Interface that daemons controlled by Daemonizer must implement."""

    # Bug fix: this attribute was named ``metaclass``, which is just an
    # ordinary class attribute and never installed ABCMeta, so the
    # @abstractmethod decorators below had no effect. ``__metaclass__`` is
    # the Python 2 hook this codebase (see the ``file()`` call in
    # Daemonizer.daemonize) relies on.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def start(self):
        """Start the daemon's main work; runs in the daemonized process."""
        pass

    @abc.abstractmethod
    def stop(self):
        """Stop the daemon; called from the signal handler before exit."""
        pass
class Daemonizer(Process):
    """Runs an AbstractDaemon subclass as a (optionally daemonized)
    subprocess, managing pidfile, signals and start/stop/restart actions."""

    def __init__(self, daemon_class,
                 chdir="/", umask=0,
                 daemon_args=[], daemon_kwargs={}):
        # Fail early if the caller passed a class without the required
        # start()/stop() interface.
        if not issubclass(daemon_class, AbstractDaemon):
            raise TypeError("%s is not subclass of %s"
                            % (str(daemon_class), str(AbstractDaemon)))
        Process.__init__(self)
        self.daemon_class = daemon_class
        self.chdir = chdir
        self.umask = umask
        # NOTE(review): mutable default arguments -- safe only as long as
        # callers never mutate them.
        self.daemon_args = daemon_args
        self.daemon_kwargs = daemon_kwargs
        self.pidfile = None
        self.daemon_process = None
        # True only for the "start"/"restart" actions; "foreground" keeps
        # the process attached to the terminal.
        self._daemonize = False

    def read_pidfile(self):
        # Returns the pid stored in the pidfile, or None if unreadable.
        try:
            f = open(self.pidfile, "r")
            pid = int(f.read().strip())
            f.close()
        except IOError:
            pid = None
        return pid

    def do_action(self, action, pidfile):
        """Dispatch "start"/"stop"/"restart"/"foreground"; unknown actions
        fall back to "foreground". Returns True on success."""
        if action not in ["start", "stop", "restart", "foreground"]:
            action = "foreground"
        self.pidfile = pidfile
        if pidfile is not None:
            pid = self.read_pidfile()
        else:
            pid = None
        if action == "start":
            return self.do_start_action(pid)
        elif action == "stop":
            return self.do_stop_action(pid)
        elif action == "restart":
            if self.do_stop_action(pid):
                pid = self.read_pidfile()
                return self.do_start_action(pid)
            else:
                return False
        elif action == "foreground":
            # Start as a subprocess without making a daemon
            self.start()
            return True

    def do_start_action(self, pid):
        # Refuse to start while a pidfile (stale or live) exists.
        if pid:
            msg = "Start aborted, pid-file '%s' exist.\n"
            sys.stderr.write(msg % self.pidfile)
            return False
        self._daemonize = True
        self.start()
        return True

    def do_stop_action(self, pid):
        if not pid:
            msg = "Could not stop process, missing pid-file '%s'.\n"
            sys.stderr.write(msg % self.pidfile)
            return False
        try:
            # Keep signalling until the process is gone (kill raises
            # OSError once the pid no longer exists).
            while True:
                os.kill(pid, signal.SIGTERM)
                sleep(0.1)
        except OSError:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
            return True

    def setsignals(self):
        # Any of the usual termination signals shuts the daemon down.
        signal.signal(signal.SIGTERM, self.sigexit)
        signal.signal(signal.SIGHUP, self.sigexit)
        signal.signal(signal.SIGINT, self.sigexit)
        signal.signal(signal.SIGQUIT, self.sigexit)

    def sigexit(self, sig, frame):
        # Signal handler: stop the daemon cleanly and exit the process.
        log.debug("Stop process")
        self.daemon_process.stop()
        sys.exit(0)

    def run(self):
        # Process entry point (called in the child by multiprocessing).
        # Make a daemon
        if self._daemonize:
            self.daemonize()
        try:
            self.start_process()
        except:
            raise

    def start_process(self):
        # Install signal handlers, then instantiate and run the daemon.
        self.setsignals()
        os.chdir(self.chdir)
        self.daemon_process = self.daemon_class(*self.daemon_args,
                                                **self.daemon_kwargs)
        self.daemon_process.start()

    def daemonize(self):
        # Classic single-fork daemonization: detach from the parent,
        # redirect stdin, start a new session and write the pidfile.
        pid = os.fork()
        if pid != 0:
            # Parent
            os._exit(0)
        # Child
        os.close(0)
        sys.stdin = sys.__stdin__ = open("/dev/null")
        os.chdir(self.chdir)
        os.umask(self.umask)
        os.setsid()
        pid = str(os.getpid())
        if self.pidfile:
            # NOTE(review): ``file()`` is Python 2 only -- this module will
            # not run unmodified on Python 3.
            f = file(self.pidfile, "w+")
            f.write("%s\n" % pid)
            f.close()
class CLIOptionError(Exception):
    """Raised when command-line options are missing or invalid."""
class CLIOptions(object):
    """Parses the daemon control command line and configures logging."""

    def __init__(self, name):
        # *name* is the executable's basename; used for the default pidfile.
        self.name = name
        self.parser = ArgumentParser()
        self.pidfile = None
        self.action = None
        self.parser.add_argument("action", type=str, nargs='?',
                                 choices=["start", "stop", "restart", "foreground"])
        self.parser.add_argument("-l", "--logfile",
                                 dest="logfile",
                                 type=str,
                                 help="Path to logfile (optional)")
        self.parser.add_argument("-c", "--logfilecount",
                                 dest="logfilecount",
                                 type=int, default=0,
                                 help="Count of old logfiles to be saved. (default: 0)")
        self.parser.add_argument("-v", "--verbose",
                                 dest="verbose",
                                 action="store_true",
                                 help="vebose output")
        self.parser.add_argument("-s", "--SETTINGS",
                                 dest="settings_module",
                                 type=str,
                                 help="SETTINGS module (required)",
                                 metavar="SETTINGS_MODULE")
        self.parser.add_argument("-r", "--RELOADER",
                                 dest="reloader",
                                 action="store_true",
                                 help="Start with autoreloader")
        self.parser.add_argument("-p", "--pidfile",
                                 dest="pidfile",
                                 type=str,
                                 default="/var/run/%s.pid" % self.name,
                                 help="pidfile")

    def parse_args(self):
        """Parse sys.argv, export LEVITAS_SETTINGS, and set up logging for
        the "start"/"foreground" actions. Raises CLIOptionError when the
        required settings module is missing."""
        args = self.parser.parse_args()
        logfile = args.logfile
        logfilecount = args.logfilecount
        self.pidfile = args.pidfile
        self.action = args.action or "foreground"
        self.reloader = args.reloader
        if hasattr(args, "settings_module"):
            if args.settings_module:
                # The settings module is consumed elsewhere via this env var.
                os.environ["LEVITAS_SETTINGS"] = args.settings_module
            else:
                self.parser.print_help()
                msg = "option --setting required \n\n"
                raise CLIOptionError(msg)
        if self.action == "start":
            self._initLogging(args.verbose, logfile, logfilecount)
        elif self.action == "foreground":
            # Foreground runs default to console logging.
            if logfile is None:
                logfile = "console"
            self._initLogging(args.verbose, logfile, logfilecount)

    def _initLogging(self, verbose=False, logfile=None, logfilecount=0):
        # Configure the root logger: console, rotating file, or nothing.
        log = logging.getLogger()
        if logfile == "console":
            h = logging.StreamHandler()
        elif logfile is not None:
            from logging.handlers import RotatingFileHandler
            # Rotate an existing logfile so each start gets a fresh file.
            doRotation = True if os.path.exists(logfile) else False
            h = RotatingFileHandler(logfile, backupCount=logfilecount)
            if doRotation:
                h.doRollover()
        else:
            return
        if verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(name)s "
                                      "- %(levelname)s - %(message)s")
        h.setFormatter(formatter)
        log.addHandler(h)
| StarcoderdataPython |
90120 | <filename>obs_movieconverter.py<gh_stars>0
""" Converts obs movies to a format usable by imovie """
import os
import sys
from shutil import copyfile
# Select this year's OBS recordings in the current directory.
files = sorted(f for f in os.listdir('.') if '.mp4' in f and '2020' in f)
out_files = ['new_' + f + '.mov' for f in files]

# Re-encode each recording into an iMovie-friendly .mov with ffmpeg.
rtn_codes = []
for src, out in zip(files, out_files):
    cmd = ('ffmpeg -i ' + src +
           ' -c:a aac -c:v libx264 -crf 20 -preset fast -f mov ' + out)
    print(cmd)
    # Bug fix: the return code used to be overwritten with 0 immediately
    # after os.system(), which made every conversion look successful.
    rtn_codes.append(os.system(cmd))

# Report failures. Bug fix: any_failed was never set, so the abort branch
# below could never trigger and broken output could overwrite originals.
any_failed = False
for rtn, src in zip(rtn_codes, files):
    if rtn != 0:
        print('Exited because of error code ' + str(rtn) + ' in file ' + src)
        any_failed = True

if any_failed:
    print('One or more files failed, aborting file replacement')
    sys.exit(0)

# All conversions succeeded: replace the originals with the converted files.
for src, out in zip(files, out_files):
    copyfile(out, src)
3322633 | <reponame>rcalfredson/objects_counting_dmap
"""Looper implementation."""
from os import error
from typing import Optional, List
import cv2
from dual_loss_helper import get_loss_weight_function
import torch
import numpy as np
import matplotlib
import matplotlib.axes
import timeit
class Looper:
    """Looper handles epoch loops, logging, and plotting."""
    def __init__(
        self,
        network: torch.nn.Module,
        device: torch.device,
        config: dict,
        optimizer: torch.optim.Optimizer,
        data_loader: torch.utils.data.DataLoader,
        dataset_size: int,
        plots: Optional[matplotlib.axes.Axes] = None,
        validation: bool = False,
        left_col_plots: str = None,
        rand_samp_mult: int = None,
    ):
        """
        Initialize Looper.
        Args:
            network: already initialized model
            device: a device model is working on
            config: dict of settings for the training
            optimizer: already initialized optimizer link to network parameters
            data_loader: already initialized data loader
            dataset_size: no. of samples in dataset
            plot: matplotlib axes
            validation: flag to set train or eval mode
            left_col_plots: 'scatter' or 'mae'; selects what the left plot
                column shows (required when ``plots`` is given)
            rand_samp_mult: training-time multiplier applied to
                ``dataset_size`` when computing ``self.size``
        """
        self.network = network
        self.device = device
        # "mse", "mae" or "dual" — selects which loss set_up_loss() builds.
        self.loss_tp = config["loss"]
        self.config = config
        # Set by run() when training has diverged past minNumEpochs.
        self.should_quit_early = False
        self.set_up_loss()
        self.optimizer = optimizer
        self.loader = data_loader
        self.validation = validation
        self.plots = plots
        # One entry appended per epoch by run().
        self.running_loss = []
        if self.loss_tp == "dual":
            # Track both components of the dual loss separately for plotting.
            self.running_loss_by_pixel = []
            self.running_loss_by_egg_ct = []
        self.running_mean_abs_err = []
        # Y-axis cap of the MAE plot; doubles as the divergence threshold
        # used by the early-abandon check in run().
        self.mean_abs_err_ylim_max = 2
        if plots is not None:
            assert (
                left_col_plots is not None
            ), "left_col_plots must have a value if plots are set"
            self.left_col_plots = left_col_plots
        self.rand_samp_mult = rand_samp_mult
        # Effective number of samples per epoch; the multiplier only applies
        # during training.
        self.size = dataset_size * (
            rand_samp_mult if rand_samp_mult is not None and not validation else 1
        )
    def set_up_loss(self):
        # Builds self.loss according to self.loss_tp; the "dual" variant is a
        # weighted sum of a pixel-wise MSE term and a per-image count term.
        def mean_abs_error_loss(result, label):
            # Mean absolute egg-count error per image; density maps are
            # scaled by 100, hence the /100 before the configured divisor.
            errors = []
            for true, predicted in zip(label, result):
                actual = torch.sum(true)
                errors.append(
                    torch.abs(actual - torch.sum(predicted))
                    / 100
                    / self.config["maeLossDivisor"]
                )
            return torch.mean(torch.stack(errors))
        def mse_false_pos_penalty_loss(result, label):
            diffs = result - label
            # NOTE(review): both torch.where branches are 1.0, so the
            # false-positive penalty is currently a no-op (plain MSE) —
            # confirm whether this was intentionally disabled.
            coeffs = torch.where(diffs > 0, 1., 1.)
            scaled_diffs = torch.mul(diffs, coeffs)
            return (scaled_diffs**2).mean()
        if self.loss_tp == "mse":
            self.loss = torch.nn.MSELoss()
        elif self.loss_tp == "mae":
            self.loss = mean_abs_error_loss
        elif self.loss_tp == "dual":
            # Loss weights, refreshed every dualOptions["period"] batches.
            self.coeffs = []
            self.n_batches_since_reset = 0
            # self.loss_mse = torch.nn.MSELoss()
            self.loss_mse = mse_false_pos_penalty_loss
            self.loss_weight_fn = get_loss_weight_function(self.config)
            def dual_loss(result, label):
                # Refresh the weights periodically (and on first use).
                if (
                    self.n_batches_since_reset >= self.config["dualOptions"]["period"]
                    or len(self.coeffs) == 0
                ):
                    self.coeffs = self.loss_weight_fn(self.epoch_number)
                    self.n_batches_since_reset = 0
                self.n_batches_since_reset += 1
                loss_1 = self.loss_mse(result, label)
                loss_2 = mean_abs_error_loss(result, label)
                loss_sum = (self.coeffs[0] * loss_1).add(loss_2 * self.coeffs[1])
                # Returns (total, pixel-wise term, count term) so run() can
                # log the components separately.
                return loss_sum, loss_1, loss_2
            self.loss = dual_loss
    def run(self, epoch_number: int):
        """Run a single epoch loop.
        Returns:
            Mean absolute error.
        """
        # reset current results and add next entry for running loss
        self.true_values = []
        self.predicted_values = []
        self.err = []
        self.abs_err = []
        self.mean_err = []
        self.running_loss.append(0)
        self.epoch_number = epoch_number
        if self.loss_tp == "dual":
            self.running_loss_by_pixel.append(0)
            self.running_loss_by_egg_ct.append(0)
        # set a proper mode: train or eval
        self.network.train(not self.validation)
        for image, label in self.loader:
            # abs_err_for_batch = []
            # move images and labels to given device
            image = image.to(self.device)
            label = label.to(self.device)
            # clear accumulated gradient if in train mode
            if not self.validation:
                self.optimizer.zero_grad()
            # get model prediction (a density map)
            result = self.network(image)
            # The network output can be smaller than the label map; record
            # whether the height/width difference is odd (1-pixel asymmetry)
            # before cropping the label below.
            asymmetryCorrs = {"v": 0, "h": 0}
            hDiff = label.shape[-1] - result.shape[-1]
            if hDiff % 2 > 0:
                asymmetryCorrs["h"] = 1
            else:
                asymmetryCorrs["h"] = 0
            vDiff = label.shape[-2] - result.shape[-2]
            # vertExcess = int(vDiff / 2)
            if vDiff % 2 > 0:
                asymmetryCorrs["v"] = 1
            else:
                asymmetryCorrs["v"] = 0
            # loop over batch samples
            for true, predicted in zip(label, result):
                # integrate a density map to get no. of objects
                # note: density maps were normalized to 100 * no. of objects
                # to make network learn better
                true_counts = torch.sum(true).item() / 100
                predicted_counts = torch.sum(predicted).item() / 100
                # print("true counts:", true_counts)
                # print("and predicted:", predicted_counts)
                # update current epoch results
                self.err.append(true_counts - predicted_counts)
                self.abs_err.append(abs(self.err[-1]))
                # abs_err_for_batch.append(abs(self.err[-1]))
                self.true_values.append(true_counts)
                self.predicted_values.append(predicted_counts)
            # self.abs_err += abs_err_for_batch
            # print("absolute errors used for the loss calculation:", abs_err_for_batch)
            # input()
            # Crop the label (bottom/right edges) to the network output size.
            label = label[:, :, : label.shape[-2] - vDiff, : label.shape[-1] - hDiff]
            # calculate loss and update running loss
            if self.loss_tp != "dual":
                loss = self.loss(result, label)
            else:
                loss_results = self.loss(result, label)
                loss = loss_results[0]
                # not sure how to plot this.
                self.running_loss_by_pixel[-1] += (
                    image.shape[0] * loss_results[1].item() / self.size
                )
                self.running_loss_by_egg_ct[-1] += (
                    image.shape[0] * loss_results[2].item() / self.size
                )
            # Weight each batch's contribution by its batch size.
            self.running_loss[-1] += image.shape[0] * loss.item() / self.size
            # update weights if in train mode
            if not self.validation:
                loss.backward()
                self.optimizer.step()
        # calculate errors and standard deviation
        self.update_errors()
        # Abandon runs whose MAE has never come under the threshold by the
        # time minNumEpochs training epochs have completed.
        if (
            not self.validation
            and self.config["abandonDivergentTraining"]
            and self.epoch_number + 1 == self.config["minNumEpochs"]
            and not np.any(
                np.asarray(self.running_mean_abs_err) <= self.mean_abs_err_ylim_max
            )
        ):
            self.should_quit_early = True
            return
        # update live plot
        if self.plots is not None:
            self.plot()
        # print epoch summary
        self.log()
        # print("how many steps in the epoch?", counter)
        return self.mean_abs_err
    def update_errors(self):
        """
        Calculate errors and standard deviation based on current
        true and predicted values.
        """
        # self.abs_err = [abs(error) for error in self.err]
        self.mean_err = sum(self.err) / self.size
        self.mean_abs_err = sum(self.abs_err) / self.size
        self.running_mean_abs_err.append(self.mean_abs_err)
        self.std = np.array(self.err).std()
    def plot(self):
        """Plot true vs predicted counts and loss."""
        # true vs predicted counts
        true_line = [[0, max(self.true_values)]] * 2  # y = x
        epochs = np.arange(1, len(self.running_loss) + 1)
        self.plots[0].cla()
        self.plots[0].set_title("Train" if not self.validation else "Valid")
        if self.left_col_plots == "scatter":
            self.plots[0].set_xlabel("True value")
            self.plots[0].set_ylabel("Predicted value")
            self.plots[0].plot(*true_line, "r-")
            self.plots[0].scatter(self.true_values, self.predicted_values)
        elif self.left_col_plots == "mae":
            self.plots[0].set_xlabel("Epoch")
            self.plots[0].set_ylabel("Mean absolute error")
            self.plots[0].set_ylim((0, self.mean_abs_err_ylim_max))
            self.plots[0].plot(epochs, self.running_mean_abs_err)
        # loss
        self.plots[1].cla()
        self.plots[1].set_title("Train" if not self.validation else "Valid")
        self.plots[1].set_xlabel("Epoch")
        self.plots[1].set_ylabel("Loss")
        self.plots[1].set_ylim((0, 0.2))
        self.plots[1].plot(
            epochs,
            self.running_loss,
            # NOTE(review): this compares the dualOptions dict itself to
            # "flip" — possibly a sub-key was intended; confirm.
            "h-"
            if self.loss_tp == "dual" and self.config["dualOptions"] == "flip"
            else "-",
            label="Loss" if self.loss_tp == "dual" else None,
            markersize=9,
        )
        if self.loss_tp == "dual":
            self.plots[1].plot(
                epochs, self.running_loss_by_egg_ct, label="Egg count loss"
            )
            self.plots[1].plot(
                epochs, self.running_loss_by_pixel, label="Pixel-wise loss"
            )
            self.plots[1].legend()
        matplotlib.pyplot.pause(0.01)
        matplotlib.pyplot.tight_layout()
    def log(self):
        """Print current epoch results."""
        print(
            f"{'Train' if not self.validation else 'Valid'}:\n"
            f"\tAverage loss: {self.running_loss[-1]:3.4f}\n"
            f"\tMean error: {self.mean_err:3.3f}\n"
            f"\tMean absolute error: {self.mean_abs_err:3.3f}\n"
            f"\tError deviation: {self.std:3.3f}"
        )
| StarcoderdataPython |
3388844 | <filename>Chapter05/TFIDFdemo/tfidf_scikitlearn.py
# given saksperar data set to generate the tf-tdf model and thenfor new document it sugesset us keywords
import numpy as np
import nltk
import string
import os
from nltk.stem.porter import *
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
from nltk.corpus import stopwords
def get_tokens():
with open('/home/jalaj/PycharmProjects/NLPython/NLPython/ch5/TFIDFdemo/shakes/shakes1.txt', 'r') as shakes:
text = shakes.read()
lowers = text.lower()
#remove the punctuation using the character deletion step of translate
no_punctuation = lowers.translate(None, string.punctuation)
tokens = nltk.word_tokenize(no_punctuation)
return tokens
tokens = get_tokens()
count = Counter(tokens)
#print count.most_common(10)
tokens = get_tokens()
filtered = [w for w in tokens if not w in stopwords.words('english')]
count = Counter(filtered)
#print count.most_common(100)
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
stemmer = PorterStemmer()
stemmed = stem_tokens(filtered, stemmer)
count = Counter(stemmed)
#print count.most_common(100)
path = '/home/jalaj/PycharmProjects/NLPython/NLPython/ch5/TFIDFdemo/shakes'
token_dict = {}
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
return stems
for subdir, dirs, files in os.walk(path):
for file in files:
file_path = subdir + os.path.sep + file
shakes = open(file_path, 'r')
text = shakes.read()
lowers = text.lower()
no_punctuation = lowers.translate(None, string.punctuation)
token_dict[file] = no_punctuation
# this can take some time
tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
tfs = tfidf.fit_transform(token_dict.values())
str = 'this sentence has unseen text such as computer but also king lord juliet'
response = tfidf.transform([str])
#print response
feature_names = tfidf.get_feature_names()
for col in response.nonzero()[1]:
print feature_names[col], ' - ', response[0, col]
feature_array = np.array(tfidf.get_feature_names())
tfidf_sorting = np.argsort(response.toarray()).flatten()[::-1]
n = 3
top_n = feature_array[tfidf_sorting][:n]
print top_n
n = 4
top_n = feature_array[tfidf_sorting][:n]
print top_n
| StarcoderdataPython |
1668562 | <filename>account/views.py
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from .forms import LoginForm, SignupForm
from .models import UserProfile
def login_view(request):
    """Render the login form (GET) or authenticate and log the user in (POST)."""
    if request.method == 'GET':
        form = LoginForm()
        return render(request, 'login.html', {'form': form})
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if not form.is_valid():
            return render(request, 'login.html', {'form': form})
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(username=username, password=password)
        if user is None:
            # Bad credentials: re-display the form. The original passed None
            # straight to login(), which raises.
            return render(request, 'login.html', {'form': form})
        login(request, user)
        return HttpResponseRedirect(reverse('home'))
@login_required
def logout_view(request):
    """Log the current user out and redirect to the home page.

    (Parameter renamed from the typo 'require'; Django passes the request
    positionally, so callers are unaffected.)
    """
    logout(request)
    return HttpResponseRedirect(reverse('home'))
def signup_view(request):
    """Render the signup form (GET) or create and log in a new user (POST)."""
    if request.method == 'GET':
        form = SignupForm()
        return render(request, 'signup.html', {'form': form})
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if not form.is_valid():
            return render(request, 'signup.html', {'form': form})
        data = form.cleaned_data
        username = data['username']
        password = data['password']
        email = data['email']
        # Check the username that will actually be created — the original
        # filtered on the email, letting duplicate usernames through.
        if User.objects.filter(username=username).exists():
            return render(request, 'signup.html', {'form': form})
        user = User.objects.create_user(username=username, password=password, email=email)
        # Bind the profile via the field name and persist it: a positional
        # argument fills the model's first field (the pk), and the original
        # never saved the profile. TODO confirm UserProfile's FK is 'user'.
        profile = UserProfile(user=user)
        profile.save()
        user = authenticate(username=username, password=password)
        login(request, user)
        return HttpResponseRedirect(reverse('home'))
| StarcoderdataPython |
3251901 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class ModifyScalingNotificationRequest(Request):
    """Request object for the scaling API's ModifyScalingNotification action.

    Each property is stored in the underlying request-parameter dict managed
    by the base Request class; getters return None when a value is unset.
    """

    def __init__(self):
        super(ModifyScalingNotificationRequest, self).__init__(
            'scaling', 'qcloudcliV1', 'ModifyScalingNotification', 'scaling.api.qcloud.com')

    def get_notificationId(self):
        """Return the 'notificationId' parameter."""
        params = self.get_params()
        return params.get('notificationId')

    def set_notificationId(self, notificationId):
        """Set the 'notificationId' parameter."""
        self.add_param('notificationId', notificationId)

    def get_notificationName(self):
        """Return the 'notificationName' parameter."""
        params = self.get_params()
        return params.get('notificationName')

    def set_notificationName(self, notificationName):
        """Set the 'notificationName' parameter."""
        self.add_param('notificationName', notificationName)

    def get_notificationTypes(self):
        """Return the 'notificationTypes' parameter."""
        params = self.get_params()
        return params.get('notificationTypes')

    def set_notificationTypes(self, notificationTypes):
        """Set the 'notificationTypes' parameter."""
        self.add_param('notificationTypes', notificationTypes)

    def get_receiversIds(self):
        """Return the 'receiversIds' parameter."""
        params = self.get_params()
        return params.get('receiversIds')

    def set_receiversIds(self, receiversIds):
        """Set the 'receiversIds' parameter."""
        self.add_param('receiversIds', receiversIds)

    def get_scalingGroupId(self):
        """Return the 'scalingGroupId' parameter."""
        params = self.get_params()
        return params.get('scalingGroupId')

    def set_scalingGroupId(self, scalingGroupId):
        """Set the 'scalingGroupId' parameter."""
        self.add_param('scalingGroupId', scalingGroupId)
| StarcoderdataPython |
1717303 | <gh_stars>1-10
# Python3
import functools
# 有限制修改區域
from fractions import gcd
def leastCommonDenominator(denominators):
    """Return the least common multiple of an iterable of positive integers.

    Fixes: uses math.gcd (fractions.gcd was deprecated and removed in
    Python 3.9) and integer floor division so the result stays an int
    (a * b / gcd produced a float).
    """
    from math import gcd
    return functools.reduce(lambda a, b: a * b // gcd(a, b), denominators)
| StarcoderdataPython |
66659 | <reponame>ecradock/wifihawk
DEFAULT_CONFIG_NAME = ".wifihawk.ini"
| StarcoderdataPython |
3201349 | <gh_stars>1-10
# CLUB ENTRY EDABIT SOLUTION:
def club_entry(word):
    """Return four times the alphabet position (a=1 … z=26) of the first
    letter in *word* that appears doubled; None when no letter is doubled.
    Assumes lowercase ASCII letters (ord(c) - 96 is the alphabet index)."""
    for current, following in zip(word, word[1:]):
        if current == following:
            return (ord(current) - 96) * 4
3244752 | # -*- coding: UTF-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dotted module paths used to import pluggable components dynamically.
BACKBONE_DIR='paddlepalm.backbone'
TASK_INSTANCE_DIR='paddlepalm.task_instance'
READER_DIR='paddlepalm.reader'
PARADIGM_DIR='paddlepalm.task_paradigm'
OPTIMIZER_DIR='paddlepalm.optimizer'
# Name of the entry-point function expected on an optimizer module.
OPTIMIZE_METHOD='optimize'
# Top-level config keys that must be present, mapped to their expected types.
REQUIRED_ARGS={
        'task_instance': str,
        'backbone': str,
        'optimizer': str,
        'learning_rate': float,
        'batch_size': int
    }
# Top-level config keys that may be present, mapped to their expected types.
OPTIONAL_ARGS={
        'mix_ratio': str,
        'target_tag': str,
        'reuse_rag': str
    }
# Per-task config keys that must be present, mapped to their expected types.
TASK_REQUIRED_ARGS={
        'paradigm': str,
        'reader': str,
        'train_file': str
    }
| StarcoderdataPython |
3352216 | <filename>sim/simulation.py<gh_stars>1-10
import math
import casadi as ca
import numpy as np
from models.geometry_utils import RectangleRegion
from sim.logger import (
ControllerLogger,
GlobalPlannerLogger,
LocalPlannerLogger,
SystemLogger,
)
class System:
    """Container for a simulated system's time, state, geometry and dynamics."""

    def __init__(self, time=0.0, state=None, geometry=None, dynamics=None):
        """Store the clock value plus the optional state/geometry/dynamics."""
        self._dynamics = dynamics
        self._geometry = geometry
        self._state = state
        self._time = time
class Robot:
    """Ties a system together with its planners, controller and loggers."""

    def __init__(self, system):
        """Wrap *system* and create its logger."""
        self._system = system
        self._system_logger = SystemLogger()

    def set_global_planner(self, global_planner):
        """Attach a global planner along with a fresh logger for it."""
        self._global_planner = global_planner
        self._global_planner_logger = GlobalPlannerLogger()

    def set_local_planner(self, local_planner):
        """Attach a local planner along with a fresh logger for it."""
        self._local_planner = local_planner
        self._local_planner_logger = LocalPlannerLogger()

    def set_controller(self, controller):
        """Attach a controller along with a fresh logger for it."""
        self._controller = controller
        self._controller_logger = ControllerLogger()

    def run_global_planner(self, sys, obstacles, goal_pos):
        """Generate and store a global path, then record it via the logger."""
        path = self._global_planner.generate_path(sys, obstacles, goal_pos)
        self._global_path = path
        self._global_planner.logging(self._global_planner_logger)

    def run_local_planner(self):
        """Generate and store a local trajectory along the global path."""
        trajectory = self._local_planner.generate_trajectory(
            self._system, self._global_path
        )
        self._local_trajectory = trajectory
        self._local_planner.logging(self._local_planner_logger)

    def run_controller(self, obstacles):
        """Compute and store the next control input for the current plans."""
        action = self._controller.generate_control_input(
            self._system, self._global_path, self._local_trajectory, obstacles
        )
        self._control_action = action
        self._controller.logging(self._controller_logger)

    def run_system(self):
        """Advance the system one step using the latest control action."""
        self._system.update(self._control_action)
        self._system.logging(self._system_logger)
class SingleAgentSimulation:
    """Drives one robot through its plan/control/step loop."""

    def __init__(self, robot, obstacles, goal_position):
        """Remember the robot, the obstacle set and the navigation goal."""
        self._robot = robot
        self._obstacles = obstacles
        self._goal_position = goal_position

    def run_navigation(self, navigation_time):
        """Plan the global path once, then repeat (local plan -> control ->
        system step) until the simulated clock reaches *navigation_time*."""
        robot = self._robot
        robot.run_global_planner(robot._system, self._obstacles, self._goal_position)
        while robot._system._time < navigation_time:
            robot.run_local_planner()
            robot.run_controller(self._obstacles)
            robot.run_system()
| StarcoderdataPython |
1655561 | <reponame>MiguelTeixeiraUFPB/PythonM3
def lecontatosdearquivo(nomearquivo):
    """Read the contacts file and return a list of rows.

    Each line is '#'-separated: [nome, telefone, bairro, dia, mês].
    """
    linhas = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(nomearquivo, 'r') as arquivo:
        for linha in arquivo:
            linhas.append(linha.strip().split('#'))
    return linhas
def listacontatos(contatos):
    """Print every contact on one formatted line.

    Fixes the garbled format string ('bairro::{...}aniversário' had a double
    colon and missing separators).
    """
    for linha in contatos:
        # Each row is [nome, telefone, bairro, dia, mês].
        print(f'nome: {linha[0]}, telefone: {linha[1]}, bairro: {linha[2]}, aniversário: {linha[3]}, mês: {linha[4]}')
def pesquisarAniversarioP_Nome(nomepessoa, contatos):
    """Print 'dia/mês' for every contact whose name equals *nomepessoa*."""
    for nome, telefone, bairro, aniversário, mes in contatos:
        if nome != nomepessoa:
            continue
        print(f'{aniversário}/{mes}')
def pesquisarPessoasDobairro(contatosbairro, contatos):
    """Print the name of every contact living in *contatosbairro*."""
    for nome, telefone, bairro, aniversário, mes in contatos:
        if bairro != contatosbairro:
            continue
        print(nome)
def pesquisarAniversariantedoMes(mesaniversariante, contatos):
    """Print the name of every contact whose birthday month matches."""
    for nome, telefone, bairro, aniversário, mes in contatos:
        if mes != mesaniversariante:
            continue
        print(nome)
def gravaContatos(contatos, nomeArquivo):
    """Write the contacts to *nomeArquivo*, one '#'-separated line each.

    Fields are coerced with str() so rows carrying numeric values (e.g.
    contacts added interactively) no longer crash the string concatenation;
    'with' ensures the file is closed even on error.
    """
    with open(nomeArquivo, 'w') as arquivo:
        for nome, telefone, bairro, dia, mes in contatos:
            arquivo.write('#'.join(str(campo) for campo in (nome, telefone, bairro, dia, mes)) + '\n')
def listaContatosIniciadosCom(letra, contatos):
    """Print every contact whose name begins with *letra* (case-insensitive)."""
    print("CONTATOS COM PREFIXO:", letra)
    prefixo = letra.upper()
    for nome, telefone, bairro, dia, mes in contatos:
        if nome.upper()[0] != prefixo:
            continue
        print("Nome:", nome, ", Telefone:", telefone, ", Bairro:", bairro, ", Aniversário:", dia, "/", mes)
#PROGRAMA PRINCIPAL
# Interactive menu over the '#'-separated contacts file.
contatos = lecontatosdearquivo('agendariotinto.txt')
numcolunas = len(contatos[0])
numlinhas = len(contatos)
while True:
    escolha = int(input('digite uma opção \n 1) Listar contatos\n 2) pesquisar aniversariante de ... \n 3) pesquisar contatos do bairro \n 4) pesquisar aniversariantes do mês \n 5) cadastrar um contato\n 6) salvar dados\n 7) listar contatos com letra\n 8) sair'))
    if escolha == 1:
        listacontatos(contatos)
    elif escolha == 2:
        nomepessoa = str(input('nome da pessoa'))
        pesquisarAniversarioP_Nome(nomepessoa, contatos)
    elif escolha == 3:
        contatosbairro = str(input('contatos do Bairro: '))
        pesquisarPessoasDobairro(contatosbairro, contatos)
    elif escolha == 4:
        mesaniversariante = str(input('aniversariante do mês'))
        pesquisarAniversariantedoMes(mesaniversariante, contatos)
    elif escolha == 5:
        nomepessoa = input('nome para cadastro')
        # Keep every field as a string: the file format is text and
        # gravaContatos concatenates with '#'. The original converted
        # telefone/dia to int, which made option 6 crash on new contacts.
        telefone = input('telefone para cadastro')
        bairropessoa = input('bairro para cadastro ')
        dia = input('dia para cadastro')
        mes = input('mês para cadastro')
        contatonovo = [nomepessoa, telefone, bairropessoa, dia, mes]
        contatos.append(contatonovo)
    elif escolha == 6:
        gravaContatos(contatos, 'agendariotinto.txt')
    elif escolha == 7:
        letra = str(input('letra ')).upper()
        listaContatosIniciadosCom(letra, contatos)
    elif escolha == 8:
        break
3200660 | <reponame>adiitya-dey/datastructures
import logging
import numpy as np
class BasicSort:
    """Sorts a list in place using the algorithm selected by name.

    The constructor keeps a reference to *arr* (no copy) and dispatches to
    the chosen sort immediately, so the caller's list is sorted as a side
    effect of constructing the object.
    """

    def __init__(self, arr, sortfn):
        """
        Args:
            arr: list of mutually comparable items; sorted in place.
            sortfn: case-insensitive algorithm name — "insertsort",
                "bubblesort", "mergesort", "quicksort", "pythonsort" or
                "numpysort". An unknown name leaves *arr* unchanged.
        """
        logging.debug("Sorting class is initialized.")
        self.__arr = arr
        # (Removed a stray debug print(sortfn) left in the original.)
        self.__sortfn = sortfn.lower()
        last = len(self.__arr) - 1
        if self.__sortfn == "insertsort":
            self.insertsort()
        elif self.__sortfn == "bubblesort":
            self.bubblesort()
        elif self.__sortfn == "mergesort":
            self.mergesort(0, last)
        elif self.__sortfn == "quicksort":
            self.quicksort(0, last)
        elif self.__sortfn == "pythonsort":
            self.pythonsort()
        elif self.__sortfn == "numpysort":
            self.numpysort()

    def insertsort(self):
        """In-place insertion sort: O(n^2) worst case, O(n) when nearly sorted."""
        logging.debug("Initializing InsertSort Function.")
        n = len(self.__arr)
        for i in range(1, n):
            key = self.__arr[i]
            j = i - 1
            # Shift larger elements right until key's slot is found.
            while j >= 0 and self.__arr[j] > key:
                self.__arr[j + 1] = self.__arr[j]
                j = j - 1
            self.__arr[j + 1] = key
        logging.debug("Sorted Array after InsertSort : {}".format(self.__arr))
        logging.debug("InsertSort Function completed successfully.")

    def bubblesort(self):
        """In-place bubble sort: swap adjacent out-of-order pairs each pass."""
        logging.debug("Initializing BubbleSort Function.")
        n = len(self.__arr)
        for j in range(1, n):
            for i in range(0, n - 1):
                if self.__arr[i] > self.__arr[i + 1]:
                    self.__arr[i], self.__arr[i + 1] = self.__arr[i + 1], self.__arr[i]
        logging.debug("Sorted Array after BubbleSort : {}".format(self.__arr))
        logging.debug("BubbleSort Function completed successfully.")

    def mergesort(self, p, r):
        """Recursive merge sort of self.__arr[p..r] (inclusive bounds)."""
        logging.debug("Initializing Merge Sort Function.")
        if p < r:
            q = (p + r) // 2  # floor midpoint
            self.mergesort(p, q)
            self.mergesort(q + 1, r)
            self.merge(p, q, r)
        logging.debug("Sorted Array after MergeSort : {}".format(self.__arr))
        logging.debug("MergeSort Function completed successfully.")

    def merge(self, p, q, r):
        """Merge sorted runs [p..q] and [q+1..r] (CLRS-style infinity
        sentinels — numeric elements only)."""
        A = self.__arr
        n1 = q - p + 1
        n2 = r - q
        left = [0] * (n1 + 1)
        right = [0] * (n2 + 1)
        for i in range(0, n1):
            left[i] = A[p + i]
        for j in range(0, n2):
            right[j] = A[q + j + 1]
        left[n1] = float('inf')
        right[n2] = float('inf')
        i = 0
        j = 0
        for k in range(p, r + 1):
            if left[i] <= right[j]:
                A[k] = left[i]
                i = i + 1
            else:
                A[k] = right[j]
                j = j + 1

    def quicksort(self, p, r):
        """Recursive Lomuto-partition quicksort of self.__arr[p..r]."""
        if p < r:
            q = self.partition(p, r)
            self.quicksort(p, q - 1)
            self.quicksort(q + 1, r)
        logging.debug("Sorted Array after Quick Sort : {}".format(self.__arr))
        logging.debug("QuickSort Function completed successfully.")

    def partition(self, p, r):
        """Lomuto partition around pivot A[r]; returns the pivot's final index."""
        A = self.__arr
        x = A[r]
        i = p - 1
        for j in range(p, r):
            if A[j] <= x:
                i = i + 1
                A[i], A[j] = A[j], A[i]
        A[i + 1], A[r] = A[r], A[i + 1]
        return i + 1

    def pythonsort(self):
        """Built-in Timsort (was dispatched by __init__ but never defined)."""
        self.__arr.sort()

    def numpysort(self):
        """numpy sort (was dispatched by __init__ but never defined);
        results are written back into the original list in place."""
        self.__arr[:] = np.sort(self.__arr)
84687 | #! /usr/bin/env python3
import numpy as np
from sklearn.neighbors import NearestNeighbors
from computeNeighborWeights import computeNeighborWeights
from computeWeightedMRecons import computeWeightedMRecons
from computeFeatures import computeFeatures
def computeFullERD(MeasuredValues,MeasuredIdxs,UnMeasuredIdxs,Theta,SizeImage,TrainingInfo,Resolution,ImageType):
    """Recompute the ERD for every unmeasured pixel from scratch.

    Returns (ERDValues, ReconValues, ReconImage).
    """
    # Neighborhood of each unmeasured location among the measured ones.
    NeighborValues, NeighborWeights, NeighborDistances = FindNeighbors(
        TrainingInfo, MeasuredIdxs, UnMeasuredIdxs, MeasuredValues, Resolution)
    # Weighted reconstruction over the full image.
    ReconValues, ReconImage = ComputeRecons(
        TrainingInfo, NeighborValues, NeighborWeights, SizeImage,
        UnMeasuredIdxs, MeasuredIdxs, MeasuredValues)
    # Per-location feature vectors fed to the trained regressor.
    PolyFeatures = computeFeatures(
        MeasuredValues, MeasuredIdxs, UnMeasuredIdxs, SizeImage,
        NeighborValues, NeighborWeights, NeighborDistances, TrainingInfo,
        ReconValues, ReconImage, Resolution, ImageType)
    ERDValues = Theta.predict(PolyFeatures)
    return (ERDValues, ReconValues, ReconImage)
def updateERD(Mask,MeasuredIdxs,UnMeasuredIdxs,MeasuredValues,Theta,SizeImage,TrainingInfo,Resolution,ImageType,NewIdxs,NumSamples,UpdateERDParams,ReconValues,ReconImage,ERDValues,MaxIdxsVect,BatchSamplingParams):
    # Incremental ERD update: only pixels inside a window around the newly
    # measured location(s) are recomputed, instead of the whole image.
    # Drop the just-measured locations from the unmeasured-location vectors.
    ERDValues=np.delete(ERDValues,(MaxIdxsVect))
    ReconValues=np.delete(ReconValues,(MaxIdxsVect))
    # Radius chosen so a circular window would hold ~NumNbrs samples on
    # average, then clamped into [MinRadius, MaxRadius].
    SuggestedRadius = int(np.sqrt((1/np.pi)*(SizeImage[0]*SizeImage[1]*TrainingInfo.NumNbrs/NumSamples)))
    UpdateRadiusTemp=np.max([SuggestedRadius,UpdateERDParams.MinRadius]);
    UpdateRadius=int(np.min([UpdateERDParams.MaxRadius,UpdateRadiusTemp]));
    updateRadiusMat = np.zeros((SizeImage[0],SizeImage[1]))
    Done=0
    while(Done==0):
        # Mark a square window around each new measurement; BatchSamplingParams.Do
        # distinguishes single-sample ('N') from batch sampling.
        if BatchSamplingParams.Do == 'N':
            updateRadiusMat[max(NewIdxs[0]-UpdateRadius,0):min(NewIdxs[0]+UpdateRadius,SizeImage[0])][:,max(NewIdxs[1]-UpdateRadius,0):min(NewIdxs[1]+UpdateRadius,SizeImage[1])]=1
        else:
            for b in range(0,BatchSamplingParams.NumSamplesPerIter):
                updateRadiusMat[max(NewIdxs[b][0]-UpdateRadius,0):min(NewIdxs[b][0]+UpdateRadius,SizeImage[0])][:,max(NewIdxs[b][1]-UpdateRadius,0):min(NewIdxs[b][1]+UpdateRadius,SizeImage[1])]=1
        # Positions of the affected pixels within the unmeasured vector, and
        # their (row, col) coordinates in the image.
        updateIdxs = np.where(updateRadiusMat[Mask==0]==1)
        SmallUnMeasuredIdxs = np.transpose(np.where(np.logical_and(Mask==0,updateRadiusMat==1)))
        if SmallUnMeasuredIdxs.size==0:
            # Window held no unmeasured pixels; grow it and try again.
            UpdateRadius=int(UpdateRadius*UpdateERDParams.IncreaseRadiusBy)
        else:
            Done=1
    # Find neighbors of unmeasured locations
    SmallNeighborValues,SmallNeighborWeights,SmallNeighborDistances = FindNeighbors(TrainingInfo,MeasuredIdxs,SmallUnMeasuredIdxs,MeasuredValues,Resolution)
    # Perform reconstruction
    SmallReconValues=computeWeightedMRecons(SmallNeighborValues,SmallNeighborWeights,TrainingInfo)
    ReconImage[(np.logical_and(Mask==0,updateRadiusMat==1))]=SmallReconValues
    ReconImage[MeasuredIdxs[:,0],MeasuredIdxs[:,1]]=MeasuredValues
    # Compute features
    SmallPolyFeatures=computeFeatures(MeasuredValues,MeasuredIdxs,SmallUnMeasuredIdxs,SizeImage,SmallNeighborValues,SmallNeighborWeights,SmallNeighborDistances,TrainingInfo,SmallReconValues,ReconImage,Resolution,ImageType)
    # Compute ERD
    # SmallERDValues = SmallPolyFeatures.dot(Theta)
    SmallERDValues = Theta.predict(SmallPolyFeatures)
    # Splice the refreshed values back into the full unmeasured-pixel vectors.
    ReconValues[updateIdxs] = SmallReconValues
    ERDValues[updateIdxs] = SmallERDValues
    return(ERDValues,ReconValues)
def FindNeighbors(TrainingInfo, MeasuredIdxs, UnMeasuredIdxs, MeasuredValues, Resolution):
    """k-NN lookup: for each unmeasured location, find its NumNbrs nearest
    measured locations; return their values, weights and scaled distances."""
    knn = NearestNeighbors(n_neighbors=TrainingInfo.NumNbrs)
    knn.fit(MeasuredIdxs)
    NeighborDistances, neighbor_indices = knn.kneighbors(UnMeasuredIdxs)
    # Convert pixel distances to physical units.
    NeighborDistances = NeighborDistances * Resolution
    NeighborValues = MeasuredValues[neighbor_indices]
    NeighborWeights = computeNeighborWeights(NeighborDistances, TrainingInfo)
    return (NeighborValues, NeighborWeights, NeighborDistances)
def ComputeRecons(TrainingInfo, NeighborValues, NeighborWeights, SizeImage, UnMeasuredIdxs, MeasuredIdxs, MeasuredValues):
    """Weighted reconstruction: estimate each unmeasured pixel from its
    neighbors and assemble the full image (measured pixels keep their
    measured values). Returns (ReconValues, ReconImage)."""
    ReconValues = computeWeightedMRecons(NeighborValues, NeighborWeights, TrainingInfo)
    ReconImage = np.zeros((SizeImage[0], SizeImage[1]))
    rows, cols = UnMeasuredIdxs[:, 0], UnMeasuredIdxs[:, 1]
    ReconImage[rows, cols] = ReconValues
    ReconImage[MeasuredIdxs[:, 0], MeasuredIdxs[:, 1]] = MeasuredValues
    return (ReconValues, ReconImage)
| StarcoderdataPython |
3396408 | """
Module that contains simple in memory storage implementation
"""
class Storage(object):
    """Simple in-memory tweet store backed by a class-level list.

    Tweets are addressed by their *list index*, not by their 'id' field.
    The list is shared process-wide (class attribute).
    """
    _tweets = [{
        'id': 1,
        'name' : 'mladen',
        'tweet' : 'baba'}
    ]

    @classmethod
    def get_tweets(cls):
        """Return the live list of all tweets (not a copy)."""
        return cls._tweets

    @classmethod
    def get_tweet(cls, tweet_id):
        """Return the tweet at index *tweet_id*, or None for an invalid index."""
        try:
            return cls._tweets[tweet_id]
        except (IndexError, TypeError):
            # Narrowed from a bare 'except': out-of-range or non-int index.
            return None

    @classmethod
    def post_tweet(cls, tweet):
        """Append *tweet* (a dict) to the store."""
        cls._tweets.append(tweet)

    @classmethod
    def del_tweet(cls, tweet_id):
        """Delete the tweet at index *tweet_id*; return 'Deleted' or None."""
        try:
            del cls._tweets[tweet_id]
            return 'Deleted'
        except (IndexError, TypeError):
            return None
| StarcoderdataPython |
41357 | <gh_stars>10-100
# Copyright 2019 Cohesity Inc.
#
# Python example to list recent user_configurable unresolved alert unresolved Alerts.
#
# Usage: python list_unresolved_alerts.py --max_alerts 10
import argparse
import datetime
from cohesity_management_sdk.cohesity_client import CohesityClient
from cohesity_management_sdk.models.alert_state_list_enum import AlertStateListEnum
# Connection settings for the target Cohesity cluster — replace the
# placeholders before running.
CLUSTER_USERNAME = 'cluster_username'
CLUSTER_PASSWORD = '<PASSWORD>'  # anonymized placeholder; set a real password
CLUSTER_VIP = 'prod-cluster.cohesity.com'  # cluster virtual IP / hostname
DOMAIN = 'LOCAL'  # authentication domain
MAX_ALERTS = 100  # default value for the --max_alerts CLI argument
class Alerts(object):
"""
Class to display Alerts.
"""
def display_alerts(self, cohesity_client, max_alerts):
"""
Method to display the list of Unresolved Alerts
:param cohesity_client(object): Cohesity client object.
:return:
"""
alerts = cohesity_client.alerts
alerts_list = alerts.get_alerts(max_alerts=max_alerts,
alert_state_list=AlertStateListEnum.KOPEN)
for alert in alerts_list:
print ('{0:<10}\t\t{1:>8}\t{2:>10}'.format(self.epoch_to_date(alert.first_timestamp_usecs),
alert.alert_category,
alert.severity))
@staticmethod
def epoch_to_date(epoch):
"""
Method to convert epoch time in usec to date format
:param epoch(int): Epoch time of the job run.
:return: date(str): Date format of the job run.
"""
date_string = datetime.datetime.fromtimestamp(epoch/10**6).strftime('%m-%d-%Y %H:%M:%S')
return date_string
def main(args):
    """Connect to the cluster with the module-level credentials and print
    its unresolved alerts."""
    # To init client with user/pass.
    # Fixed: 'password=<PASSWORD>' was a bare anonymization placeholder
    # (a syntax error) — restored the reference to the module constant.
    cohesity_client = CohesityClient(cluster_vip=CLUSTER_VIP,
                                     username=CLUSTER_USERNAME,
                                     password=CLUSTER_PASSWORD,
                                     domain=DOMAIN)
    alerts = Alerts()
    alerts.display_alerts(cohesity_client, args.max_alerts)
if __name__ == '__main__':
    # CLI entry point: only the number of alerts to fetch is configurable.
    parser = argparse.ArgumentParser(description="Arguments needed to run this python process.")
    parser.add_argument("--max_alerts", help="Number of Alerts.", default=MAX_ALERTS)
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
93268 | <filename>EchoTest.py<gh_stars>0
# (c) Copyright 2014 Synapse Wireless, Inc.
"""
EchoTest.py - a simple benchmark used to determine how fast SNAP Connect can
communicate with a directly connected bridge. We use this to evaluate different
Python platforms, implementations, and serial drivers.
This example demonstrates one way to maximize throughput without choking the network.
Refer to comments throughout this file, plus the accompanying README.txt.
"""
import logging
from snapconnect import snap
import time
# NOTE: Ensure that the Feature bits setting on this device has both (0x100) second data CRC
# and (0x400) packet CRC disabled
BRIDGE_NODE = "\x4B\x42\x34" # <- Replace this with the address of your bridge node
# Note the hardcoded COM1 usage.
# You should set these to match YOUR available hardware
SERIAL_TYPE = snap.SERIAL_TYPE_RS232
#SERIAL_TYPE = snap.SERIAL_TYPE_SNAPSTICK100
#SERIAL_TYPE = snap.SERIAL_TYPE_SNAPSTICK200
# If you're on a unix platform, you'll need to specify the interface type differently than windows
# An example for a typical interface device is shown below
SERIAL_PORT = 0 # COM1
#SERIAL_PORT = '/dev/ttyS1'
NUMBER_OF_QUERIES = 100 # More polls == longer test
TIMEOUT = 1.0 # (in seconds) You might need to increase this if:
# 1) You change the RPC call being made
# If you are invoking some custom function of your own, and it takes longer for the
# nodes to respond - for example, some function that performs multiple analog readings
# and then computes a result.
# 2) You are benchmarking a remote node, and it is so many hops away that 1 second is too short.
# You could experiment with various size payloads.
# Note that PAYLOAD is just one of several parameters to the actual RPC call
#PAYLOAD = "A"
#PAYLOAD = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#PAYLOAD = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOP"
PAYLOAD = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890-=<({[]})=-!@#$" # this is the biggest payload that will fit (RPC_CRC == OFF, PACKET_CRC == OFF)
# The only way to get a bigger payload would be to shorten the callback function's name (currently "response_handler")
# Also note that if you were to turn *ON* the RPC_CRC feature, that would also use up
# two bytes of each packet, requiring the PAYLOAD string to be shortened by two bytes to compensate.
# Initialize global variables
test_complete = False
test_started = False
polls_sent = 0
replies = 0
sent_id = None
def initiate_test(repeats):
"""Kick off a test run"""
global poll_limit, start_time, test_started
test_started = True
poll_limit = repeats
start_time = time.time()
send_next_poll()
print 'Initiating test sequence'
def show_results():
"""Display some simple statistics about the test run"""
end_time = time.time()
delta = end_time - start_time
delta *= 1000 # so we can show it in integer milliseconds
print "%d queries, %d responses in %d milliseconds" % (polls_sent, replies, delta)
def send_next_poll():
    """Send the next poll (RPC) if there are any remaining, else flag the test as complete"""
    global test_complete, sent_id
    if polls_sent < poll_limit:
        #
        # Notice that here we are only invoking built-in SNAPpy functions, to eliminate
        # the need to put a SNAPpy script on the node.
        #
        # Notice that here we are sending the PAYLOAD "round-trip" — presumably
        # the node echoes str(PAYLOAD) back into response_handler() on this PC.
        sent_id = comm.rpc(BRIDGE_NODE, 'callback', 'response_handler', 'str', PAYLOAD)
        # If you only wanted to send the payload TO the node, you could do
        #comm.rpc(BRIDGE_NODE, 'callback', 'response_handler', 'len', PAYLOAD)
        # If you didn't want to send the PAYLOAD at all you could do something like
        #comm.rpc(BRIDGE_NODE, 'callback', 'response_handler', 'random')
    else:
        test_complete = True
def response_handler(value):
    """Handle an incoming reply to one of our poll RPCs."""
    global replies, timeoutEvent
    timeoutEvent.Stop() # Cancel the timeout, we DID get a reply
    replies += 1
    send_next_poll()
    # trailing comma + '\r' keep the status line updating in place (no newline)
    print 'received %d replies\r'%replies,
def timeout_handler():
    """Handle the LACK of a reply to a poll RPC: just move on to the next poll."""
    # A timed-out poll is simply skipped; `replies` is not incremented for it.
    send_next_poll()
def rpc_sent_handler(packet_id, future_use):
    """Handle handoff of each packet (HOOK_RPC_SENT callback)."""
    global polls_sent, timeoutEvent
    # Only count the packet if it is the RPC we queued in send_next_poll()
    if sent_id == packet_id:
        polls_sent += 1
        # Arm the reply timer; response_handler() cancels it when a reply arrives
        timeoutEvent = comm.scheduler.schedule(TIMEOUT, timeout_handler)
def serial_open_handler(serial_type, serial_port, addr):
    """HOOK_SERIAL_OPEN callback: start the benchmark once the port is open."""
    # Initiate the test after we know our serial port is opened
    initiate_test(NUMBER_OF_QUERIES)
def main():
    """Simple benchmark. Create a SNAP Connect instance, and use it to send a batch of RPC calls"""
    global comm
    # Create a SNAP Connect object to do communications (comm) for us
    comm = snap.Snap(funcs = {})
    comm.add_rpc_func('response_handler', response_handler)
    # By tying into HOOK_RPC_SENT events, we can avoid needlessly "piling up" RPC calls INSIDE THIS PC
    comm.set_hook(snap.hooks.HOOK_RPC_SENT, rpc_sent_handler)
    # By tying into the HOOK_SERIAL_OPEN event, we can wait until we know we have a connected device to start the test
    comm.set_hook(snap.hooks.HOOK_SERIAL_OPEN, serial_open_handler)
    comm.open_serial(SERIAL_TYPE, SERIAL_PORT)
    # Wait for all the queries to be sent, and all the replies to be received (or timed out)
    while not test_complete:
        comm.poll()
    show_results()
if __name__ == "__main__":
    # Notice that because this is a benchmark, we have set logging to the LEAST verbose level
    # (This script uses Python 2 print statements — run with a Python 2 interpreter.)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    main()
| StarcoderdataPython |
35634 | """
test_Payload.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from mock import MagicMock
from w3af.plugins.attack.payloads.base_payload import Payload
from w3af.plugins.attack.payloads.payloads.tests.test_payload_handler import (FakeReadShell,
FakeExecShell)
class TestBasePayload(unittest.TestCase):
    """Unit tests for the base Payload class: capabilities reported by
    can_run() and the run/run_api dispatch for exec-capable shells."""
    def setUp(self):
        # Payload backed by a read-only shell, reused by the capability tests.
        self.bp = Payload(FakeReadShell())
    def test_can_run(self):
        self.assertEqual(self.bp.can_run(), set())
    def test_run_only_read(self):
        # Running a payload on a read-only shell should raise AttributeError
        # (presumably because the read shell lacks an execute method).
        bp = Payload(FakeReadShell())
        self.assertRaises(AttributeError, bp.run, 'filename')
    def test_run_execute(self):
        class Executable(Payload):
            # Flags flipped by the overridden hooks so the test can verify
            # which code paths were actually taken.
            called_run_execute = False
            called_api_execute = False
            def run_execute(self, cmd):
                self.called_run_execute = True
                self.shell.execute(cmd)
            def api_execute(self, cmd):
                self.called_api_execute = True
        shell = FakeExecShell()
        shell.execute = MagicMock(return_value='')
        executable = Executable(shell)
        # NOTE(review): this asserts on self.bp (the read-shell fixture), not
        # on `executable`; possibly intended as executable.can_run() — confirm
        # against base_payload semantics.
        self.assertEqual(self.bp.can_run(), set())
        executable.run('command')
        self.assertTrue(executable.called_run_execute)
        self.assertEqual(executable.shell.execute.call_count, 1)
        executable.run_api('command')
        self.assertTrue(executable.called_api_execute)
| StarcoderdataPython |
3208778 | <reponame>Stevanus-Christian/tensorflow<filename>tensorflow/python/tpu/tests/tpu_embedding_v2_sequence_feature_test.py<gh_stars>1-10
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
    """Checks that sequence features (max_sequence_length set on the feature
    config) dequeue with an extra sequence axis, for sparse and ragged inputs,
    with and without an explicit build()."""
    @parameterized.parameters([True, False])
    def test_sequence_feature(self, is_sparse):
        seq_length = 3
        # Set the max_seq_length in feature config
        for feature in self.feature_config:
            feature.max_sequence_length = seq_length
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        if is_sparse:
            dataset = self._create_sparse_dataset(strategy)
        else:
            dataset = self._create_ragged_dataset(strategy)
        # keep the batches on the host; the mid level API does its own enqueue
        feature_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))
        @def_function.function
        def test_fn():
            def step():
                return mid_level_api.dequeue()
            mid_level_api.enqueue(next(feature_iter), training=False)
            return strategy.run(step)
        output = test_fn()
        # Each per-replica activation gains a sequence axis:
        # (batch_per_replica=2, seq_length=3, embedding_dim).
        self.assertEqual(
            self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))
    @parameterized.parameters([True, False])
    def test_sequence_feature_with_build(self, is_updated_shape):
        seq_length = 3
        # Set the max_seq_length in feature config
        for feature in self.feature_config:
            feature.max_sequence_length = seq_length
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        dataset = self._create_sparse_dataset(strategy)
        feature_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))
        if is_updated_shape:
            # Build with the full per-feature shapes including the sequence axis.
            mid_level_api.build([
                TensorShape([self.batch_size, seq_length, 2]),
                TensorShape([self.batch_size, seq_length, 2]),
                TensorShape([self.batch_size, seq_length, 3])
            ])
        else:
            # Build with the legacy (batch, features) shapes; sequence features
            # should still come out with the sequence axis below.
            mid_level_api.build([
                TensorShape([self.batch_size, 2]),
                TensorShape([self.batch_size, 2]),
                TensorShape([self.batch_size, 3])
            ])
        @def_function.function
        def test_fn():
            def step():
                return mid_level_api.dequeue()
            mid_level_api.enqueue(next(feature_iter), training=False)
            return strategy.run(step)
        output = test_fn()
        self.assertEqual(
            self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))
if __name__ == '__main__':
    # TF2 behavior must be enabled before the test framework starts.
    v2_compat.enable_v2_behavior()
    test.main()
| StarcoderdataPython |
58332 | <reponame>ddelange/PyArmyKnife
import inspect
import logging
import os
from pickle import HIGHEST_PROTOCOL
logger = logging.getLogger(__name__)
# Best-effort detection of the top-level script that imported this module:
# the outermost stack frame's filename.  Falls back to the CWD when stack
# inspection fails (interactive sessions, frozen apps, etc.).
try:
    stack = inspect.stack()
    # print([entry for entry in stack])
    # current_file = [entry[1] for entry in stack if entry[-2][0].strip(' ').startswith('import %s' % inspect.getmodule(stack[1][0]).__name__)][0]
    current_file = os.path.abspath(stack[-1][1])
    current_path = os.path.dirname(current_file) + os.sep
except Exception as e:
    # NOTE(review): unlike the success path, this fallback has no trailing
    # os.sep — callers using os.path.join are fine, but string concatenation
    # (e.g. write_fits's `current_path + "img"`) would differ. Confirm intent.
    current_file, current_path = None, os.getcwd()
    logger.info(
        f"{e}: Can't get current filename, probably running as main or using interactive mode. Skipping current_file and current_path."
    )
def dump(value, filename, *, compress=("zlib", 7), protocol=HIGHEST_PROTOCOL):
    """Dump a Python object to disk via joblib, creating parent directories.

    Parameters:
        value: any picklable Python object.
        filename: target path (made absolute before writing).
        compress: joblib compression spec, e.g. ("zlib", 7).
        protocol: pickle protocol passed through to joblib.

    Errors are logged (with traceback) rather than raised, matching the
    original best-effort design.
    """
    filename = os.path.abspath(filename)
    try:
        try:  # sometimes the latter won't work where the externals does despite same __version__
            from sklearn.externals.joblib import dump as jobdump
        except Exception:
            from joblib import dump as jobdump
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...): os.makedirs(...)` pair.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        jobdump(value, filename, compress=compress, protocol=protocol)
    except Exception:
        # BUG FIX: the original did logger.error("..." + e), which raises
        # TypeError (cannot concatenate str and Exception) and masks the real
        # error.  logger.exception records the message plus the traceback.
        logger.exception("Unexpected error in dump")
def load(filename):
    """Load a Python object that was previously written to disk with dump()."""
    path = os.path.abspath(filename)
    # Prefer the sklearn-bundled joblib when present; fall back to the
    # standalone package otherwise.
    try:
        from sklearn.externals.joblib import load as jobload
    except Exception:
        from joblib import load as jobload
    return jobload(path)
def string_compress(data, compression_level=4, **kwargs):
    """Serialize *data* with msgpack, brotli-compress it, and return the
    result as an ASCII-safe base64 Py3 str.

    Extra keyword arguments are forwarded to msgpack.packb; use_bin_type and
    strict_types default to True unless overridden.
    """
    import base64
    import msgpack
    import brotli

    use_bin_type = kwargs.pop("use_bin_type", True)
    strict_types = kwargs.pop("strict_types", True)
    packed = msgpack.packb(
        data,
        use_bin_type=use_bin_type,
        strict_types=strict_types,
        **kwargs,
    )
    squeezed = brotli.compress(packed, quality=compression_level)
    return base64.b64encode(squeezed).decode("ascii")
def string_decompress(compressed, **kwargs):
    """Invert string_compress: base64-decode, brotli-decompress, and
    msgpack-unpack a Py3 str back into a Python variable.

    Extra keyword arguments are forwarded to msgpack.unpackb; raw defaults
    to False unless overridden.
    """
    import base64
    import msgpack
    import brotli

    raw_flag = kwargs.pop("raw", False)
    payload = base64.b64decode(compressed.encode("ascii"))
    return msgpack.unpackb(brotli.decompress(payload), raw=raw_flag, **kwargs)
def list_files(directory, hidden=False):
    """Return the names of the plain files directly inside *directory*.

    Sub-directories are skipped; dot-files are skipped too unless *hidden*
    is true.  Order follows os.listdir().
    """
    return [
        entry
        for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
        and (hidden or not entry.startswith("."))
    ]
def open_with_default(filename):
    """Open *filename* with the platform's default application.

    On macOS, PDFs are routed to Skim for its auto-reload (Terminal:
    defaults write -app Skim SKAutoReloadFileUpdate -boolean true); other
    files go through `open` / `xdg-open` / `start` per platform.
    """
    from subprocess import call as subcall
    from os import system, name
    from sys import platform

    target = os.path.abspath(filename)
    if platform.startswith("darwin"):  # macOS
        if target.endswith("pdf"):
            system('open -a Skim "{}"'.format(target))
        else:
            subcall(("open", target))
    elif name == "posix":  # other Unix / Linux
        # detach and silence xdg-open so the caller is not blocked
        system('xdg-open "{}" > /dev/null 2>&1 &'.format(target))
    elif name == "nt":  # Windows
        system('start "" /b "{}"'.format(target))
def write_fits(images, name):
    """Write a list of 2-D image arrays into <current_path>/img/<name>.fits,
    one ImageHDU per image, overwriting any existing file."""
    from astropy.io import fits
    if not os.path.exists(current_path + "img"):
        os.makedirs(current_path + "img")
    nwhdulist = fits.HDUList()
    nwhdulist.append(
        fits.PrimaryHDU()
    )  # header=fits.open(fitslocation+source_list[0])[0].header))
    for image in images:
        nwhdulist.append(fits.ImageHDU(data=image))
    # BUG FIX: the original passed clobber=True to os.path.join() (misplaced
    # parenthesis), which raises TypeError.  The overwrite flag belongs to
    # writeto(); `overwrite` is the modern name for the deprecated `clobber`.
    nwhdulist.writeto(os.path.join(current_path, "img", name + ".fits"), overwrite=True)
    nwhdulist.close()
def append_to_txt(plaintext, name="output.txt"):
    """Append *plaintext* (stringified) plus a newline to a text file that
    lives next to the top-level script (current_path)."""
    target = os.path.join(current_path, name)
    with open(target, "a") as handle:
        handle.write("{}\n".format(str(plaintext)))
def write_dict_to_json(data_dict, filename="output", openfile=False):
    """Serialize *data_dict* to pretty-printed JSON and return the output path.

    If *filename* does not already end in ".json", it is treated as a bare
    name and placed under current_path with a ".json" suffix; otherwise it is
    used as given.  When *openfile* is true the written file is opened with
    the OS default application.
    """
    import json
    if not filename.endswith(".json"):
        filename = os.path.join(current_path, filename + ".json")
    with open(filename, "w") as output_file:
        json.dump(data_dict, output_file, indent=2)
    if openfile:
        open_with_default(filename)
    # BUG FIX: previously returned the literal string "(unknown)" (a corrupted
    # line); returning the resolved path lets callers locate the written file.
    return filename
def load_json(filename):
    """Return the parsed contents of a JSON file, or None when it is not a file."""
    import json
    if not os.path.isfile(filename):
        return None
    with open(filename, "r") as handle:
        return json.load(handle)
| StarcoderdataPython |
3291511 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Tests that linkers contain all possible types defined in constants."""
from moe.bandit.constant import BANDIT_ENDPOINTS, EPSILON_SUBTYPES, UCB_SUBTYPES
from moe.bandit.linkers import BANDIT_ENDPOINTS_TO_SUBTYPES, EPSILON_SUBTYPES_TO_BANDIT_METHODS, UCB_SUBTYPES_TO_BANDIT_METHODS
class TestLinkers(object):
    """Tests that linkers contain all possible types defined in constants.

    Each check uses set equality, so it verifies both directions at once:
    every constant appears as a linker key AND every linker key is a known
    constant.
    """
    def test_bandit_links_have_all_bandit_endpoints(self):
        """Test each bandit endpoint is in a linker, and every linker key is a bandit endpoint."""
        assert set(BANDIT_ENDPOINTS) == set(BANDIT_ENDPOINTS_TO_SUBTYPES.keys())
    def test_epsilon_links_have_all_epsilon_subtypes(self):
        """Test each epsilon subtype is in a linker, and every linker key is an epsilon subtype."""
        assert set(EPSILON_SUBTYPES) == set(EPSILON_SUBTYPES_TO_BANDIT_METHODS.keys())
    def test_ucb_links_have_all_ucb_subtypes(self):
        """Test each UCB subtype is in a linker, and every linker key is a UCB subtype."""
        assert set(UCB_SUBTYPES) == set(UCB_SUBTYPES_TO_BANDIT_METHODS.keys())
| StarcoderdataPython |
13990 | <gh_stars>1-10
'''
Uses HPI [[https://github.com/karlicoss/HPI/blob/master/doc/MODULES.org#myreddit][reddit]] module
'''
from itertools import chain
from typing import Set, Optional
from ..common import Visit, Loc, extract_urls, Results, logger
def index(*, render_markdown: bool = False, renderer: Optional['RedditRenderer'] = None) -> Results:
    """Yield Visits for the user's reddit saves, comments, submissions and
    upvotes (via the HPI reddit module).

    Per-item errors are yielded instead of raised so one bad item does not
    abort the whole indexing run.
    """
    from . import hpi
    try:
        from my.reddit.all import submissions, comments, saved, upvoted
    except ModuleNotFoundError as e:
        # fall back to the pre-`my.reddit.all` layout of older HPI versions
        if "No module named 'my.reddit.all'" in str(e):
            import warnings
            warnings.warn("DEPRECATED/reddit: Using an old version of HPI, please update")
            from my.reddit import submissions, comments, saved, upvoted  # type: ignore[no-redef]
        else:
            raise e
    if renderer is not None:
        assert callable(renderer), f"{renderer} is not a callable (should be a subclass of RedditRenderer)"
        r = renderer(render_markdown=render_markdown)
    else:
        r = RedditRenderer(render_markdown=render_markdown)
    logger.info('processing saves')
    for s in saved():
        try:
            yield from r._from_save(s)
        except Exception as e:
            yield e
    logger.info('processing comments')
    for c in comments():
        try:
            yield from r._from_comment(c)
        except Exception as e:
            yield e
    logger.info('processing submissions')
    for sub in submissions():
        try:
            yield from r._from_submission(sub)
        except Exception as e:
            yield e
    logger.info('processing upvotes')
    for u in upvoted():
        try:
            yield from r._from_upvote(u)
        except Exception as e:
            yield e
# mostly here so we can keep track of how the user
# wants to render markdown
class RedditRenderer:
    """Turns HPI reddit objects (saves, comments, submissions, upvotes) into
    promnesia Visits.

    When render_markdown is enabled (and the markdown helper module imports,
    i.e. mistletoe is installed), item bodies are rendered to HTML for the
    Visit context; otherwise the raw text is used.
    """
    def __init__(self, render_markdown: bool = False):
        self._link_extractor = None
        self._parser_cls = None
        try:
            from .markdown import TextParser, extract_from_text
            self._link_extractor = extract_from_text
            self._parser_cls = TextParser
        except ImportError as import_err:
            # TODO: add dummy _link_extractor and _parser_cls classes in case
            # these are called by a subclass?
            # only send error if the user is trying to enable this feature
            if render_markdown:
                logger.exception(import_err)
                logger.critical("Could not import markdown module to render reddit markdown. Try 'python3 -m pip install mistletoe'")
            render_markdown = False  # force to be false, couldn't import
        self.render_markdown = render_markdown
    def _from_comment(self, i: 'Comment') -> Results:
        """Visits extracted from a reddit comment."""
        locator = Loc.make(
            title='Reddit comment',
            href=i.url,
        )
        yield from self._from_common(i, locator=locator)
    def _from_submission(self, i: 'Submission') -> Results:
        """Visits extracted from a reddit submission (self-post or link)."""
        locator = Loc.make(
            title=f'Reddit submission: {i.title}',
            href=i.url,
        )
        yield from self._from_common(i, locator=locator)
    def _from_upvote(self, i: 'Upvote') -> Results:
        """Visits extracted from an upvoted item."""
        locator = Loc.make(
            title=f'Reddit upvote',
            href=i.url,
        )
        yield from self._from_common(i, locator=locator)
    def _from_save(self, i: 'Save') -> Results:
        """Visits extracted from a saved item."""
        locator = Loc.make(
            title='Reddit save',
            href=i.url,
        )
        yield from self._from_common(i, locator=locator)
    # to allow for possible subclassing by the user?
    def _render_body(self, text: str) -> str:
        """Render markdown to HTML when enabled, else return the text as-is."""
        if self.render_markdown and self._parser_cls is not None:
            return self._parser_cls(text)._doc_ashtml()
        else:
            return text
    def _from_common(self, i: 'RedditBase', locator: Loc) -> Results:
        """Yield one Visit per unique URL attached to, or embedded in, *i*."""
        urls = [i.url]
        # TODO this should belong to HPI.. fix permalink handling I guess
        # ok, it's not present for all of them..
        lurl = i.raw.get('link_url')
        if lurl is not None:
            urls.append(lurl)
        lurl = i.raw.get('url')
        if lurl is not None:
            urls.append(lurl)
        context = self._render_body(i.text)
        # guards against emitting duplicate Visits for the same URL
        emitted: Set[str] = set()
        for url in chain(urls, extract_urls(i.text)):
            if url in emitted:
                continue
            yield Visit(
                url=url,
                dt=i.created,
                context=context,
                locator=locator,
            )
            emitted.add(url)
        # extract from markdown links like [link text](https://...)
        # in case URLExtract missed any
        #
        # this should try to do this, even if the user didn't enable
        # the render_markdown flag, as it may catch extra links that URLExtract didn't
        # would still require mistletoe to be installed, but
        # the user may already have it installed for the auto/markdown modules
        if self._link_extractor is not None:
            for res in self._link_extractor(i.text):
                if isinstance(res, Exception):
                    yield res
                    continue
                if res.url in emitted:
                    continue
                yield Visit(
                    url=res.url,
                    dt=i.created,
                    context=context,
                    locator=locator,
                )
                emitted.add(res.url)
import typing
# Import the HPI types only for static type checking, avoiding a hard
# runtime dependency on my.reddit (the annotations above use string forms).
if typing.TYPE_CHECKING:
    from my.reddit.common import Submission, Comment, Save, Upvote, RedditBase
| StarcoderdataPython |
3373319 | ## FIXME: Replace all dots
## 8 kyu
## https://www.codewars.com/kata/596c6eb85b0f515834000049
import re
def replace_dots(str):
    """Return a copy of the input with every "." replaced by "-"."""
    # str.replace does the job without a regex.  (The parameter shadows the
    # builtin `str`; name kept for interface compatibility with callers.)
    return str.replace(".", "-")
4803466 | """Authentication resource views"""
from flask_jwt_extended import create_access_token
from marshmallow import ValidationError
from flask_restful import Resource
from flask_api import status
from flask import jsonify, request, session, make_response
from serializers.user_schema import LoginSchema
from app_config import API
from models.user import User
from app_config import BCRYPT
# Shared schema instance used by LoginResource to validate credentials.
LOGIN_SCHEMA = LoginSchema()
# NOTE(review): '<PASSWORD>' below looks like an anonymization placeholder left
# by a dataset scrubber; the session key was presumably 'jwt_token' — confirm
# against the original repository before relying on this value.
JWT_TOKEN = 'jwt_<PASSWORD>'
class LogoutResource(Resource):
    """
    Implementation sign out method
    """
    def get(self):
        """Get method for sign out"""
        # Clearing the whole session discards the stored JWT, which is what
        # effectively signs the user out.
        session.clear()
        response = {
            'is_logout': True,
        }
        return make_response(response, status.HTTP_200_OK)
class LoginResource(Resource):
    """Implementation sign in method"""
    def post(self):
        """Post method for sign in: validate credentials, store a JWT in the
        session and return it to the client."""
        try:
            data = LOGIN_SCHEMA.load(request.json)
            current_user = User.find_user(user_name=data['user_name'])
        except ValidationError as error:
            return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
        try:
            check_password = BCRYPT.check_password_hash(current_user.user_password, data['user_password'])
            if not check_password:
                raise AttributeError
        except AttributeError:
            # AttributeError is raised explicitly on a wrong password and
            # implicitly when find_user() returned None (unknown user, so
            # .user_password fails) — one handler covers both without telling
            # the client which of the two failed.
            response_object = {
                'Error': 'Your password or login is invalid'
            }
            return make_response(response_object, status.HTTP_400_BAD_REQUEST)
        session.permanent = True
        # NOTE(review): expires_delta=False creates a token that never expires —
        # confirm this is intended.
        access_token = create_access_token(identity=current_user.id, expires_delta=False)
        session[JWT_TOKEN] = access_token
        response_object = {
            "access_token": access_token
        }
        return make_response(jsonify(response_object), status.HTTP_200_OK)
# Route registration for the authentication endpoints.
API.add_resource(LogoutResource, '/logout')
API.add_resource(LoginResource, '/login')
3201825 | <reponame>aws-samples/build-a-360-degree-customer-view-with-aws
import boto3
import os
import json
from datetime import datetime,timedelta
import time
import random
# Clients/config are created once at import time so Lambda can reuse them
# across warm invocations.
s3 = boto3.resource("s3")
region = os.getenv('region')  # NOTE(review): read but never used in this chunk
FilePath = 'card'  # S3 key prefix ("folder") for generated files
BucketName = os.getenv('BucketName')  # target bucket, from the Lambda environment
csvDelimiter = os.getenv('csvDelimiter')  # NOTE(review): unused in this chunk
def getLine():
    """Return one synthetic card record as 'card_id,disp_id,type,issued_datetime'.

    card_id is a 16-digit number built from four random 4-digit groups
    (5xxx 1xxx 8xxx 2xxx-8xxx); issued_datetime is an ISO timestamp 30-720
    days in the past.
    """
    card_id = (random.randint(5000, 5999) * 10000 * 10000 * 10000
               + random.randint(1000, 1999) * 10000 * 10000
               + random.randint(8000, 8999) * 10000
               + random.randint(2000, 8999))
    # disp_id is drawn from a 50-wide window starting at either 10 or 20
    low = random.randint(1, 2) * 10
    disp_id = random.randint(low, low + 50)
    # renamed from `type`, which shadowed the builtin; weighting makes
    # 'classic' most likely, then 'gold', then 'junior'
    card_type = random.choice(['junior', 'classic', 'gold', 'classic', 'classic', 'gold'])
    days_ago = random.randint(30, 720)
    issued_datetime = (datetime.now() + timedelta(days=-days_ago)).isoformat()
    return '%s,%s,%s,%s' % (card_id, disp_id, card_type, issued_datetime)
def lambda_handler(event, context):
    """Lambda entry point: generate ~5 seconds' worth of random card records
    and upload them to S3 as a single timestamped CSV object.

    Returns None; progress is reported via print (CloudWatch logs).
    """
    begining = datetime.now()
    newtime = datetime.now()
    filename = 'card' + '00' + time.strftime("%Y%m%d-%H%M%S") + '.csv'
    s3_path = FilePath + '/' + filename
    # Collect lines in a list and join once — the original built the body
    # with repeated string concatenation (quadratic in the number of lines).
    records = []
    while (newtime - begining).total_seconds() < 5:
        records.append(getLine() + '\n')
        newtime = datetime.now()
    arquivo = ''.join(records)
    # BUG FIX: `count` was initialized but never incremented, so the summary
    # always reported "Processed 0 items."
    count = len(records)
    print(BucketName)
    s3.Bucket(BucketName).put_object(Key=s3_path, Body=arquivo)
    print('File ' + s3_path + ' saved.')
    print('Processed ' + str(count) + ' items.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.