id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
# pactest scenario: removing a package whose owned directory path was later
# replaced by a symlink must not delete the symlink's target directory.
self.description = "remove a package with a directory that has been replaced with a symlink"
# Filesystem before the operation: real directory var/ and symlink srv -> var/
self.filesystem = [ "var/", "srv -> var/" ]
# Installed (local) package that owns the srv/ directory.
lpkg = pmpkg("pkg1")
lpkg.files = ["srv/"]
self.addpkg2db("local", lpkg)
self.args = "-R %s" % (lpkg.name)
# Expectations: removal succeeds, the symlink target var/ survives, the
# srv entry is gone in every form, and no removal error is printed.
self.addrule("PACMAN_RETCODE=0")
self.addrule("DIR_EXIST=var/")
self.addrule("!LINK_EXIST=srv")
self.addrule("!FILE_EXIST=srv")
self.addrule("!DIR_EXIST=srv")
self.addrule("!PACMAN_OUTPUT=cannot remove")
| StarcoderdataPython |
def make_tree(seq):
    """Build a BST-shaped nested dict by inserting the items of seq in order.

    Each subtree is a dict with exactly one key (the node value) mapping to
    {'left': <subtree>, 'right': <subtree>}; empty dicts are leaves.
    """
    tree = {}
    for item in seq:
        tree = _make_tree(item, tree)
    return tree


def _make_tree(item, tree):
    """Insert item into the nested-dict BST in place and return the tree.

    Items greater than the node value go right; equal or smaller go left.
    """
    if not tree:
        tree[item] = {'left': {}, 'right': {}}
    else:
        # Each subtree dict holds exactly one key: the node value.
        # next(iter(...)) works on Python 2 and 3; the original
        # tree.keys()[0] raises TypeError on Python 3 (keys() is a view).
        node = next(iter(tree))
        if item > node:
            _make_tree(item, tree[node]['right'])
        else:
            _make_tree(item, tree[node]['left'])
    return tree
def do(seqs):
    """For each sequence in seqs[1:], print 'Yes' if inserting it yields the
    same BST shape as the reference sequence seqs[0], else 'No'.

    Uses print() calls (valid on both Python 2 and 3 with a single argument)
    instead of the Python-2-only print statement.
    """
    tree = make_tree(seqs[0])
    for seq in seqs[1:]:
        test_tree = make_tree(seq)
        if test_tree != tree:
            print('No')
        else:
            print('Yes')
# Read test cases from stdin until a single "0" line terminates the input
# (Python 2 I/O: raw_input, map returns a list).
while True:
    _input = raw_input()
    if _input != '0':
        seqs = []
        # First line of a case: "<sequence length> <number of sequences>"
        seq_len, seq_num = _input.split(' ')
        # Read the reference sequence plus seq_num candidate sequences.
        for i in range(int(seq_num) + 1):
            seq = raw_input()
            seqs.append(map(int, seq.split(' ')))
        do(seqs)
    else:
        break
| StarcoderdataPython |
1921037 | import vmraid
from vmraid.model.naming import append_number_if_name_exists
from vmraid.utils.dashboard import get_dashboards_with_link
def execute():
    """Patch: rename standard Dashboards, Dashboard Charts and Number Cards
    that users have modified to '<name> Custom', so that subsequent fixture
    syncs do not overwrite the customizations."""
    # Bail out on sites where these doctypes do not exist yet.
    if not vmraid.db.table_exists('Dashboard Chart')\
            or not vmraid.db.table_exists('Number Card')\
            or not vmraid.db.table_exists('Dashboard'):
        return

    # Make sure the latest doctype schemas are loaded before querying.
    vmraid.reload_doc('desk', 'doctype', 'dashboard_chart')
    vmraid.reload_doc('desk', 'doctype', 'number_card')
    vmraid.reload_doc('desk', 'doctype', 'dashboard')

    modified_charts = get_modified_docs('Dashboard Chart')
    modified_cards = get_modified_docs('Number Card')

    # A dashboard is renamed if it was modified itself OR if it links to a
    # renamed chart/card (link lookup happens before the rename below).
    modified_dashboards = [doc.name for doc in get_modified_docs('Dashboard')]
    for chart in modified_charts:
        modified_dashboards += get_dashboards_with_link(chart.name, 'Dashboard Chart')
        rename_modified_doc(chart.name, 'Dashboard Chart')
    for card in modified_cards:
        modified_dashboards += get_dashboards_with_link(card.name, 'Number Card')
        rename_modified_doc(card.name, 'Number Card')

    # De-duplicate before renaming dashboards.
    modified_dashboards = list(set(modified_dashboards))
    for dashboard in modified_dashboards:
        rename_modified_doc(dashboard, 'Dashboard')
def get_modified_docs(doctype):
    """Return all documents of *doctype* that are owned by Administrator but
    were last modified by someone else (i.e. user-customized standard docs)."""
    filters = {
        'owner': 'Administrator',
        'modified_by': ['!=', 'Administrator'],
    }
    return vmraid.get_all(doctype, filters=filters)
def rename_modified_doc(docname, doctype):
    """Rename *docname* to '<docname> Custom'; if that name is already taken
    (rename_doc raises ValidationError), append a running number instead."""
    new_name = '{0} Custom'.format(docname)
    try:
        vmraid.rename_doc(doctype, docname, new_name)
    except vmraid.ValidationError:
        unique_name = append_number_if_name_exists(doctype, new_name)
        vmraid.rename_doc(doctype, docname, unique_name)
| StarcoderdataPython |
3537327 | #!/usr/bin/env python
# Requires Python 3.x
"""
NSX-T SDK Sample Code
Copyright 2017-2020 VMware, Inc. All rights reserved
The BSD-2 license (the "License") set forth below applies to all
parts of the NSX-T SDK Sample Code project. You may not use this
file except in compliance with the License.
BSD-2 License
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
################################################################################
# Summary: Script to GET PER VM DFW rules programmed in the datapath.
# Usecase: Helps to monitor rules against supported rule scale limit per vnic (4K).
# Usage: python nsx-get-dfw-rules-per-vm.py [-h] --nsx-mgr-ip IP
# [--user USER]
# [--password PASSWORD]
# [--aboverulelimitonly ABOVERULELIMITONLY]
# [--fwrulelimit FWRULELIMIT]
# Caveat: This count is very close aproximaion to the rules in the datapath per VNIC with following caveat:
# 1) It also counts disabled rules.
# 2) If a rule has TCP & UDP services/ports together, e.g TCP {1,2,3} UDP (5,6}- Script counts as
# one but datapath would have 2 rules") one with UDP & TCP port set.
# 3) If a rule has Multiple L7 Context-Profiles- Script counts as one but datapath would have N rules,
# one for each of the L7 profile.
################################################################################
import requests
from requests.auth import HTTPBasicAuth
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import argparse
import re
################################################################################
### Define Arguments for the script.
################################################################################
# Command-line interface; parsed once at import time, and the resulting
# `args` values serve as the defaults for rest_api_call() below.
parser = argparse.ArgumentParser(description='Get per VM DFW rules programmed in the datapath')
parser.add_argument('--nsx-mgr-ip', dest="ip",
                    help="NSX Manager IP", required=True)
parser.add_argument('--user', dest="user",
                    help="NSX Username, default: admin",
                    default="admin", required=False)
parser.add_argument('--password', dest="password",
                    help="NSX Password, default: <PASSWORD>",
                    default="<PASSWORD>", required=False)
# "yes": list only VNICs whose rule count reaches --fwrulelimit; "no": all VMs.
parser.add_argument('--aboverulelimitonly', dest="aboverulelimitonly",
                    help="-yes- Lists only VM Rule count above --fwrulelimit, -no- all VM",
                    default="no", required=False)
# Threshold used together with --aboverulelimitonly=yes (argparse applies
# type=int to the string default, so args.fwrulelimit is an int).
parser.add_argument('--fwrulelimit', dest="fwrulelimit", type=int,
                    help="VM's with rule above this limit, if --aboverulelimitdonly is used, default 100",
                    default="100", required=False)
args = parser.parse_args()
################################################################################
### REST API function using python "requests" module
################################################################################
def rest_api_call(method, endpoint, data=None, ip=args.ip, user=args.user, password=args.password):
    """Issue an HTTPS request against the NSX Manager REST API.

    Arguments:
        method   - HTTP verb ('GET', 'POST', ...)
        endpoint - API path, e.g. '/api/v1/logical-ports'
        data     - optional request body
        ip, user, password - default to the parsed CLI arguments

    Returns the decoded JSON response, or None when the body is empty.
    Raises requests.exceptions.HTTPError on a non-2xx status.
    """
    url = "https://%s%s" % (ip, endpoint)
    # Silence self-signed-certificate warnings (we pass verify=False below).
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    headers = {'Content-Type': 'application/json'}
    res = requests.request(
        method=method,
        url=url,
        auth=HTTPBasicAuth(user, password),
        headers=headers,
        data=data,
        verify=False
    )
    # The original wrapped this in `except HTTPError as e: raise e`, which is
    # a no-op re-raise; let the exception propagate directly.
    res.raise_for_status()
    if len(res.content) > 0:
        return res.json()
    return None
################################################################################
### Get All NSX Logical Ports. Includes all VM VNIC ports
################################################################################
def nsx_get_all_logical_ports():
    """Return the list of all NSX logical ports (every VM VNIC has one)."""
    response = rest_api_call(method='GET', endpoint="/api/v1/logical-ports")
    # "results" holds the list of logical-port dictionaries.
    return response["results"]
###############################################################################
### Count NSX DFW Policy, Rules and GROUPS.
################################################################################
def nsx_dfw_policy_count():
    """Print a system-wide count of DFW Security Policies, Rules and Groups.

    Fetches the full policy tree from the NSX Manager, saves it to
    ``tempfile.json`` (kept for inspection, as before), then counts marker
    substrings in the serialized JSON to derive the totals.  Counting now
    uses the in-memory dump instead of re-reading the file just written.
    """
    # Full policy tree: Domain -> SecurityPolicy -> Rule, plus Groups.
    endpoint = "/policy/api/v1/infra?filter=Type-Domain|SecurityPolicy|Rule|Group"
    res = rest_api_call(method='GET', endpoint=endpoint)
    # indent=4 keeps one attribute per line, which the counting relies on.
    serialized = json.dumps(res, indent=4)
    tempfile = "tempfile.json"
    with open(tempfile, 'w') as bkdata:
        bkdata.write(serialized)
    # Marker substrings identifying each object type in the JSON dump:
    #   'ChildSecurityPolicy' -> one per Security Policy
    #   'rule_id'             -> one per Rule
    #   'ChildGroup'          -> one per Group
    search_for_policy = 'childsecuritypolicy'
    search_for_rule = 'rule_id'
    search_for_group = 'childgroup'
    pcount, rcount, gcount = 0, 0, 0
    for line in serialized.splitlines():
        for word in line.strip().lower().split():
            if search_for_policy in word:
                pcount += 1
            if search_for_rule in word:
                rcount += 1
            if search_for_group in word:
                gcount += 1
    print("\n NSX Manager system wide DFW config summary: %s Policy, %s Rules, %s Group\n" % (pcount, rcount, gcount))
################################################################################
### Get All NSX Logical Ports. Includes all VM VNIC ports
################################################################################
def _vnic_rule_count(lport):
    """Return the total DFW rule count applied to one logical port, summing
    rule_count across every firewall section applied to it."""
    endpoint = ("/api/v1/firewall/sections?applied_tos=%s&deep_search=true"
                % lport["internal_id"])
    sections = rest_api_call(method='GET', endpoint=endpoint)
    return sum(section["rule_count"] for section in sections["results"])


def nsx_get_dfw_rule_per_lport():
    """Print the per-VM-VNIC DFW rule count.

    With --aboverulelimitonly=no every VM VNIC is listed; with yes only the
    VNICs whose count reaches --fwrulelimit are listed.
    """
    nsx_dfw_policy_count()
    # Fetch the port list once.  The original code reused the name `res` for
    # the per-port API responses, clobbering the list between the branches.
    lports = nsx_get_all_logical_ports()
    if "no" in (args.aboverulelimitonly):
        print("---------------------------------------------------------------------------------")
        print(" Total NSX DFW datapath Rule Count per VM VNIC")
        print(" Counts only Rules based on Applied-To field for given VM VNIC")
        print(" Use applied-to to define scope of policy and to remove unrelated policy from VM")
        print("---------------------------------------------------------------------------------")
        print(" Rule-Count ------ VM-VNIC")
        for lp in lports:
            # Only VM VNIC ports carry a "<vm>.vmx@<uuid>" display name.
            if re.search("vmx@", lp["display_name"]):
                rc = _vnic_rule_count(lp)
                print("\t%s ---> %s" % (rc, lp["display_name"]))
    if "yes" in (args.aboverulelimitonly):
        print("---------------------------------------------------------------------------------")
        print(" VM-NIC having DFW Rules count above %s " % args.fwrulelimit)
        print(" Counts only Rules based on Applied-To field for given VM VNIC")
        print(" Use Applied-To to define scope of policy and to remove unrelated policy from VM")
        print("---------------------------------------------------------------------------------")
        print(" Rule-Count ------ VM-VNIC")
        for lp in lports:
            if re.search("vmx@", lp["display_name"]):
                rc = _vnic_rule_count(lp)
                # >= preserves the original behavior: a VNIC exactly at the
                # limit is listed.
                if rc >= (args.fwrulelimit):
                    print("\t%s ---> %s" % (rc, lp["display_name"]))
    print("-----------------------------------------------------------------------------------------------")
    print(" This count is very close aproximaion to the rules in the datapath per VNIC with following caveat:")
    print("1) It also counts disabled rules.")
    print("2) If a rule has TCP & UDP services/ports together, e.g TCP {1,2,3} UDP (5,6}- Script counts as one but datapath would have 2 rules")
    print(" one with UDP & TCP port set")
    print("3) If a rule has Multiple L7 Context-Profiles- Script counts as one but datapath would have N rules, one for each of the L7 profile")
################################################################################
### Get DFW Rule per VNIC
################################################################################
# Script entry point: print DFW rule counts per VM VNIC.
if __name__ == "__main__":
    nsx_get_dfw_rule_per_lport()
"""
Sample output of script:
bhatg@bhatg-a02 DFW % python nsx-get-dfw-rules-per-vm.py --nsx-mgr-ip 10.114.208.136
NSX Manager system wide DFW config summary: 11 Policy, 34 Rules, 27 Group
---------------------------------------------------------------------------------
Total NSX DFW datapath Rule Count per VM VNIC
Counts only Rules based on Applied-To field for given VM VNIC
Use applied-to to define scope of policy and to remove unrelated policy from VM
---------------------------------------------------------------------------------
Rule-Count ------ VM-VNIC
23 ---> DC02-GOLDEN-IMAGE/DC02-GOLDEN-IMAGE.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
23 ---> DC02-PROD-MRS-APP-01/DC02-PROD-MRS-APP-01.vmx@bb7b1e58-f5c7-4fe8-80bb-b123e39b07b8
23 ---> DC02-PROD-MRS-APP-02/DC02-PROD-MRS-APP-02.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
31 ---> DC02-PROD-MRS-DB-01/DC02-PROD-MRS-DB-01.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
31 ---> DC02-PROD-MRS-WEB-01/DC02-PROD-MRS-WEB-01.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
23 ---> DEV-MRS-WEB-01/DEV-MRS-WEB-01.vmx@bb7b1e58-f5c7-4fe8-80bb-b123e39b07b8
19 ---> nsx-edgevm-1/nsx-edgevm-1.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-2/nsx-edgevm-2.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-3/nsx-edgevm-3.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-3/nsx-edgevm-3.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-3/nsx-edgevm-3.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-4/nsx-edgevm-4.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
19 ---> nsx-edgevm-4/nsx-edgevm-4.vmx@bdfcc848-2fd8-42c2-afb2-c53c79ef8c30
23 ---> vcenter2.dg.vsphere.local/vcenter2.dg.vsphere.local.vmx@a8fc1054-3569-4c35-a07c-5f512c37a472
-----------------------------------------------------------------------------------------------
This count is very close aproximaion to the rules in the datapath per VNIC with following caveat:
1) It also counts disabled rules.
2) If a rule has TCP & UDP services/ports together, e.g TCP {1,2,3} UDP (5,6}- Script counts as one but datapath would have 2 rules
one with UDP & TCP port set
3) If a rule has Multiple L7 Context-Profiles- Script counts as one but datapath would have N rules, one for each of the L7 profile
bhatg@bhatg-a02 DFW %
bhatg@bhatg-a02 DFW % python nsx-get-dfw-rules-per-vm.py --nsx-mgr-ip 10.114.208.136 --aboverulelimitonly yes --fwrulelimit 20
NSX Manager system wide DFW config summary: 11 Policy, 34 Rules, 27 Group
---------------------------------------------------------------------------------
VM-NIC having DFW Rules count above 20
Counts only Rules based on Applied-To field for given VM VNIC
Use Applied-To to define scope of policy and to remove unrelated policy from VM
---------------------------------------------------------------------------------
Rule-Count ------ VM-VNIC
23 ---> DC02-GOLDEN-IMAGE/DC02-GOLDEN-IMAGE.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
23 ---> DC02-PROD-MRS-APP-01/DC02-PROD-MRS-APP-01.vmx@bb7b1e58-f5c7-4fe8-80bb-b123e39b07b8
23 ---> DC02-PROD-MRS-APP-02/DC02-PROD-MRS-APP-02.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
31 ---> DC02-PROD-MRS-DB-01/DC02-PROD-MRS-DB-01.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
31 ---> DC02-PROD-MRS-WEB-01/DC02-PROD-MRS-WEB-01.vmx@18a2b527-e79c-4bcf-98d0-1373a2970376
23 ---> DEV-MRS-WEB-01/DEV-MRS-WEB-01.vmx@bb7b1e58-f5c7-4fe8-80bb-b123e39b07b8
23 ---> vcenter2.dg.vsphere.local/vcenter2.dg.vsphere.local.vmx@a8fc1054-3569-4c35-a07c-5f512c37a472
-----------------------------------------------------------------------------------------------
This count is very close aproximaion to the rules in the datapath per VNIC with following caveat:
1) It also counts disabled rules.
2) If a rule has TCP & UDP services/ports together, e.g TCP {1,2,3} UDP (5,6}- Script counts as one but datapath would have 2 rules
one with UDP & TCP port set
3) If a rule has Multiple L7 Context-Profiles- Script counts as one but datapath would have N rules, one for each of the L7 profile
"""
| StarcoderdataPython |
import sys


def sponge_case(input_code):
    """Return *input_code* with alternating lower/upper case applied to all
    characters that are outside SQL string literals and comments.

    Recognized special regions: single-quoted strings, `--` and `#` inline
    comments (ended by newline), and `/* ... */ ` block comments.  The
    character that *opens* a region is still sponge-cased (the special flag
    is sampled before it is updated), matching the original behavior.

    Fixes an operator-precedence bug: the original condition
    ``two_char == '--' or char == "#" and not is_special`` let `--` inside a
    string literal start an inline comment; the `not is_special` guard now
    applies to both comment starters.
    """
    output_code = ""
    is_string = False
    is_inline_comment = False
    is_multiline_comment = False
    upper = False
    prev_char = ""
    for char in input_code:
        is_comment = is_inline_comment or is_multiline_comment
        is_special = is_comment or is_string
        two_char = prev_char + char
        if char == '\'' and not is_comment:
            is_string = not is_string
        elif (two_char == '--' or char == "#") and not is_special:
            is_inline_comment = True
        elif two_char == '/*' and not is_special:
            is_multiline_comment = True
        elif char == '\n' and is_inline_comment:
            is_inline_comment = False
        elif two_char == '*/' and is_multiline_comment:
            is_multiline_comment = False
        if not is_special:
            output_code += char.upper() if upper else char.lower()
            upper = not upper
        else:
            output_code += char
        prev_char = char
    return output_code


if __name__ == '__main__':
    # Read the file named by the last CLI argument, sponge-case it, and
    # write the result next to it with a 'sPoNgeCaSeD_' prefix.
    filename = sys.argv[-1]
    with open(filename) as input_file:
        input_code = input_file.read()
    with open('sPoNgeCaSeD_' + filename, 'w') as output_file:
        output_file.write(sponge_case(input_code))
| StarcoderdataPython |
1964421 | from tortoise import Model, fields
from tortoise.contrib.postgres.fields import TSVectorField
from tortoise.contrib.postgres.indexes import (
BloomIndex,
BrinIndex,
GinIndex,
GistIndex,
HashIndex,
PostgreSQLIndex,
SpGistIndex,
)
class Index(Model):
    """Model exercising every PostgreSQL-specific index type supported by
    tortoise-orm; each field below is covered by the matching index in Meta."""
    bloom = fields.CharField(max_length=200)
    brin = fields.CharField(max_length=200)
    gin = TSVectorField()
    gist = TSVectorField()
    sp_gist = fields.CharField(max_length=200)
    hash = fields.CharField(max_length=200)
    partial = fields.CharField(max_length=200)

    class Meta:
        # One index per field, named after its PostgreSQL access method.
        indexes = [
            BloomIndex(fields={"bloom"}),
            BrinIndex(fields={"brin"}),
            GinIndex(fields={"gin"}),
            GistIndex(fields={"gist"}),
            SpGistIndex(fields={"sp_gist"}),
            HashIndex(fields={"hash"}),
            # Partial index: only rows satisfying the condition are indexed.
            PostgreSQLIndex(fields={"partial"}, condition={"id": 1}),
        ]
| StarcoderdataPython |
1916201 | import os
import logging
import urlparse
import simplejson as json
import itertools
from os.path import join
from uuid import uuid4
from zipfile import ZipFile
from datetime import datetime
from lxml import etree
from shutil import rmtree
from django.utils.functional import cached_property
from .config import DATETIME_FORMAT
logger = logging.getLogger(__name__)
DATA_TEXT = ['html', 'text']
class Data(object):
    """Stores output data collected from a set of operations, with
    additional information (task id, URL, start/end timestamps)."""

    def __init__(self, *args, **kwargs):
        # Accept either 'uuid' or legacy 'id' as the identifier.
        self.uuid = kwargs.get('uuid') or kwargs.get('id')
        self.task = kwargs.get('task_id')
        self.url = kwargs.get('url')
        # Start defaults to now; end is filled in lazily by `dict`.
        self.start = kwargs.get('start') or datetime.now()
        self.end = kwargs.get('end') or None
        self.results = []

    @property
    def dict(self):
        """Return a plain-dict snapshot; sets the end time on first access."""
        if self.end is None:
            self.end = datetime.now()
        result = {
            'id': self.uuid,
            'task': self.task,
            'url': self.url,
            'start': print_time(self.start),
            'end': print_time(self.end),
            'results': self.results,
        }
        return result

    def update(self, **kwargs):
        """ Update this object data with provided dictionary """
        for key in kwargs:
            self.__setattr__(key, kwargs[key])

    def add_result(self, result):
        # `result` is expected to expose a `.dict` property (see Datum).
        self.results.append(result.dict)

    @cached_property
    def json(self):
        """ Return as pretty JSON (computed once, then cached). """
        return json.dumps(self.dict, indent=2)
class Datum(object):
    """Holds the output of a single operation; supports export to JSON.

    extras - holds non-result information
    """

    def __init__(self, content, media=None, images=None, **kwargs):
        self.content = content
        self.media = media if media else []
        self.images = images if images else []
        self.extras = kwargs

    @property
    def dict(self):
        """All attributes as a plain dictionary."""
        return self.__dict__

    @property
    def json(self):
        """All attributes serialized as pretty-printed JSON."""
        return json.dumps(self.__dict__, indent=2)
def complete_url(base, link):
    """Complete an URL with scheme/domain/base path when missing, joining it
    against *base*.

    *link* may be a plain URL string or a dict carrying a 'url' key; the
    same shape is returned (the dict is updated in place).
    """
    url = link['url'] if isinstance(link, dict) else link
    if not urlparse.urlsplit(url).scheme:
        url = urlparse.urljoin(base, url)
    if isinstance(link, dict):
        link['url'] = url
        return link
    return url
def get_link_info(link, make_root=False):
    """Extract basic information from a given link (as etree Element),
    and return a dictionary:
        {
            'url': '...',
            'text': '...',
        }
    When make_root is True, the href is prefixed with '/' to force a
    root-relative path.
    In case of having invalid URL, the function will return None
    """
    if isinstance(link, etree._Element):
        href = link.get('href') if not make_root else '/'+link.get('href')
        # Py2: `basestring` covers both str and unicode link texts.
        text = link.text.strip() if isinstance(link.text, basestring) else ''
        if href:
            return {'url': href.strip(), 'text': text}
def get_single_content(element, data_type):
    """Return the processed content of given element.

    Strings (including lxml smart-string results) pass through unchanged;
    etree elements are serialized as plain text or pretty-printed HTML
    according to *data_type* ('text' or 'html').
    """
    if isinstance(element, basestring) or \
            isinstance(element, etree._ElementStringResult) or \
            isinstance(element, etree._ElementUnicodeResult):
        return element
    if data_type == 'text':
        # Return element.text or ''
        return etree.tounicode(element, method='text').strip()
    elif data_type == 'html':
        return etree.tounicode(element, pretty_print=True).strip()
def get_content(elements, data_type='html'):
    """Receive an XPath result and return the appropriate content.

    *elements* may be a single element/string or an iterable of them.  For
    text-like data types, empty values are filtered out of list results.
    """
    if hasattr(elements, '__iter__'):
        items = [get_single_content(el, data_type) for el in elements]
        if data_type in DATA_TEXT:
            # The original filtered with items.remove(val) inside a
            # comprehension, mutating the list while iterating it, which
            # skips the element following each removal.  Rebuild instead.
            items = [val for val in items if val]
    else:
        items = get_single_content(elements, data_type)
    return items
def print_time(atime=None, with_time=True):
    """Return string friendly value of given time (defaults to now).

    Strings pass through unchanged; datetime-like values are formatted with
    DATETIME_FORMAT; anything without strftime yields ''.
    NOTE(review): the with_time parameter is currently unused.
    """
    if isinstance(atime, basestring):
        return atime
    atime = atime or datetime.now()
    try:
        return atime.strftime(DATETIME_FORMAT)
    except AttributeError:
        # Not a datetime-like object: fall through to the empty default.
        pass
    return ''
def get_uuid(url='', base_dir='', size=8):
    """ Return whole new and unique ID and make sure not being duplicated
    if base_dir is provided

    url (optional) - Address of related page; its domain is embedded in the ID
    base_dir (optional) - Directory path to check for duplication
    size (optional) - Size of the UUID prefix
    """
    netloc = urlparse.urlsplit(url).netloc
    while True:
        # uuid4().hex works on both Python 2 and 3; the original
        # uuid4().get_hex() call exists on Python 2 only.
        value = uuid4().hex[:size]
        uuid = '{0}-{1}'.format(value, netloc) if netloc else value
        # Without a base_dir there is nothing to collide with; otherwise
        # retry until the path is unused.
        if not base_dir or not os.path.exists(join(base_dir, uuid)):
            return uuid
def write_storage_file(storage, file_path, content):
    """ Write a file with path and content into given storage. This
    merely tries to support both FileSystem and S3 storage
    Arguments:
        storage - Django file storage
        file_path - relative path to the file
        content - content of file to be written
    Returns:
        The relative file_path that was written.
    """
    try:
        mfile = storage.open(file_path, 'w')
        mfile.write(content)
        mfile.close()
    except IOError:
        # When directories are not auto being created, exception raised.
        # Then try to rewrite using the FileSystemStorage
        location = join(storage.base_location, os.path.dirname(file_path))
        if not os.path.exists(location):
            os.makedirs(location)
        mfile = storage.open(file_path, 'w')
        mfile.write(content)
        mfile.close()
    return file_path
def move_to_storage(storage, source, location):
    """ Move single file or whole directory to storage. Empty directory
    will not be moved.
    Arguments:
        storage: Instance of the file storage (FileSystemStorage,...)
        source: File or directory to be moved
        location: Relative path where the file/dir will be placed into.
    Returns:
        Path of file in storage
    """
    source = source.strip().rstrip('/')
    if os.path.isfile(source):
        # Single file: copy its content straight into storage.
        saved_path = write_storage_file(
            storage, join(location, os.path.basename(source)),
            open(source, 'r').read())
    else:
        # Directory: walk it and re-create the relative layout in storage.
        # blank_size strips the parent-directory prefix from walked paths.
        blank_size = len(source.rsplit('/', 1)[0]) + 1
        for items in os.walk(source):
            loc = join(location, items[0][blank_size:])
            for item in items[2]:
                write_storage_file(
                    storage, join(loc, item),
                    open(join(items[0], item), 'r').read())
        saved_path = join(location, os.path.basename(source))
    # Nuke old file/dir; failure to delete is logged, not raised.
    try:
        if os.path.isfile(source):
            os.remove(source)
        else:
            rmtree(source)
    except OSError:
        logger.exception('Error when deleting: {0}'.format(source))
    return saved_path
class SimpleArchive(object):
    """ This class provides functionalities to create and maintain archive
    file, which is normally used for storing results. """
    _file = None  # underlying ZipFile; None once moved away or removed

    def __init__(self, file_path='', base_dir='', *args, **kwargs):
        # Generate new file in case of duplicate or missing
        if not file_path:
            file_path = get_uuid(base_dir=base_dir)
        self.file_path = join(base_dir, file_path)
        # Create directories if not existing
        location = os.path.dirname(self.file_path)
        if not os.path.exists(location):
            os.makedirs(location)
        # Recreate the archive from scratch if it already exists.
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        self._file = ZipFile(self.file_path, 'w')

    def write(self, file_name, content):
        """ Write file with content into current archive """
        self._file.writestr(file_name, content)

    def finish(self):
        # Flush and close the underlying zip file.
        self._file.close()

    def move_to_storage(self, storage, location, remove=True):
        """ Move the current archive to given location (directory) in storage.
        Arguments:
            storage: Instance of the file storage (FileSystemStorage,...)
            location: Absolute path where the file will be placed into.
            remove: Option to remove the current file after moved or not.
        Returns:
            Path of file in storage
        """
        self.finish()
        content = open(self._file.filename, 'r').read()
        file_path = join(location, os.path.basename(self._file.filename))
        saved_path = write_storage_file(storage, file_path, content)
        # Remove file if successful
        if remove and saved_path:
            try:
                os.remove(self._file.filename)
                self._file = None
            except OSError:
                logger.error('Error when removing temporary file: {0}'.format(
                    self._file.filename))
        return saved_path

    def __str__(self):
        dsc = self._file.filename if self._file else '_REMOVED_'
        return 'SimpleArchive ({0})'.format(dsc)
def interval_to_list(interval):
    """Convert an interval string to the list of numbers it covers.

    '1-4'    -> [1, 2, 3, 4]
    '1, 3-5' -> [1, 3, 4, 5]
    A bare number expands to itself: '7' -> [7].
    """
    numbers = []
    for part in interval.split(','):
        bounds = part.strip().split('-')
        low, high = int(bounds[0]), int(bounds[-1])
        numbers.extend(range(low, high + 1))
    return numbers
def generate_urls(base_url, elements=None):
    """Yield every URL built from the format template and value sets.

    base_url = 'http://domain/class-{0}/?name={1}'
    elements = ((1, 2), ('jane', 'john'))
    yields the 4 combinations class-1/jane, class-1/john, class-2/jane,
    class-2/john.  String values containing '-' are expanded as numeric
    intervals via interval_to_list().
    """
    # Expand any interval strings so each slot is a flat list of values.
    expanded = []
    for element in elements:
        values = []
        for value in element:
            if isinstance(value, basestring) and '-' in value:
                values.extend(interval_to_list(value))
            else:
                values.append(value)
        expanded.append(values)
    # Cartesian product across slots, formatted into the template.
    for combination in itertools.product(*expanded):
        yield base_url.format(*combination)
| StarcoderdataPython |
9734434 | #!/usr/bin/env python
import unittest
import logging
from BaseTest import parse_commandline, BasicTestSetup
import afs
class TestLookupUtilMethods(unittest.TestCase, BasicTestSetup):
    """
    Tests LookupUtil Methods
    """

    def setUp(self):
        """Load lookup fixtures (host alias, primary host name, IP address,
        fileserver UUID) from the [LookupUtil] test-configuration section."""
        BasicTestSetup.__init__(self)
        self.HostAlias = self.test_config.get("LookupUtil", "HostAlias")
        self.primaryHostName = self.test_config.get("LookupUtil", "primaryHostName")
        self.IPAddr = self.test_config.get("LookupUtil", "IPAddr")
        self.FsUUID = self.test_config.get("LookupUtil", "FsUUID")
        return

    def test_Lookup_HostAlias(self):
        # DNS lookup of an alias must resolve to the primary host name.
        DNSInfo = afs.LOOKUP_UTIL[afs.CONFIG.cell].get_dns_info(self.HostAlias)
        self.assertEqual(self.primaryHostName, DNSInfo["names"][0])
        return

    def test_Lookup_UUID(self):
        # A host alias must map to the expected fileserver UUID.
        uuid = afs.LOOKUP_UTIL[afs.CONFIG.cell].get_fsuuid(self.HostAlias)
        self.assertEqual(self.FsUUID, uuid)
        return

    def test_Lookup_HostnameByFSUUID(self):
        # Reverse lookup: fileserver UUID back to the primary host name.
        hostname = afs.LOOKUP_UTIL[afs.CONFIG.cell].get_hostname_by_fsuuid(self.FsUUID)
        self.assertEqual(self.primaryHostName, hostname)
        return
# Run the LookupUtil suite with CLI-provided configuration.
if __name__ == '__main__':
    parse_commandline()
    suite = unittest.TestLoader().loadTestsFromTestCase(TestLookupUtilMethods)
    unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
11271968 | <reponame>Muhammadislom/TuitOpenSource
class RSAMethod:
    """Minimal RSA helper."""

    def pq(self, p, q):
        """Return the RSA modulus p*q as a decimal string."""
        modulus = p * q
        return str(modulus)
| StarcoderdataPython |
5009016 | <gh_stars>1-10
# Emits "<majority-label>\t<features>" lines for FactBank sentences on which
# at least 6 annotators agreed on one factuality class.  The last CLI
# argument selects the feature source.  NOTE: Python 2 code (print
# statement, multi-sequence map()).
import sys
from csv import DictReader
import os

argv = sys.argv
# Factuality classes the annotators could choose from.
CLASSES = ["CT_plus", "CT_minus", "PR_plus", "PR_minus", "PS_plus", "PS_minus", "Uu"]
csv_filename = argv[1]
# Feature mode: "bow" (sentence text), "pgcqmn" (precomputed linguistic
# feature files), or "pgcqmnbow" (both).
MODE = argv[-1]
if MODE == "bow":
    # Build a map of (factbank file, sentence id) -> raw sentence text.
    factbank_path = r"/Users/pushpendrerastogi/Dropbox/evsem_data/factbank/data"
    sentence_file_path = os.path.join(factbank_path, "annotation", "sentences.txt")
    FACTBANK_SPLITTER = "|||"
    d = {}
    for l in open(sentence_file_path):
        l = l.strip().split(FACTBANK_SPLITTER)
        # File names are single-quoted in the source file; strip the quotes.
        fb_filename = l[0][1:-1]; assert l[0][0] == "'"
        sentId = l[1]
        sentence = l[2][1:-1].replace(r"\'", "'").strip()
        d[(fb_filename, sentId)] = sentence
    with open(csv_filename) as csv_file:
        _csv = DictReader(csv_file)
        for row in _csv:
            # Majority label: any class chosen by at least 6 annotators.
            majority = [_class for _class in CLASSES if int(row[_class]) >= 6]
            if majority != []:
                sys.stdout.write(majority[0])
                sys.stdout.write("\t")
                sys.stdout.write(d[(row["file"], row["sentId"])])
                sys.stdout.write("\n")
elif MODE == "pgcqmn":
    # One precomputed feature file per linguistic signal, aligned
    # line-by-line with the CSV rows.
    _csv = DictReader(open(csv_filename))
    predicate_fn = open(argv[2], "rb")
    general_fn = open(argv[3], "rb")
    conditional_fn = open(argv[4], "rb")
    quotation_fn = open(argv[5], "rb")
    modality_fn = open(argv[6], "rb")
    negation_fn = open(argv[7], "rb")
    # Py2 multi-sequence map(): zips the CSV rows with the stripped feature
    # lines; stops at the shortest input.
    for row in map(lambda a, b, c, d, e, f, g: (a, b.strip(), c.strip(), d.strip(), e.strip(), f.strip(), g.strip()), _csv, predicate_fn, general_fn, conditional_fn, quotation_fn, modality_fn, negation_fn):
        assert all(e is not None for e in row)
        majority = [_class for _class in CLASSES if int(row[0][_class]) >= 6]
        if majority != []:
            sys.stdout.write(majority[0])
            sys.stdout.write("\t")
            sys.stdout.write(" ".join(row[1:]))
            sys.stdout.write("\n")
elif MODE == "pgcqmnbow":
    # Combination of the two modes above: linguistic features followed by
    # the raw sentence text.  NOTE(review): duplicates the loading code of
    # both earlier branches.
    _csv = DictReader(open(csv_filename))
    predicate_fn = open(argv[2], "rb")
    general_fn = open(argv[3], "rb")
    conditional_fn = open(argv[4], "rb")
    quotation_fn = open(argv[5], "rb")
    modality_fn = open(argv[6], "rb")
    negation_fn = open(argv[7], "rb")
    factbank_path = r"/Users/pushpendrerastogi/Dropbox/evsem_data/factbank/data"
    sentence_file_path = os.path.join(factbank_path, "annotation", "sentences.txt")
    FACTBANK_SPLITTER = "|||"
    d = {}
    for l in open(sentence_file_path):
        l = l.strip().split(FACTBANK_SPLITTER)
        fb_filename = l[0][1:-1]; assert l[0][0] == "'"
        sentId = l[1]
        sentence = l[2][1:-1].replace(r"\'", "'").strip()
        d[(fb_filename, sentId)] = sentence
    for i, row in enumerate(map(lambda a, b, c, d, e, f, g: (a, b.strip(), c.strip(), d.strip(), e.strip(), f.strip(), g.strip()), _csv, predicate_fn, general_fn, conditional_fn, quotation_fn, modality_fn, negation_fn)):
        # Progress indicator on stderr (Py2 print statement).
        print >>sys.stderr, i
        assert all(e is not None for e in row)
        majority = [_class for _class in CLASSES if int(row[0][_class]) >= 6]
        if majority != []:
            sys.stdout.write(majority[0])
            sys.stdout.write("\t")
            sys.stdout.write(" ".join(row[1:]))
            sys.stdout.write(d[(row[0]["file"], row[0]["sentId"])])
            sys.stdout.write("\n")
| StarcoderdataPython |
3464328 | '''
Classes/functions to read and featurize data
'''
import argparse
import logging
import time
import os
import pandas as pd
from sklearn import preprocessing
import torch
from transformers import BertTokenizer, BertModel
import numpy as np
from tqdm import tqdm
import pickle
from utils import get_appendix
from glob import glob
class Data:
    """Featurized view of one dataset split (train/dev/test).

    On construction, featurizes ``raw_data`` into ``self.X`` / ``self.y``
    using scaled numeric columns, BERT sentence embeddings and (optionally)
    precomputed tf-idf vectors.
    """

    def __init__(self, raw_data, split, text_only=False, include_tfidf=False, balanced=False):
        # Pretrained BERT in eval mode; hidden states are needed for embeddings.
        self.bert = BertModel.from_pretrained('bert-base-uncased', output_hidden_states = True).eval()
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.scaler = preprocessing.StandardScaler() # Can change this to choose different scaler
        self.split = split
        self.X, self.y = self.featurize(raw_data, text_only=text_only, include_tfidf=include_tfidf, balanced=balanced)

    # Code from https://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/
    def _get_bert_embed(self, text):
        """Return a fixed-size sentence embedding for *text* as a list of floats.

        The embedding is the mean of the second-to-last hidden layer's token
        vectors.
        """
        marked_text = "[CLS] " + text + " [SEP]"
        # Tokenize our sentence with the BERT tokenizer.
        tokenized_text = self.tokenizer.tokenize(marked_text)
        # Map the token strings to their vocabulary indeces.
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        # Single-segment input: every token belongs to segment 1.
        segments_ids = [1] * len(tokenized_text)
        tokens_tensor = torch.tensor([indexed_tokens])
        segments_tensors = torch.tensor([segments_ids])
        with torch.no_grad():
            outputs = self.bert(tokens_tensor, segments_tensors)
            hidden_states = outputs[2]
        # Second-to-last layer, first (only) sentence in the batch.
        token_vecs = hidden_states[-2][0]
        # Calculate the average of all token vectors.
        sentence_embedding = torch.mean(token_vecs, dim=0)
        return sentence_embedding.tolist()

    # Change this function to change how data is featurized
    def featurize(self, df, text_only=False, include_tfidf=False, balanced=False):
        """Turn the raw dataframe into (X, y).

        * ``balanced``: the dataframe is already feature-complete; just split
          off the label column.
        * ``text_only``: X is the raw processed text (for text-based models).
        * otherwise: X concatenates scaled numeric columns, BERT embeddings
          and optionally precomputed tf-idf vectors.
        """
        if balanced:
            X = df.drop(columns=['label']).values
            y = df['label'].values
            return X, y
        if 'processed_text' not in df.columns:
            df['processed_text'] = df['text']
        # Drop rows with missing text; reset index so concat below aligns.
        df = df[~df['processed_text'].isna()].reset_index()
        if text_only:
            X = df['processed_text'].values
            y = df['label'].values
        else:
            # Hand-engineered numeric features expected in the dataframe.
            x1_cols = ['like_count', 'quote_count',
                       'mentions_count', 'author_followers',
                       'sentiment_score_pos', 'sentiment_score_neu',
                       'sentiment_score_neg', 'sentiment_score_comp',
                       'text_tfid_sum', 'text_tfid_max', 'text_tfid_min', 'text_tfid_avg',
                       'text_tfid_std', 'hashtag_tfid_sum', 'hashtag_tfid_max',
                       'hashtag_tfid_min', 'hashtag_tfid_avg', 'hashtag_tfid_std']
            X1 = df[x1_cols]
            X1 = self.scaler.fit_transform(X1) # Scale values
            X1 = pd.DataFrame(X1, columns=x1_cols)
            logging.info('Getting bert embeddings...')
            X2 = pd.DataFrame([self._get_bert_embed(tweet) for tweet in tqdm(df['processed_text'].values)])
            X2.columns = [f'b{i}' for i in range(X2.shape[1])]
            if include_tfidf:
                logging.info('Loading precomputed tfidf scores')
                # NOTE(review): `eval` on stored strings -- only safe because the
                # column is produced by our own preprocessing; do not run on
                # untrusted input.
                X3 = pd.DataFrame([eval(row) for row in tqdm(df['hashtags_tfidf'].values)])
                X3.columns = [f'h{i}' for i in range(X3.shape[1])]
                X = pd.concat([X1, X2, X3], axis=1, sort=False)
            else:
                X = pd.concat([X1, X2], axis=1, sort=False)
            X = X.values
            y = df['label'].values
        return X, y

    def to_pickle(self, outpath):
        """Serialize this Data object (including features) to *outpath*."""
        with open(outpath, "wb") as f:
            pickle.dump(self, f)
def read_pickle(path):
    """Deserialize and return the object stored in the pickle file at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
# Loads and caches data, does not cache if text_only features,
# because this is model dependent and quick to compute
def load(datadir, cachedir=None, override_cache=False,
         text_only=False, include_tfidf=False, balanced=False):
    """Return featurized ``(train, dev, test)`` Data objects.

    Pickled featurized splits are read from ``cachedir`` when all three
    exist (unless ``override_cache`` or ``text_only``); otherwise the raw
    files under ``datadir`` are read, featurized and written back to the
    cache (again, unless ``text_only``).
    """
    # If exists caches for all splits and not override cache, load cached data
    appendix = get_appendix(include_tfidf, balanced)
    if all((
        os.path.exists(os.path.join(cachedir, f'train{appendix}.pkl')),
        os.path.exists(os.path.join(cachedir, f'dev{appendix}.pkl')),
        os.path.exists(os.path.join(cachedir, f'test{appendix}.pkl')),
        not override_cache,
        not text_only
    )):
        train = read_pickle(os.path.join(cachedir, f'train{appendix}.pkl'))
        dev = read_pickle(os.path.join(cachedir, f'dev{appendix}.pkl'))
        test = read_pickle(os.path.join(cachedir, f'test{appendix}.pkl'))
    else:
        if balanced:
            # Balanced variant ships pre-split CSVs; glob picks the split files.
            train = pd.read_csv(glob(os.path.join(datadir, 'balanced', '*train*.csv'))[0])
            dev = pd.read_csv(glob(os.path.join(datadir, 'balanced', '*dev*.csv'))[0])
            test = pd.read_csv(glob(os.path.join(datadir, 'balanced', '*test*.csv'))[0])
            train = Data(train, "train", text_only=text_only, include_tfidf=include_tfidf, balanced=balanced)
            dev = Data(dev, "dev", text_only=text_only, include_tfidf=include_tfidf, balanced=balanced)
            test = Data(test, "test", text_only=text_only, include_tfidf=include_tfidf, balanced=balanced)
        else:
            # Single TSV plus id lists defining the three splits.
            all_data = pd.read_csv(os.path.join(datadir, 'all_data_preprocessed.tsv'), sep='\t')
            all_data.drop(columns=['retweet_count', 'reply_count'], inplace=True)
            # Read splits
            train_ids = pd.read_csv(os.path.join(datadir, 'train-ids.txt'), names=['id'])
            dev_ids = pd.read_csv(os.path.join(datadir, 'dev-ids.txt'), names=['id'])
            test_ids = pd.read_csv(os.path.join(datadir, 'test-ids.txt'), names=['id'])
            train = Data(train_ids.merge(all_data, how='inner', on='id'), "train", text_only=text_only, include_tfidf=include_tfidf)
            dev = Data(dev_ids.merge(all_data, how='inner', on='id'), "dev", text_only=text_only, include_tfidf=include_tfidf)
            test = Data(test_ids.merge(all_data, how='inner', on='id'), "test", text_only=text_only, include_tfidf=include_tfidf)
        if cachedir and not text_only:
            train.to_pickle(os.path.join(cachedir, f'train{appendix}.pkl'))
            dev.to_pickle(os.path.join(cachedir, f'dev{appendix}.pkl'))
            test.to_pickle(os.path.join(cachedir, f'test{appendix}.pkl'))
    return train, dev, test
if __name__=="__main__":
    # CLI entry point: featurize all three splits (forcing re-computation)
    # and report the elapsed wall-clock time.
    arg_parser = argparse.ArgumentParser()
    for flag in ('--datadir', '--cachedir'):
        arg_parser.add_argument(flag, default='data')
    args = arg_parser.parse_args()
    logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
    start = time.time()
    train, dev, test = load(args.datadir, cachedir=args.cachedir, override_cache=True)
    end = time.time()
    logging.info(f'Time to run script: {end-start} secs')
8065495 | # -*- coding: utf-8 -*-
# Copyright (C) 2018 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
from __future__ import division
from pyboltzmann.class_builder import DummyBuilder
from pyboltzmann.decomposition_grammar import DecompositionGrammar
from pyboltzmann.generic_samplers import *
from pyboltzmann.generic_samplers import BoltzmannSamplerBase, AliasSampler
from pyboltzmann.utils import bern
from planar_graph_sampler.grammar.grammar_utils import divide_by_2
def to_K_dy(dummy):
    """Wrap *dummy* as a u-derived class instance, adding one to its u-size."""
    dummy._u_size = dummy._u_size + 1
    return UDerivedClass(dummy)
# def underive(dummy):
# dummy._l_size += 1
# return dummy
#
#
# def to_u_derived_class(dummy):
# dummy._u_size -= 1
# return dummy
#
#
# def to_l_derived_class(dummy):
# dummy._l_size -= 1
# return dummy
def dummy_sampling_grammar():
    """Returns the adapted grammar for sampling dummies.

    This is useful for making experiments about the sizes of the objects output by the sampler.
    The grammar still needs to be initialized and set to dummy sampling mode.
    """
    # Some general shortcuts to make the grammar more readable.
    L = LAtomSampler()
    U = UAtomSampler()
    Z = ZeroAtomSampler()
    Set = SetSampler
    USubs = USubsSampler
    LSubs = LSubsSampler
    Bij = BijectionSampler
    Rej = RejectionSampler
    Trans = TransformationSampler
    DxFromDy = LDerFromUDerSampler
    DyFromDx = UDerFromLDerSampler
    Hook = HookSampler

    grammar = DecompositionGrammar()

    # Binary trees.

    # Global counter of u-atoms drawn so far in the current K sample; used by
    # the early-rejection scheme below.
    class NodeCounter(object):
        L = 0

        @staticmethod
        def reset():
            NodeCounter.L = 0

    # Dummy builder that performs early rejection while sampling binary trees
    # (class K): each additional u-atom may abort and restart the sampler.
    class ModifiedDummyBuilder(DummyBuilder):
        activate_rejection_K = False

        @staticmethod
        def activate_rejection():
            ModifiedDummyBuilder.activate_rejection_K = True

        @staticmethod
        def deactivate_rejection():
            ModifiedDummyBuilder.activate_rejection_K = False

        def __init__(self, grammar):
            self._grammar = grammar

        def u_atom(self):
            if ModifiedDummyBuilder.activate_rejection_K:
                NodeCounter.L += 1
                L = NodeCounter.L
                # Restart with probability 1/(L+1) after the first u-atom.
                if L > 1 and not bern(L / (L + 1)):
                    self._grammar.restart_sampler()
            return DummyClass(u_size=1)

    # Alias samplers for the binary-tree rules (R_b = black-rooted,
    # R_w = white-rooted, *_as = asymmetric, *_dx = l-derived).
    K_dy = AliasSampler('K_dy')
    R_b = AliasSampler('R_b')
    R_w = AliasSampler('R_w')
    R_b_as = AliasSampler('R_b_as')
    R_w_as = AliasSampler('R_w_as')
    R_b_head = AliasSampler('R_b_head')
    R_w_head = AliasSampler('R_w_head')
    K_dy_dx = AliasSampler('K_dy_dx')
    R_b_dx = AliasSampler('R_b_dx')
    R_w_dx = AliasSampler('R_w_dx')
    R_b_as_dx = AliasSampler('R_b_as_dx')
    R_w_as_dx = AliasSampler('R_w_as_dx')
    R_b_head_dx = AliasSampler('R_b_head_dx')
    R_w_head_dx = AliasSampler('R_w_head_dx')

    def rej_to_K(u_derived_tree):
        # return bern(2 / (u_derived_tree.u_size + 1))
        return True

    def rej_is_assymetric(dummy):
        return dummy._u_size != 2 and (dummy._u_size != 5 or dummy._l_size != 1)
        # return True

    # Accept a K_dy sample as a K_dx sample with probability proportional to
    # l-size / (u-size + 1).
    def rej_K_dy_to_K_dx(dummy):
        return bern(1.5 * dummy._l_size / (dummy._u_size + 1))

    def bij_to_l_derived(dummy):
        dummy._l_size -= 1
        return dummy

    def bij_underive_dy(dummy):
        dummy._u_size += 1
        return dummy

    binary_tree_rules = {

        # Underived and derived binary trees.

        'K':
            Hook(
                Trans(
                    K_dy,
                    bij_underive_dy
                ),
                before=ModifiedDummyBuilder.activate_rejection,
                after=ModifiedDummyBuilder.deactivate_rejection
            ),

        # This sampler seems to be about half as fast as the one for K_dy (due to the rejection)
        # So it looks like this invokes K_dy twice on average
        'K_dx':
            Bij(
                Rej(
                    K_dy,
                    rej_K_dy_to_K_dx
                ),
                bij_to_l_derived
            ),

        # I measured that the more complicated grammar is about 10 - 15% faster than this rejection !
        # (For values of size 10000)
        'K_dy':
            RestartableSampler(
                Hook(
                    R_b_as + R_w_as,
                    NodeCounter.reset
                ),
            ),

        'R_b_as':
            2 * R_w * L * U + R_w ** 2 * L,

        'R_w_as':
            2 * R_b_head * U + R_b ** 2,

        'R_b_head':
            2 * R_w_head * L * U ** 2 + R_w_head ** 2 * L,

        'R_w_head':
            U + 2 * R_b * U + R_b ** 2,

        'R_b':
            L * (U + R_w) ** 2,

        'R_w':
            (U + R_b) ** 2,

        # Bi-derived binary trees.

        'K_dx_dx':
            Bij(
                Rej(
                    K_dy_dx,
                    rej_K_dy_to_K_dx
                ),
                bij_to_l_derived
            ),

        'K_dy_dx':
            R_b_as_dx + R_w_as_dx,

        'R_b_as_dx':
            2 * U * (R_w_dx * L + R_w) + 2 * R_w * R_w_dx * L + R_w ** 2,

        'R_w_as_dx':
            2 * R_b_head_dx * U + 2 * R_b * R_b_dx,

        'R_b_head_dx':
            2 * U ** 2 * (R_w_head_dx * L + R_w_head) + 2 * R_w_head * R_w_head_dx * L + R_w_head ** 2,

        'R_w_head_dx':
            2 * R_b_dx * U + 2 * R_b * R_b_dx,

        'R_b_dx':
            (U + R_w) ** 2 + 2 * R_w_dx * L * (U + R_w),

        'R_w_dx':
            2 * (U + R_b) * R_b_dx

    }

    grammar.rules = binary_tree_rules

    # Irreducible dissection.

    K = AliasSampler('K')
    K_dx = AliasSampler('K_dx')
    K_dx_dx = AliasSampler('K_dx_dx')
    I = AliasSampler('I')
    I_dx = AliasSampler('I_dx')
    I_dx_dx = AliasSampler('I_dx_dx')
    J = AliasSampler('J')
    J_dx = AliasSampler('J_dx')
    J_a = AliasSampler('J_a')
    J_a_dx = AliasSampler('J_a_dx')
    J_dx_dx = AliasSampler('J_dx_dx')
    J_a_dx_dx = AliasSampler('J_a_dx_dx')

    irreducible_dissection_rules = {

        # Non-derived dissections (standard, rooted, admissible).

        'I': K,

        'J': 3 * L * U * I,

        # Using only dummies we cannot make the admissibility check as it depends on internal structure. Based on
        # experiments however, we conjecture that the admissibility of an object in J is "almost" independent of its
        # size. Ignoring the check, we get an average size which is ~4% too small (for values N=100)
        # We also measured that the success probability seems to be around 20% only. So this rejection is quite costly.
        'J_a': Trans(J),

        # Derived dissections.

        'I_dx': K_dx,

        'J_dx':
            3 * U * (I + L * I_dx),

        'J_a_dx': J_dx,

        # Bi-derived dissections.

        'I_dx_dx': K_dx_dx,

        'J_dx_dx': 3 * U * I_dx + 3 * U * I_dx + 3 * L * U * I_dx_dx,

        'J_a_dx_dx': J_dx_dx,

    }

    grammar.rules = irreducible_dissection_rules

    # 3-connected planar graphs.

    G_3_arrow = AliasSampler('G_3_arrow')
    G_3_arrow_dy = AliasSampler('G_3_arrow_dy')
    G_3_arrow_dx = AliasSampler('G_3_arrow_dx')
    G_3_arrow_dx_dx = AliasSampler('G_3_arrow_dx_dx')
    G_3_arrow_dx_dy = AliasSampler('G_3_arrow_dx_dy')
    G_3_arrow_dy_dy = AliasSampler('G_3_arrow_dy_dy')
    M_3_arrow = AliasSampler('M_3_arrow')
    M_3_arrow_dx = AliasSampler('M_3_arrow_dx')
    M_3_arrow_dx_dx = AliasSampler('M_3_arrow_dx_dx')

    def rej_G_3_arrow_dx_to_dy(dummy):
        return bern(1 / 3 * dummy._u_size / (dummy._l_size + 1))  # todo check this

    three_connected_rules = {

        # Non-derived 3-connected rooted planar maps/graphs.

        'M_3_arrow': J_a,  # primal map

        'G_3_arrow': Trans(M_3_arrow, eval_transform=divide_by_2),  # See 4.1.9.

        # Derived 3-connected rooted planar maps/graphs.

        'M_3_arrow_dx': J_a_dx,

        'G_3_arrow_dx': Trans(M_3_arrow_dx, eval_transform=divide_by_2),

        'G_3_arrow_dy': Rej(G_3_arrow_dx, rej_G_3_arrow_dx_to_dy),  # See 5.3.3.

        # Bi-derived 3-connected rooted planar maps/graphs.

        'M_3_arrow_dx_dx': J_a_dx_dx,

        'G_3_arrow_dx_dx': Trans(M_3_arrow_dx_dx, eval_transform=divide_by_2),

        'G_3_arrow_dx_dy': Rej(G_3_arrow_dx_dx, rej_G_3_arrow_dx_to_dy),

        'G_3_arrow_dy_dy': Rej(G_3_arrow_dx_dy, rej_G_3_arrow_dx_to_dy),

    }

    grammar.rules = three_connected_rules

    # Networks.

    D = AliasSampler('D')
    S = AliasSampler('S')
    P = AliasSampler('P')
    H = AliasSampler('H')
    D_dx = AliasSampler('D_dx')
    S_dx = AliasSampler('S_dx')
    P_dx = AliasSampler('P_dx')
    H_dx = AliasSampler('H_dx')
    D_dx_dx = AliasSampler('D_dx_dx')
    S_dx_dx = AliasSampler('S_dx_dx')
    P_dx_dx = AliasSampler('P_dx_dx')
    H_dx_dx = AliasSampler('H_dx_dx')

    network_rules = {

        # networks (D = all, S = series, P = parallel, H = polyhedral)

        'D': U + S + P + H,

        'S': (U + P + H) * L * D,

        'P': U * Set(1, S + H) + Set(2, S + H),

        'H': USubs(G_3_arrow, D),

        # l-derived networks

        'D_dx': S_dx + P_dx + H_dx,

        'S_dx': (P_dx + H_dx) * L * D + (U + P + H) * (D + L * D_dx),

        'P_dx': U * (S_dx + H_dx) * Set(0, S + H) + (S_dx + H_dx) * Set(1, S + H),

        'H_dx': USubs(G_3_arrow_dx, D) + D_dx * USubs(G_3_arrow_dy, D),

        # bi-l-derived networks

        'D_dx_dx':
            S_dx_dx + P_dx_dx + H_dx_dx,

        'S_dx_dx':
            (P_dx_dx + H_dx_dx) * L * D
            + 2 * (P_dx + H_dx) * (D + L * D_dx)
            + (U + P + H) * (2 * D_dx + L * D_dx_dx),

        'P_dx_dx':
            U * ((S_dx_dx + H_dx_dx) * Set(0, S + H)
                 + U * (S_dx + H_dx) ** 2 * Set(0, S + H))
            + (S_dx_dx + H_dx_dx) * Set(1, S + H)
            + (S_dx + H_dx) ** 2 * Set(0, S + H),

        'H_dx_dx':
            USubs(G_3_arrow_dx_dx, D) + D_dx * USubs(G_3_arrow_dx_dy, D)
            + D_dx_dx * USubs(G_3_arrow_dy, D)
            + D_dx * USubs(G_3_arrow_dx_dy, D) + D_dx**2 * USubs(G_3_arrow_dy_dy, D)

    }

    grammar.rules = network_rules

    # Set default dummy builder in all rules.
    grammar.dummy_sampling_mode()

    # In binary tree rules set modified builder for the early rejection.
    grammar.set_builder(
        ['R_b', 'R_b_head', 'R_b_as', 'R_w', 'R_w_head', 'R_w_as', 'R_b_dx', 'R_b_head_dx', 'R_b_as_dx', 'R_w_dx',
         'R_w_head_dx', 'R_w_as_dx'],
        ModifiedDummyBuilder(grammar)
    )

    return grammar
if __name__ == "__main__":
    # Smoke-test / benchmark: draw `samples` dummies from the grammar and
    # report their average l-size and the total sampling time.
    from pyboltzmann.evaluation_oracle import EvaluationOracle
    from planar_graph_sampler.evaluations_planar_graph import *
    from pyboltzmann.utils import boltzmann_framework_random_gen
    from timeit import default_timer as timer

    # oracle = EvaluationOracle(planar_graph_evals[10000])
    oracle = EvaluationOracle(my_evals_100)
    BoltzmannSamplerBase.oracle = oracle
    BoltzmannSamplerBase.debug_mode = False

    grammar = dummy_sampling_grammar()
    grammar.init()
    # grammar.dummy_sampling_mode()

    # Symbolic variables for the generating-function evaluations.
    # symbolic_x = 'x'
    symbolic_y = 'y'
    symbolic_x = 'x*G_1_dx(x,y)'
    # symbolic_y = 'D(x*G_1_dx(x,y),y)'

    sampled_class = 'D_dx_dx'
    grammar._precompute_evals(sampled_class, symbolic_x, symbolic_y)

    try:
        print("expected: {}\n".format(oracle.get_expected_l_size(sampled_class, symbolic_x, symbolic_y)))
    except PyBoltzmannError:
        # The oracle may not know the expected size for this class; that's fine.
        pass

    # random.seed(0)
    # boltzmann_framework_random_gen.seed(0)

    l_sizes = []
    i = 0
    samples = 100
    start = timer()
    while i < samples:
        dummy = grammar.sample_iterative(sampled_class, symbolic_x, symbolic_y)
        l_sizes.append(dummy.l_size)
        i += 1
    end = timer()

    print()
    print()
    print("avg. size: {}".format(sum(l_sizes) / len(l_sizes)))
    print("time: {}".format(end - start))

    # while True:
    #     dummy = grammar.sample_iterative(sampled_class, symbolic_x, symbolic_y)
    #     if dummy.l_size > 100:
    #         print(dummy.u_size / dummy.l_size )
| StarcoderdataPython |
class build:
    """Windows build helper for the app's shareable zip.

    All work happens in ``__init__`` so that instantiating the class (see the
    bottom of the file) runs the full build: install build-time dependencies,
    fetch the 7-Zip binaries, freeze the app with PyInstaller, copy the data
    files and zip the result.
    """

    def __init__(self):
        # 安装插件 (install build-time dependencies)
        import os
        import sys
        # Use the *current* interpreter's pip: a bare "pip" on PATH may
        # belong to a different Python installation.
        pip_install = f'"{sys.executable}" -m pip install'
        os.system(f"{pip_install} pyinstaller")
        os.system(f"{pip_install} requests")
        # 下载压缩工具 (download the 7-Zip archiver used for packaging).
        # Imported only after the pip step above has had a chance to install it.
        import requests
        downloads = (
            ("https://www.hestudio.xyz/nonsense-literature/7z.dll", "7z.dll"),
            ("https://www.hestudio.xyz/nonsense-literature/7z.exe", "7z.exe"),
        )
        for url, target in downloads:
            response = requests.get(url)
            # Fail fast instead of silently writing an HTML error page to disk.
            response.raise_for_status()
            with open(target, "wb") as fh:
                fh.write(response.content)
        # 打包 (freeze the app and zip it together with its data files).
        os.system("pyinstaller main.py")
        os.system("copy asktip.txt dist/main/")
        os.system("copy question.txt dist/main/")
        os.system("7z a -tzip share_windows.zip dist/main/")


build()
| StarcoderdataPython |
11345842 | <gh_stars>0
class Port:
    """A connection endpoint on a gear, wired to a producer and/or consumer."""

    def __init__(self, gear, index, basename, producer=None, consumer=None):
        self.gear = gear
        self.index = index
        self.basename = basename
        self.producer = producer
        self.consumer = consumer

    @property
    def dtype(self):
        """Data type carried by the port, read from whichever side is wired.

        The producer side takes precedence when both are present.
        """
        source = self.producer if self.producer is not None else self.consumer
        return source.dtype

    def get_queue(self, port=None):
        """Return the producer's consumer queue for *port* (defaults to self)."""
        target = self if port is None else port
        return self.producer.get_consumer_queue(target)

    def finish(self):
        """Tell the consumer that no further data will arrive on this port."""
        self.consumer.finish()
class InPort(Port):
    # Input endpoint of a gear; all behaviour is inherited unchanged from Port.
    pass
class OutPort(Port):
    # Output endpoint of a gear; all behaviour is inherited unchanged from Port.
    pass
| StarcoderdataPython |
31823 | # -------------------------------------------------------------------------- #
# OpenSim Muscollo: plot_inverse_dynamics.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2017 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import sys
import pylab as pl
import pandas as pd
# One CLI argument is required: the path to the actual inverse dynamics file.
if len(sys.argv) != 2:
    raise Exception("Requires actual inverse dynamics csv file as argument.")

filtered = pd.read_csv('DEBUG_desiredMoments.csv', index_col=0, header=None)
actual = pd.read_csv(sys.argv[1], index_col=0, skiprows=3)

# One stacked subplot per moment column, overlaying filtered vs. actual.
fig = pl.figure()
num_columns = len(filtered.columns)
for i, column in enumerate(filtered.columns):
    axes = fig.add_subplot(num_columns, 1, i + 1)
    axes.plot(filtered.index, filtered[column], label='filtered')
    axes.plot(actual.index, actual[actual.columns[i]], label='actual')
pl.legend()
pl.show()
8083210 | <reponame>jamboree/mrustc
import argparse
import sys
def main():
argp = argparse.ArgumentParser()
argp.add_argument("-o", "--output", type=lambda v: open(v, 'w'), default=sys.stdout)
argp.add_argument("logfile", type=open)
argp.add_argument("fcn_name", type=str, nargs='?')
args = argp.parse_args()
fcn_lines = []
found_fcn = False
for line in args.logfile:
if 'visit_function: ' in line \
or 'evaluate_constant: ' in line \
or 'Trans_Monomorphise_List: ' in line \
or 'Trans_Codegen: FUNCTION CODE' in line \
or 'Trans_Codegen- emit_' in line \
or 'MIR_OptimiseInline: >> (' in line \
:
if found_fcn:
break
fcn_lines = []
if args.fcn_name is not None and args.fcn_name in line:
found_fcn = True
fcn_lines.append(line.strip())
for l in fcn_lines:
args.output.write(l)
args.output.write("\n")
main()
| StarcoderdataPython |
5053640 | <reponame>luoy2/polyhymnia
from flask import Blueprint, abort
from polyhymnia.decorators.json import *
import logging
from polyhymnia.serializers import NpEncoder
import importlib
from pprint import pformat
simbert_bp = Blueprint('simbert', __name__)
logger = logging.getLogger(__name__)
gunicorn_logger = logging.getLogger('gunicorn.error')
def allocate_job(job_name, job_params):
    """Run the predictor module named *job_name* with *job_params*.

    Returns the predictor's result dict tagged with the job name.  Any
    failure (import error, predict error) is logged and reported as a
    "failed" payload instead of propagating.
    """
    result = {"job": job_name}
    try:
        module = importlib.import_module(f"interfaces.predictors.{job_name}")
        prediction = module.predict(**job_params, logger=gunicorn_logger)
        result.update(prediction)
    except Exception as exc:
        gunicorn_logger.error(exc, exc_info=True)
        result["result"] = "failed"
        result["msg"] = ""
        result["data"] = {}
    gunicorn_logger.debug(pformat(result))
    return result
@simbert_bp.route('/submit', methods=['POST'])
@validate_json
@validate_json_param(['jobs', 'shared_params'])
def submit_jobs():
    """POST /submit: run every requested prediction job and return results.

    Request body: ``{"jobs": [...], "shared_params": {...}}``.  Each job is
    dispatched through ``allocate_job`` with the same shared parameters;
    per-job failures are reported inside ``data`` rather than failing the
    whole request.
    """
    gunicorn_logger.info("")
    if not request.json:
        abort(400)
    try:
        data = request.json
        jobs = data.get('jobs', [])
        shared_params = data.get('shared_params', {})
        job_results = []
        for job in jobs:
            job_results.append(allocate_job(job, shared_params))
        # Split job names by outcome purely for logging.
        succeeded = []
        failed = []
        for i in job_results:
            if i['result'] == 'ok':
                succeeded.append(i['job'])
            else:
                failed.append(i['job'])
        gunicorn_logger.info(f"succeeded jobs are: {succeeded}")
        gunicorn_logger.info(f"failed jobs are: {failed}")
        # NpEncoder handles numpy scalars/arrays that predictors may return.
        return json.dumps({"status": "ok", "data": job_results}, cls=NpEncoder, ensure_ascii=False)
    except Exception as e:
        logger.error(e, exc_info=True)
        return jsonify({"status": "failed", "data": {}})
| StarcoderdataPython |
6507249 | <reponame>tushar-agarwal2909/MoPulseGen
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 16:35:59 2020
@author: agarwal.270a
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow.keras.layers as layers
#import modules.custom_layers as clayers
from tensorflow.keras import initializers as initizers
from lib.model_funcs import make_data_pipe, find_batch_size
import numpy as np
import copy
import time
import matplotlib.pyplot as plt
#tf.keras.backend.set_floatx('float64')
class Generator(tf.keras.layers.Layer):
    """Sequentially stacks a list of Keras layers into a generator network."""

    def __init__(self, layer_list, optimizer):
        super(Generator, self).__init__()
        self.layer_list = layer_list
        # Fall back to a default Adam optimizer when none is supplied.
        if optimizer is None:
            self.optimizer = tf.keras.optimizers.Adam(1e-4)
        else:
            self.optimizer = optimizer
        self.bc = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def loss(self, fake_output):
        """Generator GAN loss: push discriminator logits on fakes toward 1."""
        return self.bc(tf.ones_like(fake_output), fake_output)

    def call(self, x, training=None):
        """Run the input through every layer in order."""
        for layer in self.layer_list:
            x = layer(x, training=training)
        return x
class Discriminator(tf.keras.layers.Layer):
    """Sequentially stacks a list of Keras layers into a discriminator network."""

    def __init__(self, layer_list, optimizer):
        super(Discriminator, self).__init__()
        self.layer_list = layer_list
        # Fall back to a default Adam optimizer when none is supplied.
        if optimizer is None:
            self.optimizer = tf.keras.optimizers.Adam(1e-4)
        else:
            self.optimizer = optimizer
        self.bc = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def loss(self, real_output, fake_output):
        """Standard discriminator GAN loss: real logits -> 1, fake logits -> 0."""
        real_loss = self.bc(tf.ones_like(real_output), real_output)
        fake_loss = self.bc(tf.zeros_like(fake_output), fake_output)
        return real_loss + fake_loss

    def call(self, x, training=None):
        """Run the input through every layer in order."""
        for layer in self.layer_list:
            x = layer(x, training=training)
        return x
class Net_stitchGAN(tf.keras.layers.Layer):
    """Signal-stitching GAN: a GRU-based generator whose output is scored by
    a GRU-based discriminator, wired together as one Keras layer."""

    def __init__(self, in_shape, out_shape, optimizers=None, drop=0.2):
        """
        Args:
            in_shape: expected input shape (stored, not enforced here).
            out_shape: expected output shape (stored, not enforced here).
            optimizers: list of 2 optimizers [generator, discriminator];
                entries may be None to use each sub-network's default.
            drop: NOTE(review) accepted but unused below (the discriminator
                uses a hard-coded 0.3 dropout); kept for interface
                compatibility -- confirm before removing.
        """
        super(Net_stitchGAN, self).__init__()
        self.in_shape = in_shape
        self.out_shape = out_shape
        # Avoid a mutable default argument; None means "use defaults for both".
        if optimizers is None:
            optimizers = [None, None]
        if not isinstance(optimizers, list):
            raise AssertionError(('optimizers must be a list of 2 optimizers'
                                  ', one each for generator and discrimator'
                                  'respectively.'))
        self.optimizers = optimizers

        # Generator: GRU feature extractor followed by a 1x1 conv projection.
        self.GenL = []
        self.GenL.append(layers.GRU(64, return_sequences=True, name='gru_gen_1'))
        self.GenL.append(layers.Conv1D(1, 1, name='conv1d_gen_1'))
        self.gen = Generator(self.GenL, self.optimizers[0])

        # Discriminator: GRU encoder -> dropout -> flatten -> dense logit.
        self.DiscL = []
        self.DiscL.append(layers.GRU(64, name='gru_disc_1'))
        self.DiscL.append(layers.Dropout(0.3, name='drop_disc_1'))
        self.DiscL.append(layers.Flatten(name='flat_disc_1'))
        self.DiscL.append(layers.Dense(1, name='fc_disc_1'))
        self.disc = Discriminator(self.DiscL, self.optimizers[1])
        return

    def call(self, x, training=None):
        '''
        Run the generator on the input, then score the generated signal with
        the discriminator, returning the discriminator logits.
        '''
        x = self.gen(x, training=training)
        x = self.disc(x, training=training)
        return x
class Model_stitchGAN(tf.keras.Model):
    """Training/evaluation wrapper around a Net_stitchGAN.

    Two modes:
      * 'stitch': the generator is trained with plain MSE against a target
        signal (supervised stitching).
      * otherwise: full adversarial GAN training of generator+discriminator.
    """

    def __init__(self, net, model_path, mode='stitch'):
        '''
        Setting all the variables for our model.

        Args:
            net: a Net_stitchGAN instance (provides .gen and .disc).
            model_path: directory for checkpoint files.
            mode: 'stitch' for supervised MSE training, anything else for
                adversarial training.
        '''
        super(Model_stitchGAN, self).__init__()
        self.net = net
        self.model_path = model_path
        self.mode = mode
        #self.optimizer=self.net.optimizer
        #self.get_data=modify_get_data(get_data_old)

        #'Stateful' Metrics
        self.train_loss1 = tf.keras.metrics.Mean(name='train_loss1')
        self.train_loss2 = tf.keras.metrics.Mean(name='train_loss2')
        #self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        #self.test_loss1 = tf.keras.metrics.Mean(name='test_loss1')
        #self.test_loss2 = tf.keras.metrics.Mean(name='test_loss2')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')

        #'Stateless' Losses
        self.loss_bc = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        self.loss_mse = tf.keras.losses.MeanSquaredError()
        #self.l1_loss=lambda z: tf.reduce_mean(tf.abs(z))
        # Classification accuracy helper (argmax agreement).
        self.acc = lambda y, y_hat: tf.reduce_mean(tf.cast(tf.equal(
                tf.argmax(y, axis=1), tf.argmax(y_hat, axis=1)), tf.float64))

        #Checkpoint objects
        self.ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                                        optimizer=self.net.gen.optimizer,
                                        model=self.net)
        self.manager = tf.train.CheckpointManager(self.ckpt, self.model_path
                                                  , max_to_keep=2)
        #For fit function initialization
        self.fit_init = False
        return

# =============================================================================
#     def recon_loss(self,x_true,x_pred):
#         # E[log P(X|z)]
#         recon = tf.reduce_sum(tf.square(x_true - x_pred),axis=1)
#         return tf.reduce_sum(recon,axis=0)
#
#     def KL_loss(self,mu,logsigma):
#         # D_KL(Q(z|X) || P(z|X))
#         kl = 0.5 * tf.reduce_sum(tf.exp(logsigma) + tf.square(mu) - 1. -
#                                  logsigma, axis=1)
#         return tf.reduce_sum(kl,axis=0)
#
#     def sample_z(self,mu,logsig):
#         eps=tf.random.normal(shape=tf.shape(mu),mean=0.,stddev=1.,
#                              dtype=tf.dtypes.float64)
#         z=mu + tf.exp(logsig / 2) * eps
#         return z
#
#     def encoder(self,x,cond):
#         cond=tf.pow(tf.cast(cond,tf.float64),-1) #invert to get HR in BPS
#         VAE_in=tf.concat([x,cond],axis=-1)
#         x = self.relu(tf.matmul(VAE_in,self.w1)+self.b1)
#         mu = tf.matmul(x,self.w_mu)+self.b_mu
#         logsig = tf.matmul(x,self.w_logsig)+self.b_logsig
#         return mu,logsig
#
#     def decoder(self,z,cond):
#         cond=tf.pow(tf.cast(cond,tf.float64),-1) #invert to get HR in BPS
#         latent=tf.concat([z,cond],axis=-1)
#         x = self.relu(tf.matmul(latent,self.w2)+self.b2)
#         x_hat=tf.matmul(x,self.w3)+self.b3
#         return x_hat
#
#     def nn_model(self, x,cond):
#         '''
#         Defining the architecture of our model. This is where we run
#         through our whole dataset and return it, when training and
#         testing.
#         '''
#         mu,logsig=self.encoder(x,cond)
#         z=self.sample_z(mu,logsig)
#         print(z.get_shape())
#         x_hat=self.decoder(z,cond)
#         return mu,logsig,x_hat
#
#
#     def check_model(self,z):
#         return self.nn_model(z)
# =============================================================================

    @tf.function
    def train_step_stitch(self, interm_sig, true_sig):
        '''
        One supervised training step ('stitch' mode): the generator is
        updated by MSE between its output and the true signal.  Updates the
        train_loss1 metric.
        '''
        #noise = tf.random.normal([BATCH_SIZE, noise_dim])
        generator = self.net.gen
        with tf.GradientTape() as gen_tape:
            sig_hat = generator(interm_sig, training=True)
            stitch_loss = self.loss_mse(true_sig, sig_hat)
        gradients = gen_tape.gradient(stitch_loss, generator.trainable_variables)
        generator.optimizer.apply_gradients(zip(gradients, generator.trainable_variables))
        self.train_loss1(stitch_loss)

    @tf.function
    def train_step(self, raw_synth_sig, true_sig):
        '''
        One adversarial training step: generator and discriminator are
        updated simultaneously from the same forward pass.  Updates the
        train_loss1 (generator) and train_loss2 (discriminator) metrics.
        '''
        #noise = tf.random.normal([BATCH_SIZE, noise_dim])
        generator, discriminator = self.net.gen, self.net.disc
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            synth_sig = generator(raw_synth_sig, training=True)
            real_output = discriminator(true_sig, training=True)
            fake_output = discriminator(synth_sig, training=True)
            gen_loss = generator.loss(fake_output)
            disc_loss = discriminator.loss(real_output, fake_output)
        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
        generator.optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
        discriminator.optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        self.train_loss1(gen_loss)
        self.train_loss2(disc_loss)
        return

    def test_step_stitch(self, interm_sig, true_sig=None, in_prediction=False):
        '''
        Evaluation step for 'stitch' mode.  With in_prediction=True, returns
        the generated signal; otherwise accumulates MSE into test_loss.
        '''
        generator = self.net.gen
        sig_hat = generator(interm_sig, training=False)
        if in_prediction:
            return sig_hat
        stitch_loss = self.loss_mse(true_sig, sig_hat)
        self.test_loss(stitch_loss)
        #self.test_metric(x, predictions)
        return

    def test_step(self, raw_synth_sig, true_sig=None, in_prediction=False):
        '''
        Evaluation step for adversarial mode.  With in_prediction=True,
        returns the generated signal; otherwise accumulates the absolute gap
        between the generator loss on real vs. fake outputs into test_loss
        (small gap = discriminator cannot tell them apart).
        '''
        generator, discriminator = self.net.gen, self.net.disc
        synth_sig = generator(raw_synth_sig, training=False)
        if in_prediction:
            return synth_sig
        fake_output = discriminator(synth_sig, training=False)
        real_output = discriminator(true_sig, training=False)
        gen_loss_fake = generator.loss(fake_output)
        gen_loss_real = generator.loss(real_output)
        test_loss = tf.abs(gen_loss_real - gen_loss_fake)
        self.test_loss(test_loss)
        #self.test_metric(x, predictions)
        return

    @tf.function
    def val_step_stitch(self, interm_sig, true_sig, in_prediction=False):
        # Graph-compiled wrapper around test_step_stitch for validation loops.
        return self.test_step_stitch(interm_sig, true_sig,
                                     in_prediction=in_prediction)

    @tf.function
    def val_step(self, raw_synth_sig, true_sig, in_prediction=False):
        # Graph-compiled wrapper around test_step for validation loops.
        return self.test_step(raw_synth_sig, true_sig, in_prediction=in_prediction)

    def fit(self, data, summaries, epochs):
        '''
        This fit function runs training and testing.

        Args:
            data: (train, val) tuples of arrays; element 0 is the input and
                element 1 the target signal.
            summaries: (train_summary_writer, test_summary_writer) for
                TensorBoard logging.
            epochs: number of passes over the training data.
        '''
        # Pick the step functions and the progress template for the mode.
        if self.mode=='stitch':
            train_step,val_step=self.train_step_stitch,self.val_step_stitch
            template = ('Epoch {}, Train_Loss: {},Val Loss: {},'
                        'Time used: {} \n')
        else:
            train_step,val_step=self.train_step,self.val_step
            template = ('Epoch {}, Gen_Loss: {}, Disc_Loss: {},Val Loss: {},'
                        'Time used: {} \n')
        train, val=data
        batch_size_train,N=find_batch_size(train[0].shape[0],thres=1000)
        batch_size_val,N_test=find_batch_size(val[0].shape[0],thres=900,
                                              mode='val')
        #TODO: Overridden stuff here
        #batch_size_train*=8
        #batch_size_val=int(batch_size_val/2)
        print(batch_size_train,batch_size_val)
        train_ds=make_data_pipe(train,batch_size_train)
        val_ds=make_data_pipe(val,batch_size_val)
        train_summary_writer, test_summary_writer=summaries

        for epoch in range(epochs):
            start = time.time()
            # Reset the metrics for the next epoch
            self.train_loss1.reset_states()
            self.train_loss2.reset_states()
            self.test_loss.reset_states()
            #self.test_loss2.reset_states()
            #if epoch in arr_epochs:
            #    self.K.assign(arr_K[(arr_epochs==epoch)][0])
            #    print('Changed K to {}'.format(arr_K[(arr_epochs==epoch)][0]))
            for sigs in train_ds:
                train_step(sigs[0],sigs[1])
            with train_summary_writer.as_default():
                tf.summary.scalar('loss', self.train_loss1.result(), step=epoch)
            for test_sigs in val_ds:
                val_step(test_sigs[0],test_sigs[1])
            with test_summary_writer.as_default():
                tf.summary.scalar('loss', self.test_loss.result(), step=epoch)
            if self.mode=='stitch':
                print(template.format(epoch+1,
                                      self.train_loss1.result(),
                                      self.test_loss.result(),
                                      time.time()-start))
            else:
                print(template.format(epoch+1,
                                      self.train_loss1.result(),
                                      self.train_loss2.result(),
                                      time.time()-start))

    def predict(self,test_data):
        '''
        Generate signals for test_data[0] in batches and return them
        concatenated along axis 0.
        '''
        if self.mode=='stitch':
            test_step=self.test_step_stitch
        else:
            test_step=self.test_step
        self.test_loss.reset_states()
        test_synth_sig_list=[]
        batch_size_test,N_test=find_batch_size(test_data[0].shape[0],thres=1024
                                               ,mode='val')
        # NOTE(review): the slice below advances by 1 per iteration
        # (i : i+batch_size_test), so consecutive batches overlap; probably
        # i*batch_size_test was intended -- confirm against find_batch_size.
        for i in range(N_test):
            # Reset the metrics for the next batch and test z values
            synth_sig=test_step(test_data[0][i:i+batch_size_test],
                                in_prediction=True)
            test_synth_sig_list.append(synth_sig)
        #test_data.append(np.concatenate(test_synth_sig_list,axis=0))
        #return test_data
        return np.concatenate(test_synth_sig_list,axis=0)

    def call(self,x):
        # Delegate straight to the wrapped network (generator + discriminator).
        return self.net(x)

    def make_plot(self,x,x_hat,y):
        '''
        Diagnostic plot: top panel compares the FFT magnitude of the true and
        reconstructed signals (with the average of y marked as a vertical
        line), bottom panel compares the time-domain signals.
        '''
        avg_y=np.mean(y)
        # 25 is presumably the sampling rate in Hz -- TODO confirm.
        freq=np.fft.fftfreq(x.shape[0])*25
        spect=np.abs(np.fft.fft(x))
        z_hat=np.abs(np.fft.fft(x_hat))
        #spect=dct(x,norm='ortho')
        #z_hat=dct(x_hat,norm='ortho')
        plt.figure()
        plt.subplot(211);plt.plot(np.array(2*[avg_y]),
                       np.array([np.min(spect),np.max(spect)]),'k')
        plt.plot(freq,spect,'b');plt.plot(freq,np.abs(z_hat),'r--')
        plt.legend(['True avg freq.','input FFT','Predicted FFT'])
        plt.title('Signal Spectrum');plt.grid(True)
        plt.subplot(212);plt.plot(np.real(x),'b');plt.plot(np.real(x_hat),'r--')
        plt.legend(['True Signal','Reconstructed Signal'])
        plt.title('Time domain Signal');plt.grid(True)
96726 | from .utility import get_automation_runas_credential
from .utility import get_automation_runas_token
from .utility import import_child_runbook
from .utility import load_webhook_body | StarcoderdataPython |
5045384 | import pytest
from fbmessenger import attachments
from fbmessenger import elements
from fbmessenger import templates
from fbmessenger import quick_replies
class TestTemplates:
    """Tests for fbmessenger template serialisation.

    Each test builds a template object and compares its ``to_dict()`` output
    against the exact payload dict expected by the Messenger Send API.
    """

    def test_button_template_with_single_button(self):
        """A single Button may be passed without wrapping it in a list."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        res = templates.ButtonTemplate(text="Button template", buttons=btn)
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "button",
                    "text": "Button template",
                    "buttons": [
                        {
                            "type": "web_url",
                            "title": "Web button",
                            "url": "http://facebook.com",
                        }
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_button_template_with_multiple_buttons(self):
        """Multiple buttons serialise in the order given."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        btn2 = elements.Button(
            button_type="postback", title="Postback button", payload="payload"
        )
        res = templates.ButtonTemplate(text="Button template", buttons=[btn, btn2])
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "button",
                    "text": "Button template",
                    "buttons": [
                        {
                            "type": "web_url",
                            "title": "Web button",
                            "url": "http://facebook.com",
                        },
                        {
                            "type": "postback",
                            "title": "Postback button",
                            "payload": "payload",
                        },
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_button_template_with_too_many_buttons(self):
        """More than 3 buttons raises ValueError on serialisation."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        with pytest.raises(ValueError) as err:
            res = templates.ButtonTemplate(
                text="Button template",
                buttons=[btn] * 4,
            )
            res.to_dict()
        assert str(err.value) == "You cannot have more than 3 buttons in the template."

    def test_button_template_with_no_buttons(self):
        """An empty button list raises ValueError on serialisation."""
        with pytest.raises(ValueError) as err:
            res = templates.ButtonTemplate(
                text="Button template",
                buttons=[],
            )
            res.to_dict()
        assert str(err.value) == "At least 1 buttons are required."

    def test_generic_template(self):
        """Generic template with two elements plus the optional
        image_aspect_ratio and sharable flags."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        elems = elements.Element(
            title="Element",
            item_url="http://facebook.com",
            image_url="http://facebook.com/image.jpg",
            subtitle="Subtitle",
            buttons=[btn],
        )
        res = templates.GenericTemplate(
            elements=[elems] * 2,
            image_aspect_ratio="square",
            sharable=True,
        )
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "generic",
                    "sharable": True,
                    "image_aspect_ratio": "square",
                    "elements": [
                        {
                            "title": "Element",
                            "item_url": "http://facebook.com",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        },
                        {
                            "title": "Element",
                            "item_url": "http://facebook.com",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        },
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_generic_template_with_single_element(self):
        """A single Element may be passed without wrapping it in a list."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        elems = elements.Element(
            title="Element",
            item_url="http://facebook.com",
            image_url="http://facebook.com/image.jpg",
            subtitle="Subtitle",
            buttons=[btn],
        )
        res = templates.GenericTemplate(elements=elems)
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "generic",
                    "sharable": False,
                    "elements": [
                        {
                            "title": "Element",
                            "item_url": "http://facebook.com",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        }
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_generic_template_with_quick_replies(self):
        """Quick replies serialise as a top-level sibling of the attachment."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        elems = elements.Element(
            title="Element",
            item_url="http://facebook.com",
            image_url="http://facebook.com/image.jpg",
            subtitle="Subtitle",
            buttons=[btn],
        )
        qr = quick_replies.QuickReply(title="QR", payload="QR payload")
        qrs = quick_replies.QuickReplies(quick_replies=[qr] * 2)
        res = templates.GenericTemplate(elements=[elems], quick_replies=qrs)
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "generic",
                    "sharable": False,
                    "elements": [
                        {
                            "title": "Element",
                            "item_url": "http://facebook.com",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        }
                    ],
                },
            },
            "quick_replies": [
                {"content_type": "text", "title": "QR", "payload": "QR payload"},
                {"content_type": "text", "title": "QR", "payload": "QR payload"},
            ],
        }
        assert expected == res.to_dict()

    def test_template_with_too_many_elements(self):
        """More than 10 elements raises ValueError on serialisation."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        elem = elements.Element(
            title="Element",
            item_url="http://facebook.com",
            image_url="http://facebook.com/image.jpg",
            subtitle="Subtitle",
            buttons=[btn],
        )
        elem_list = [elem] * 11
        with pytest.raises(ValueError) as err:
            res = templates.GenericTemplate(elements=elem_list)
            res.to_dict()
        assert (
            str(err.value) == "You cannot have more than 10 elements in the template."
        )

    def test_generic_template_with_no_elements(self):
        """An empty element list raises ValueError on serialisation."""
        with pytest.raises(ValueError) as err:
            res = templates.GenericTemplate(elements=[])
            res.to_dict()
        assert str(err.value) == "At least 1 elements are required."

    def test_template_with_invalid_quick_replies(self):
        """quick_replies must be a QuickReplies instance, not a plain value."""
        with pytest.raises(ValueError) as err:
            templates.GenericTemplate(elements=None, quick_replies="wrong")
        assert str(err.value) == "quick_replies must be an instance of QuickReplies."

    def test_receipt_template(self):
        """Full receipt payload: elements, address, summary and adjustments."""
        element = elements.Element(
            title="Classic White T-Shirt",
            subtitle="100% Soft and Luxurious Cotton",
            quantity=2,
            price=50,
            currency="USD",
            image_url="http://petersapparel.parseapp.com/img/whiteshirt.png",
        )
        adjustment1 = elements.Adjustment(name="New Customer Discount", amount=20)
        adjustment2 = elements.Adjustment(name="$10 Off Coupon", amount=10)
        address = elements.Address(
            street_1="1 Hacker Way",
            city="Menlo Park",
            postal_code="94025",
            state="CA",
            country="US",
        )
        summary = elements.Summary(
            subtotal=75.00, shipping_cost=4.95, total_tax=6.19, total_cost=56.14
        )
        res = templates.ReceiptTemplate(
            recipient_name="<NAME>",
            order_number="12345678902",
            currency="USD",
            payment_method="Visa 2345",
            order_url="http://petersapparel.parseapp.com/order?order_id=123456",
            timestamp="1428444852",
            address=address,
            summary=summary,
            adjustments=[adjustment1, adjustment2],
            elements=[element],
        )
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "receipt",
                    "sharable": False,
                    "recipient_name": "<NAME>",
                    "order_number": "12345678902",
                    "currency": "USD",
                    "payment_method": "Visa 2345",
                    "order_url": "http://petersapparel.parseapp.com/order?order_id=123456",
                    "timestamp": "1428444852",
                    "elements": [
                        {
                            "title": "Classic White T-Shirt",
                            "subtitle": "100% Soft and Luxurious Cotton",
                            "quantity": 2,
                            "price": 50,
                            "currency": "USD",
                            "image_url": "http://petersapparel.parseapp.com/img/whiteshirt.png",
                        }
                    ],
                    # street_2 defaults to the empty string when omitted.
                    "address": {
                        "street_1": "1 Hacker Way",
                        "street_2": "",
                        "city": "Menlo Park",
                        "postal_code": "94025",
                        "state": "CA",
                        "country": "US",
                    },
                    "summary": {
                        "subtotal": 75.00,
                        "shipping_cost": 4.95,
                        "total_tax": 6.19,
                        "total_cost": 56.14,
                    },
                    "adjustments": [
                        {"name": "New Customer Discount", "amount": 20},
                        {"name": "$10 Off Coupon", "amount": 10},
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_list_template(self):
        """List template with two elements, a template-level button and
        top_element_style."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        elems = elements.Element(
            title="Element",
            image_url="http://facebook.com/image.jpg",
            subtitle="Subtitle",
            buttons=[btn],
        )
        res = templates.ListTemplate(
            elements=[elems] * 2,
            buttons=[btn],
            top_element_style="large",
        )
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "list",
                    "top_element_style": "large",
                    "elements": [
                        {
                            "title": "Element",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        },
                        {
                            "title": "Element",
                            "image_url": "http://facebook.com/image.jpg",
                            "subtitle": "Subtitle",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        },
                    ],
                    "buttons": [
                        {
                            "type": "web_url",
                            "title": "Web button",
                            "url": "http://facebook.com",
                        }
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_media_template(self):
        """Media template wraps one attachment element with buttons."""
        btn = elements.Button(
            button_type="web_url", title="Web button", url="http://facebook.com"
        )
        attachment = attachments.Image(attachment_id="12345")
        res = templates.MediaTemplate(attachment, buttons=[btn])
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "media",
                    "elements": [
                        {
                            "media_type": "image",
                            "attachment_id": "12345",
                            "buttons": [
                                {
                                    "type": "web_url",
                                    "title": "Web button",
                                    "url": "http://facebook.com",
                                }
                            ],
                        }
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_media_template_no_buttons(self):
        """Buttons are omitted from the payload when not supplied."""
        attachment = attachments.Image(attachment_id="12345")
        res = templates.MediaTemplate(attachment)
        expected = {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "media",
                    "elements": [
                        {
                            "media_type": "image",
                            "attachment_id": "12345",
                        }
                    ],
                },
            }
        }
        assert expected == res.to_dict()

    def test_media_template_invalid(self):
        """Only image/video attachments are valid media; File raises."""
        bad_attachment = attachments.File(url="https://some/file.doc")
        with pytest.raises(ValueError):
            templates.MediaTemplate(bad_attachment)
| StarcoderdataPython |
1824690 | # -*- coding: utf-8 -*-
# @Time : 2020/10/7 00:03
# @Author : ooooo
from typing import *
class Solution:
    """LeetCode 75 'Sort Colors': counting-sort a list of 0s, 1s and 2s."""

    def sortColors(self, nums: List[int]) -> None:
        """Sort *nums* (containing only 0, 1 and 2) in place.

        Do not return anything, modify nums in-place instead.

        Two passes: count each colour with list.count, then overwrite the
        list front-to-back. Replaces the original's three hand-rolled
        counting loops and three near-identical fill loops.
        """
        counts = [nums.count(colour) for colour in (0, 1, 2)]
        i = 0
        for colour in (0, 1, 2):
            for _ in range(counts[colour]):
                nums[i] = colour
                i += 1
| StarcoderdataPython |
40838 | # -*- coding: utf-8 -*-
"""
======
Slider
======
A slideshow component which may be similar to Album but with difference that
a slide item can have HTML content.
Slide items are ordered from their ``order`` field value. Items with a zero
value for their order will be ordered in an almost arbitrary order (mostly
depending from item object id).
"""
from django.conf import settings
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.html import strip_tags
from django.utils.text import Truncator
from django.utils.translation import gettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from cmsplugin_blocks.choices_helpers import (get_slider_default_template,
get_slider_template_choices)
from cmsplugin_blocks.utils import SmartFormatMixin
class Slider(CMSPlugin):
    """
    Slide container for items.

    django-cms plugin model; related SlideItem rows attach via the
    ``slide_item`` reverse relation.
    """
    title = models.CharField(
        _("Title"),
        blank=False,
        max_length=150,
        default="",
    )
    """
    A required title string.
    """

    template = models.CharField(
        _("Template"),
        blank=False,
        max_length=150,
        choices=get_slider_template_choices(),
        default=get_slider_default_template(),
        help_text=_("Used template for content look."),
    )
    """
    Template choice from available plugin templates in setting
    ``BLOCKS_SLIDER_TEMPLATES``. Default to the first choice item.
    """

    def __str__(self):
        # Truncated, tag-stripped title for admin listings.
        return Truncator(strip_tags(self.title)).words(
            settings.BLOCKS_MODEL_TRUNCATION_LENGTH,
            truncate=settings.BLOCKS_MODEL_TRUNCATION_CHR
        )

    def copy_relations(self, oldinstance):
        """
        Copy FK relations when plugin object is copied as another object

        See:
        http://docs.django-cms.org/en/latest/how_to/custom_plugins.html#for-foreign-key-relations-from-other-objects

        :meta private:
        """
        # Drop any items already attached to the copy, then re-create each
        # of the source's items pointing at this instance (pk=None forces
        # an INSERT on save).
        self.slide_item.all().delete()

        for slide_item in oldinstance.slide_item.all():
            slide_item.pk = None
            slide_item.slider = self
            slide_item.save()

    class Meta:
        verbose_name = _("Slider")
        verbose_name_plural = _("Sliders")
class SlideItem(SmartFormatMixin, models.Model):
    """
    Slide item to include in container.

    Ordered within a Slider by the ``order`` field (zero means effectively
    arbitrary order, per the module docstring).
    """
    slider = models.ForeignKey(
        Slider,
        related_name="slide_item",
        on_delete=models.CASCADE
    )

    title = models.CharField(
        _("Title"),
        blank=False,
        max_length=150,
        default="",
    )
    """
    Required title string.
    """

    image = models.FileField(
        _("Image"),
        upload_to="blocks/slider/%y/%m",
        max_length=255,
        null=True,
        blank=False,
        default=None,
        validators=[
            FileExtensionValidator(
                allowed_extensions=settings.BLOCKS_ALLOWED_IMAGE_EXTENSIONS
            ),
        ]
    )
    """
    Required image file, limited to enabled image formats from settings
    ``BLOCKS_ALLOWED_IMAGE_EXTENSIONS``.
    """

    content = models.TextField(
        _(u"Content"),
        blank=True,
        default="",
    )
    """
    Optional long text, it will be editable through CKeditor on plugin form.
    """

    order = models.IntegerField(
        _("Order"),
        blank=False,
        default=0
    )
    """
    Number for order position in item list.
    """

    link_name = models.CharField(
        _("link name"),
        blank=True,
        max_length=45,
    )
    """
    Optional string for link name.
    """

    link_url = models.CharField(
        _("link url"),
        blank=True,
        max_length=255,
    )
    """
    Optional string for link URL.
    """

    link_open_blank = models.BooleanField(
        _("open new window"),
        default=False,
        help_text=_("If checked the link will be open in a new window"),
    )
    """
    Checkbox to enable opening link URL in a new window/tab.
    """

    def __str__(self):
        # Truncated, tag-stripped title for admin listings.
        return Truncator(strip_tags(self.title)).words(
            settings.BLOCKS_MODEL_TRUNCATION_LENGTH,
            truncate=settings.BLOCKS_MODEL_TRUNCATION_CHR
        )

    def get_image_format(self):
        # Provided by SmartFormatMixin; resolves the display format for
        # the uploaded image file.
        return self.media_format(self.image)

    class Meta:
        verbose_name = _("Slide item")
        verbose_name_plural = _("Slide items")
| StarcoderdataPython |
11306057 | <filename>from_3b1b/active/diffyq/part2/fourier_series.py
# NOTE(review): this body is just an unbounded debug loop printing
# 0..999999999 — it does not match the fourier_series module named in the
# file header; presumably placeholder content. Verify before use.
for i in range(0,1000000000):
    print(i)
6554538 | """
matrix-api v0.1
@author <NAME>
@created on 04/14/2016
multiply.py
Route handler for the subtraction endpoint.
POST /v1/add
"""
from flask import Flask, Blueprint, abort, request, jsonify
from app.matrix import Matrix
from app.decorators import validate
import app.schema
sub = Blueprint('sub', __name__)
# Runs before every request handled by this blueprint. The @validate
# decorator presumably parses the JSON body against the binary-op schema
# and supplies the validated model — confirm against app.decorators.
@sub.before_request
@validate(request, app.schema.binaryop)
def buildMatricies(model):
    """Build the two operand matrices from the validated request body and
    stash them on the request object for the route handler."""
    request.matrixA = Matrix.fromArray(model['lvalue'])
    request.matrixB = Matrix.fromArray(model['rvalue'])
@sub.route('/v1/sub', methods=["POST"])
def postSubtraction():
    """POST /v1/sub — subtract the request's rvalue matrix from its lvalue
    matrix (prepared in the before_request hook) and return the result as
    JSON under the 'result' key."""
    return jsonify((request.matrixA - request.matrixB).toKeyValuePair('result'))
| StarcoderdataPython |
4808081 | import pathmagic # noqa isort:skip
import datetime
import os
import unittest
from database import Database
class TestDB(unittest.TestCase):
    """End-to-end exercise of the Database wrapper: insert/get/delete of
    events and artists, plus JSON load/save round-trips.

    Everything runs sequentially inside test_run; the db_*_test methods
    are helpers, not independent tests.
    """

    def test_run(self):
        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
        self.db = Database()
        # Expected row counts in the fixture JSON files.
        EVENT_COUNT = 4
        ARTIST_COUNT = 3
        # Check if initially empty
        self.db_empty_test("events")
        self.db_empty_test("artists")
        # Check if not empty after insert from file
        self.db.insert_events(TEST_DIR + "/events.json")
        self.db_not_empty_test("events", EVENT_COUNT)
        # Check if empty after clean
        self.db.clean_events()
        self.db_empty_test("events")
        # Check if not empty after insert from file
        self.db.insert_artists(TEST_DIR + "/artists.json")
        self.db_not_empty_test("artists", ARTIST_COUNT)
        # Check if empty after clean
        self.db.clean_artists()
        self.db_empty_test("artists")
        # Check if not empty after insert one
        start_date = datetime.date(2020, 12, 1)
        end_date = datetime.date(2020, 12, 31)
        self.db.insert_event_from_date("test_title", start_date, end_date)
        self.db_not_empty_test("events", 1)
        # Check if get elem returns correct elem (date inside the range)
        res = self.db.get_event(2020, 12, 15)
        self.assertTrue(res[0][0] == "test_title")
        # Check if file exists after save
        self.db.save_events(TEST_DIR + "/events2.json")
        print(TEST_DIR)
        print(TEST_DIR + "/events2.json")
        self.assertTrue(os.path.exists(TEST_DIR + "/events2.json"))
        # Check if empty after clean
        self.db.delete_event("test_title")
        self.db_empty_test("events")
        # Check if not empty after insert one
        self.db.insert_artist("test_name", "test_make", "test_model")
        self.db_not_empty_test("artists", 1)
        # Check if get elem returns correct elem
        res = self.db.get_artist("test_make", "test_model")
        self.assertTrue(res[0][0] == "test_name")
        # Check if file exists after save
        self.db.save_artists(TEST_DIR + "/artists2.json")
        self.assertTrue(os.path.exists(TEST_DIR + "/artists2.json"))
        # Check if empty after clean
        self.db.delete_artist("test_name")
        self.db_empty_test("artists")
        # Check if not empty after insert from saved file (round-trip)
        self.db.insert_events(TEST_DIR + "/events2.json")
        self.db_not_empty_test("events", 1)
        # Check if not empty after insert from saved file (round-trip)
        self.db.insert_artists(TEST_DIR + "/artists2.json")
        self.db_not_empty_test("artists", 1)

    def db_not_empty_test(self, table: str, size):
        """Assert *table* holds exactly *size* rows."""
        res = self.db.get_all_from_table(table)
        self.assertTrue(len(res) == size)

    def db_empty_test(self, table: str):
        """Assert *table* holds no rows."""
        res = self.db.get_all_from_table(table)
        self.assertTrue(len(res) == 0)
| StarcoderdataPython |
3383602 | <filename>tamil_utils.py
# -*- coding: utf-8 -*-
import regex
_UYIRGAL = ["அ","ஆ","இ","ஈ","உ","ஊ","எ","ஏ","ஐ","ஒ","ஓ","ஔ"]
_MEYGAL=("க்","ங்","ச்","ஞ்","ட்","ண்","த்","ந்","ன்","ப்","ம்","ய்","ர்","ற்","ல்","ள்","ழ்","வ்","ஜ்","ஷ்","ஸ்","ஹ்","க்ஷ்","ஃ","்",)
_VALLINAM = ("க","கா","கி","கீ","கூ","கு","கெ","கே","கை","கொ","கோ","கௌ","ச","சா","சி","சீ","சூ","சு","செ","சே","சை","சொ","சோ","சௌ","ட","டா","டி","டீ","டூ","டு","டெ","டே","டை","டொ","டோ","டௌ","த","தா","தி","தீ","தூ","து","தெ","தே","தை","தொ","தோ","தௌ","ப","பா","பி","பீ","பூ","பு","பெ","பே","பை","பொ","போ","பௌ","ற","றா","றி","றீ","றூ","று","றெ","றே","றை","றொ","றோ","றௌ", "க்","ச்", "ட்", "த்", "ப்", "ற்", )
_TAMIL_UNICODE_1_TA = ["க","ங","ச","ஞ","ட","ண","த","ந","ன","ப","ம","ய","ர","ற","ல","ள","ழ","வ",]
_TAMIL_UNICODE_1_SAN = ["ஜ", "ஷ", "ஸ", "ஹ", "க்ஷ",]
_TAMIL_UNICODE_1 = _TAMIL_UNICODE_1_TA+_TAMIL_UNICODE_1_SAN
_TAMIL_UNICODE_2 = ["ா","ி","ீ","ூ","ு","ெ","ே","ை","ொ","ோ","ௌ","்",]
def _is_uyir_ezhuthu(tamil_char):
    """Return True if *tamil_char* is an independent Tamil vowel (uyir)."""
    return _list_has_element(_UYIRGAL, tamil_char)
def _is_mey_ezhuthu(tamil_char):
    """Return True if *tamil_char* is a pure consonant (mey, with pulli)."""
    return _list_has_element(_MEYGAL, tamil_char)
def _is_vallinam(tamil_char):
    """Return True if *tamil_char* belongs to the vallinam (hard) series."""
    return _list_has_element(_VALLINAM, tamil_char)
def _get_last_morpheme(word):
    """Return the final phoneme of *word*.

    - empty/whitespace word -> ''
    - trailing pulli -> the mey letter of the preceding base consonant
    - a vowel or mey letter -> itself
    - a bare consonant -> the inherent vowel "அ"
    - a vowel sign -> the corresponding independent vowel

    Cleanup: the original computed the _TAMIL_UNICODE_2 index twice and
    ended with an unreachable ``return last_char``; behavior is unchanged.
    """
    if word.strip() == '':
        return ''
    last_char = word[-1]
    # Bare pulli: resolve via the preceding base consonant.
    if last_char == "்":
        last_char = _MEYGAL[_TAMIL_UNICODE_1.index(word[-2])]
    if _is_uyir_ezhuthu(last_char) or _is_mey_ezhuthu(last_char):
        return last_char
    index = _get_index(_TAMIL_UNICODE_2, last_char)
    if index == -1:
        # A base consonant carries the inherent 'a' vowel.
        return "அ"
    # Vowel sign -> independent vowel; the sign list is offset by one
    # relative to _UYIRGAL (signs start at "ஆ").
    return _UYIRGAL[index + 1]
def _get_first_morpheme(word):
    """Return the first phoneme of *word*.

    - empty/whitespace word -> ''
    - a vowel or mey letter -> itself
    - a base consonant -> its mey form (from _MEYGAL)
    - a vowel sign -> the corresponding independent vowel
    - anything else -> the character itself
    """
    if word.strip() == '':
        return ''
    first_char = word[0]
    if _is_uyir_ezhuthu(first_char) or _is_mey_ezhuthu(first_char):
        return first_char
    index = _get_index(_TAMIL_UNICODE_1, first_char)
    if (index != -1 ):
        return _MEYGAL[index]
    # BUGFIX: the original referenced _YIYAIBU_ENDING_LETTERS, which is not
    # defined anywhere in this module and would raise NameError when reached.
    # Mirroring _get_last_morpheme, the vowel-sign list is used instead.
    # NOTE(review): confirm _TAMIL_UNICODE_2 matches the author's intent.
    index = _get_index(_TAMIL_UNICODE_2, first_char)
    if index != -1:
        return _UYIRGAL[index + 1]
    return first_char
def _get_index(list, element):
index = -1
try:
index = list.index(element)
except:
index = -1
return index
def _list_has_element(list,element):
try:
return element in list
except:
return False
def _get_unicode_characters(word):
    """Split *word* into user-perceived characters: each base letter together
    with its combining marks. Requires the third-party `regex` module for
    Unicode property classes (L = letter, M = mark, Z = separator)."""
    if (' ' in word):
        # Keep runs of separator characters too, so spacing survives a re-join.
        return regex.findall('\p{L}\p{M}*|\p{Z}*',word)
    else:
        return regex.findall('\p{L}\p{M}*',word)
def _remove_punctuation_numbers(text):
    """Strip ASCII punctuation, common typographic quotes/dashes/ellipsis,
    and all digit runs from *text*; returns the cleaned string."""
    import string
    # ASCII punctuation first, then the curly/typographic characters that
    # string.punctuation does not cover.
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = text.replace("‘", '')
    text = text.replace("’", '')
    text = text.replace("“", '')
    text = text.replace("”", '')
    text = text.replace("–",'')
    text = text.replace("…",'')
    # Remove digit sequences last.
    text = regex.sub("\d+",'',text)
    return text
def _cleanup_generated_poem(text):
    """Post-process a generated Tamil poem: for each adjacent word pair,
    drop a word-final vallinam mey (hard consonant with pulli) from the
    first word when the next word does not begin with the same consonant.

    NOTE(review): `lines` keeps blank lines while `text_words` drops them,
    so the l-index into text_words can drift for texts containing interior
    blank lines — confirm inputs never contain them.
    """
    #print(text)
    new_text = ''
    lines = text.split("\n")
    text_words = [[ word for word in line.split()] for line in lines if line.strip()!='']
    #print('last word of the poem',text_words[-1][-1])
    for l,line in enumerate(lines):
        line_words = line.split()
        for w,word2 in enumerate(line_words):
            # Nothing precedes the very first word of the poem.
            if l==0 and w==0:
                continue
            # word1 is the word preceding word2: the last word of the
            # previous line when word2 starts a line, else its left neighbour.
            if w==0:
                word1 = text_words[l-1][-1]
            else:
                word1 = text_words[l][w-1]
            last_char = _get_last_morpheme(word1)
            first_char = _get_first_morpheme(word2)
            corrected_word1 = word1
            if (_is_vallinam(last_char) and _is_mey_ezhuthu(last_char) and
                    first_char != last_char ):
                # Drop the final perceived character (base + marks).
                corrected_word1 = ''.join(_get_unicode_characters(word1)[:-1])
            #print(word1,last_char,first_char,word2,'corrected word1',corrected_word1)
            # Words are emitted one step late: appending word1 here, and a
            # newline whenever word2 begins a new line.
            new_text += corrected_word1 + " "
            if w==0:
                new_text+="\n"
    # The final word never became a "word1", so append it now.
    new_text += " " + text_words[-1][-1]
    return new_text
| StarcoderdataPython |
393559 | <reponame>NightySide/simple
import ProgramState
from funcs import next_line
# Load the "simple" program from disk and execute it one line at a time
# until the program counter passes the last line, then dump final state.
ps = ProgramState.ProgramState()
ps.load("test.smp")
while ps.line < len(ps.program):
    next_line(ps)
print(ps)
| StarcoderdataPython |
1865593 | <reponame>Joevaen/Scikit-image_On_CT
# Canny edge detection demo (comment translated from Chinese).
from skimage import data, feature, img_as_float, io
image = img_as_float(io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg'))
# NOTE(review): feature.canny expects a 2-D grayscale image; if this JPEG
# loads as RGB the call will fail — confirm the input is single-channel.
# The variable name "gamma_corrected" is a misnomer: this is an edge map.
gamma_corrected = feature.canny(image)
io.imshow(image)
io.show()
io.imshow(gamma_corrected)
io.show()
1713827 | <reponame>ahfuck/panki<filename>tests/test_util.py
import unittest
from datetime import datetime, timezone
import click
import panki.util
class TestUtil(unittest.TestCase):
    """Unit tests for the panki.util helper functions."""

    def test_strip_split(self):
        """Comma-split then strip whitespace from each piece."""
        self.assertEqual(
            panki.util.strip_split(' a , b , c '),
            ['a', 'b', 'c']
        )

    def test_strip_lines(self):
        """Leading/trailing blank lines are dropped; interior ones kept."""
        self.assertEqual(panki.util.strip_lines(['', '', '']), [])
        self.assertEqual(
            panki.util.strip_lines(['', 'a', '', 'b', '', 'c', '\n']),
            ['a', '', 'b', '', 'c']
        )

    def test_generate_id(self):
        """IDs are millisecond UTC timestamps (within tolerance of now)."""
        self.assertWithinMilliseconds(
            datetime.now(timezone.utc).timestamp() * 1000,
            panki.util.generate_id()
        )

    def test_timestamp(self):
        self.assertWithinMilliseconds(
            datetime.now(timezone.utc).timestamp() * 1000,
            panki.util.timestamp() * 1000
        )

    def test_utcnow(self):
        self.assertWithinMilliseconds(
            datetime.now(timezone.utc).timestamp() * 1000,
            panki.util.utcnow().timestamp() * 1000
        )

    def test_bad_param(self):
        """bad_param raises click.BadParameter with hint and message set."""
        with self.assertRaises(click.BadParameter) as cm:
            panki.util.bad_param('foobar', 'foo bar baz')
        exception = cm.exception
        self.assertEqual(exception.param_hint, 'foobar')
        self.assertEqual(exception.message, 'foo bar baz')

    def test_multi_opt(self):
        """multi_opt builds the kwargs dict for a repeatable click option."""
        self.assertEqual(
            panki.util.multi_opt(),
            dict(multiple=True, nargs=1, default=[])
        )
        self.assertEqual(
            panki.util.multi_opt(3),
            dict(multiple=True, nargs=3, default=[])
        )
        self.assertEqual(
            panki.util.multi_opt(3, ['foo', 'bar']),
            dict(multiple=True, nargs=3, default=['foo', 'bar'])
        )

    # Helper assertion, not a test: the two timestamps must agree to
    # within *ms* milliseconds.
    def assertWithinMilliseconds(self, timestamp1, timestamp2, ms=1000):
        self.assertTrue(timestamp1 > (timestamp2 - ms))
        self.assertTrue(timestamp1 < (timestamp2 + ms))
| StarcoderdataPython |
1745165 | <filename>phantombuild/__main__.py
"""Phantom-build command line program."""
import click
from . import __version__
from .phantombuild import build_phantom, read_config, setup_calculation, write_config
# Root command group; subcommands register themselves via @cli.command().
@click.group()
@click.version_option(version=__version__)
def cli():
    """Build and set up Phantom runs.

    phantombuild compiles Phantom and sets up one or more runs.
    """
@cli.command()
@click.argument('filename')
def template(filename):
    """Write a template config file to FILENAME."""
    # Delegates to write_config from .phantombuild.
    write_config(filename)
@cli.command()
@click.argument('config', nargs=-1, type=click.Path(exists=True))
@click.pass_context
def build(ctx, config):
    """Build Phantom."""
    # With no CONFIG files given, show help instead of silently doing nothing.
    if len(config) == 0:
        click.echo(ctx.get_help())
        ctx.exit()
    # Each config file describes one Phantom build under its 'phantom' key.
    for _config in config:
        conf = read_config(_config)
        build_phantom(**conf['phantom'])
@cli.command()
@click.argument('config', nargs=-1, type=click.Path(exists=True))
@click.pass_context
def setup(ctx, config):
    """Build and set up Phantom runs.

    phantombuild compiles Phantom and sets up one or more runs. Pass in
    one CONFIG file per Phantom build config. Each CONFIG file may
    contain multiple runs.
    """
    # With no CONFIG files given, show help instead of silently doing nothing.
    if len(config) == 0:
        click.echo(ctx.get_help())
        ctx.exit()
    for _config in config:
        conf = read_config(_config)
        # Build once per config, then set up every run against that build.
        build_phantom(**conf['phantom'])
        phantom_path = conf['phantom']['path']
        for run in conf.get('runs', []):
            # pop() so 'path' is not passed twice to setup_calculation.
            run_path = run.pop('path')
            setup_calculation(run_path=run_path, phantom_path=phantom_path, **run)
phantom_path = conf['phantom']['path']
for run in conf.get('runs', []):
run_path = run.pop('path')
setup_calculation(run_path=run_path, phantom_path=phantom_path, **run)
if __name__ == '__main__':
cli(prog_name='python -m phantombuild')
| StarcoderdataPython |
1670671 | <filename>make_datasets.py
import os
import os.path as path
import pandas as pd
# Input datasets live under ../disk; CSV outputs are written to ./Data.
src_path = '../disk'
trgt_path = './Data'
# Ensure the output directory exists before any CSVs are written.
os.makedirs(trgt_path, exist_ok=True)
def make_Bank():
    """Convert the Bank dataset's ground-truth splits into CSV files.

    Each GroundTruth_*.txt is tab-separated; only the first column (a
    relative image path) is used. The output CSVs contain a single
    'image_path' column holding absolute paths.
    """
    dataset_path = path.join(src_path, 'Bank')

    def build(gt_file, csv_file):
        # Parse one ground-truth file and write the corresponding CSV.
        with open(path.join(dataset_path, gt_file), 'r') as f:
            lines = [line.split('\t') for line in f.read().split('\n')]
        items = [
            {'image_path': path.abspath(path.join(dataset_path, line[0]))}
            for line in lines
            if line != ['']  # skip blank lines (e.g. trailing newline)
        ]
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # build the frame from the collected records in one shot instead.
        df = pd.DataFrame(items, columns=['image_path'])
        df.to_csv(path.join(trgt_path, csv_file), index=False)

    build('GroundTruth_Train.txt', 'Bank_Train.csv')
    build('GroundTruth_Val.txt', 'Bank_Val.csv')
    build('GroundTruth_Test.txt', 'Bank_Test.csv')
def make_Doc():
    """Convert the Doc dataset's ground-truth splits into CSV files.

    Same format as make_Bank: tab-separated GroundTruth_*.txt files whose
    first column is a relative image path, written out as a single-column
    'image_path' CSV of absolute paths.
    """
    dataset_path = path.join(src_path, 'Doc')

    def build(gt_file, csv_file):
        # Parse one ground-truth file and write the corresponding CSV.
        with open(path.join(dataset_path, gt_file), 'r') as f:
            lines = [line.split('\t') for line in f.read().split('\n')]
        items = [
            {'image_path': path.abspath(path.join(dataset_path, line[0]))}
            for line in lines
            if line != ['']  # skip blank lines (e.g. trailing newline)
        ]
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # build the frame from the collected records in one shot instead.
        df = pd.DataFrame(items, columns=['image_path'])
        df.to_csv(path.join(trgt_path, csv_file), index=False)

    build('GroundTruth_Train.txt', 'Doc_Train.csv')
    build('GroundTruth_Val.txt', 'Doc_Val.csv')
    build('GroundTruth_Test.txt', 'Doc_Test.csv')
# Build both datasets when run as a script.
if __name__ == '__main__':
    make_Bank()
    make_Doc()
| StarcoderdataPython |
9697114 | '''
# ambre.test.design_unit.py
#
# Copyright March 2013 by <NAME>
#
# This program is free software; you may redistribute it and/or modify its
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License or
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# For any other inquiries send an email to: <NAME> (<EMAIL>)
'''
from ambre.config import CONFIG
from ambre.design.workflow import PrimerDesignWorkflow
from pkg_resources import resource_filename
import unittest
import os, errno
class TestDesignWorkflow(unittest.TestCase):
    """Integration-style tests driving PrimerDesignWorkflow end-to-end on
    the bundled example reference/regions files.

    NOTE(review): test_workflow contains a Python 2 print statement, so
    this file targets Python 2 — do not run under Python 3 as-is.
    """

    def setUp(self):
        pass

    def test_workflow(self):
        """Full run(): design primers for the example regions and print
        solutions to <temp_tag>.out."""
        CONFIG.param['reference_fpath'] = os.path.abspath(resource_filename('ambre', os.path.join('examples', 'reference.fasta')))
        w = PrimerDesignWorkflow(primer3_path=CONFIG.dir['primer3'],
                                 primer3_param=CONFIG.param['primer3_long'],
                                 aligner=CONFIG.bin['aligner'],
                                 multiplx=CONFIG.bin['multiplx'],
                                 d=1000,rho=0.1)
        ex_regions = os.path.abspath(resource_filename('ambre', os.path.join('examples','regions.test')))
        # Work under a configurable examples dir when provided.
        if CONFIG.dir['examples'] is None:
            ex_temp = os.path.abspath(resource_filename('ambre', os.path.join('examples','regions_ex', 'design_ex')))
        else:
            ex_temp = os.path.join(CONFIG.dir['examples'], 'design_ex')
        # Hard-link inputs into the examples dir; tolerate pre-existing links.
        try:
            os.link(ex_regions, os.path.join(CONFIG.dir['examples'], os.path.basename(ex_regions)))
            os.link(CONFIG.param['reference_fpath'], os.path.join(CONFIG.dir['examples'], os.path.basename(CONFIG.param['reference_fpath'])))
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else: raise
        # Simulated-annealing schedules from config (comma-separated floats).
        pamp_t_ms= map(float,CONFIG.param['design_sa_ms'].split(','))
        pamp_t_bs=map(float,CONFIG.param['design_sa_bs'].split(','))
        print pamp_t_ms, pamp_t_bs
        # NOTE(review): the schedules above are parsed but the run below
        # passes hard-coded single-value tuples instead — confirm intent.
        w.run(ex_regions,
              delete_flag=(CONFIG.param['cleanup_flag']=="True"),
              temp_tag=ex_temp,
              primers_per_kbp=int(CONFIG.param['design_primer3_primers_per_kbp']),
              max_primer_penalty=float(CONFIG.param['design_max_primer3_penalty']),
              max_cross_amp_dist=int(CONFIG.param['design_max_cross_amp_dist']),
              max_alignment_count=int(CONFIG.param['design_max_alignments']),
              min_alignment_len=int(CONFIG.param['design_3end_len_alignment']),
              pamp_max_iterations= int(CONFIG.param['design_sa_max_iterations']),
              pamp_repeats=2,
              pamp_t_ms= (-(10**-1),),
              pamp_t_bs=(10**4,))
        # w.run(ex_regions,temp_tag=ex_temp,
        #      primers_per_bp=75, max_cross_amp_dist=20000,
        #      max_primer_penalty=1.5,
        #      max_alignment_count=10,
        #      min_alignment_len=18,
        #      pamp_max_iterations= 1000000,pamp_repeats=2,
        #      pamp_t_ms= (-(10**-1),), pamp_t_bs=(10**4,))
        w.print_solutions(out_fpath = "%s.out"%ex_temp)
        #w.validate()

    def test_ambre_design(self):
        """Constructing the workflow alone should not raise."""
        CONFIG.param['reference_fpath'] = os.path.abspath(resource_filename('ambre', os.path.join('examples', 'reference.fasta')))
        w = PrimerDesignWorkflow(primer3_path=CONFIG.dir['primer3'],
                                 primer3_param=CONFIG.param['primer3_long'],
                                 aligner=CONFIG.bin['aligner'],
                                 multiplx=CONFIG.bin['multiplx'],
                                 d=1000,rho=0.1)

    def test_workflow_valid(self):
        """check() validates an existing design without re-running it."""
        CONFIG.param['reference_fpath'] = resource_filename('ambre', os.path.join('examples', 'reference.fasta'))
        w = PrimerDesignWorkflow(primer3_path=CONFIG.dir['primer3'],
                                 primer3_param=CONFIG.param['primer3_long'],
                                 aligner=CONFIG.bin['aligner'],
                                 multiplx=CONFIG.bin['multiplx'],
                                 d=1000,rho=0.1)
        ex_regions = resource_filename('ambre', os.path.join('examples','regions.test'))
        ex_temp = resource_filename('ambre', os.path.join('examples','regions_ex', 'design_ex'))
        w.check(ex_regions,temp_tag=ex_temp)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDesignWorkflow)
unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
4939963 | <reponame>evonove/threejs-prototype
from .base import *
# security enforcement
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = env('DJANGO_SECURE_SSL_REDIRECT', True)
SESSION_COOKIE_SECURE = env('DJANGO_SESSION_COOKIE_SECURE', True)
# uncomment for cross-domain cookies
# SESSION_COOKIE_DOMAIN = '.{}'.format(env('DJANGO_ALLOWED_HOSTS'))
# emails
DEFAULT_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', EMAIL_BACKEND_DEFAULT)
EMAIL_HOST = env('DJANGO_EMAIL_HOST')
EMAIL_PORT = env('DJANGO_EMAIL_HOST_PORT')
EMAIL_HOST_USER = env('DJANGO_EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('DJANGO_EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = env('DJANGO_EMAIL_USE_TLS', True)
# logging
LOGGING['loggers'] = {
'django': {
'handlers': ['console', 'syslog'],
'level': env('DJANGO_LOG_LEVEL', 'INFO'),
},
'threejs': {
'handlers': ['logstash', 'syslog'],
'level': env('THREEJS_LOG_LEVEL', 'INFO'),
},
}
| StarcoderdataPython |
1778805 | # Imports
import cv2
import mediapipe as mp
from numpy import result_type
import pyautogui
import math
from enum import IntEnum
from google.protobuf.json_format import MessageToDict
from constants.hand_landmarks import HandLandmarks
from constants.gest import Gest
from models.hand_recog import HandRecog
from models.controller import Controller
# from ctypes import cast, POINTER
# from comtypes import CLSCTX_ALL
# from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
# import screen_brightness_control as sbcontrol
pyautogui.FAILSAFE = False
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# Multi-handedness Labels
class HLabel(IntEnum):
    """Handedness label indices used when splitting MediaPipe results."""
    LEFT = 0
    RIGHT = 1
class GestureController:
    """Webcam-driven gesture controller.

    Grabs frames from the default camera, runs MediaPipe hand tracking on
    them, splits the detections into left/right hands, derives a gesture per
    hand and hands the result to ``Controller`` to act on.

    NOTE: all state lives in *class* attributes, so every instance shares a
    single camera/session.
    """
    gc_mode = 0
    cap = None
    CAM_HEIGHT = None
    CAM_WIDTH = None
    right_hand_result = None # Right Hand by default
    left_hand_result = None # Left hand by default
    dom_hand = True
    def __init__(self):
        """Open the default capture device and enable the run flag."""
        # gc_mode doubles as the loop condition inside start().
        GestureController.gc_mode = 1
        GestureController.cap = cv2.VideoCapture(0)
        GestureController.CAM_HEIGHT = GestureController.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        GestureController.CAM_WIDTH = GestureController.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    def classify_hands(results):
        """Store left/right hand landmarks from MediaPipe `results` into class attrs.

        NOTE(review): this is effectively a static helper (no ``self``); it is
        only ever called as ``GestureController.classify_hands(results)``, which
        works, but a ``@staticmethod`` decorator would make the intent explicit.
        """
        left, right = None, None
        try:
            handedness_dict = MessageToDict(results.multi_handedness[0])
            if handedness_dict['classification'][0]['label'] == 'Right':
                right = results.multi_hand_landmarks[0]
            else:
                left = results.multi_hand_landmarks[0]
        except:
            # Fewer than one detection: leave both as None.
            pass
        try:
            handedness_dict = MessageToDict(results.multi_handedness[1])
            if handedness_dict['classification'][0]['label'] == 'Right':
                right = results.multi_hand_landmarks[1]
            else:
                left = results.multi_hand_landmarks[1]
        except:
            # No second hand detected.
            pass
        GestureController.right_hand_result = right
        GestureController.left_hand_result = left
    def start(self):
        """Main capture loop: recognize gestures per frame until ENTER is pressed."""
        right_hand = HandRecog(HLabel.RIGHT)
        left_hand = HandRecog(HLabel.LEFT)
        with mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.7, min_tracking_confidence=0.7) as hands:
            while GestureController.cap.isOpened() and GestureController.gc_mode:
                success, image = GestureController.cap.read()
                if not success:
                    print("Ignoring empty camera frame.")
                    continue
                # Mirror the frame and convert BGR->RGB for MediaPipe.
                image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
                image.flags.writeable = False
                results = hands.process(image)
                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                if results.multi_hand_landmarks:
                    for hand_landmarks in results.multi_hand_landmarks:
                        mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                    GestureController.classify_hands(results)
                    right_hand.set_hand_result(GestureController.right_hand_result)
                    left_hand.set_hand_result(GestureController.left_hand_result)
                    if right_hand.hand_result and left_hand.hand_result:
                        # Do two-handed gesture
                        right_hand.set_finger_state()
                        right_gest_name = right_hand.get_gesture()
                        left_hand.set_finger_state()
                        left_gest_name = left_hand.get_gesture()
                        # print('left finger state: ', left_hand.finger, 'right finger state: ', right_hand.finger)
                        print('left gesture: ', left_gest_name, 'right gesture: ', right_gest_name)
                        leftlmList, rightlmList = HandRecog.findPosition2Hands(results, image, 1)
                        Controller.two_handle_controls(right_gest_name, left_gest_name, right_hand.hand_result, left_hand.hand_result, leftlmList, rightlmList)
                    elif right_hand.hand_result and not left_hand.hand_result:
                        # Do one-handed gesture with right hand
                        right_hand.set_finger_state()
                        gest_name = right_hand.get_gesture()
                        # print('right finger state: ', right_hand.finger)
                        print('right gesture: ', gest_name)
                        lmList = HandRecog.findPosition(results, image, 0)
                        Controller.handle_controls(gest_name, right_hand.hand_result, lmList)
                    elif not right_hand.hand_result and left_hand.hand_result:
                        # Do one-handed gesture with left hand
                        left_hand.set_finger_state()
                        gest_name = left_hand.get_gesture()
                        # print('left finger state: ', left_hand.finger)
                        print('left gesture: ', gest_name)
                        lmList = HandRecog.findPosition(results, image, 0)
                        Controller.handle_controls(gest_name, left_hand.hand_result, lmList)
                    else:
                        pass
                else:
                    # No hands in frame: reset the controller's hand anchor.
                    Controller.prev_hand = None
                cv2.imshow('Gesture Controller', image)
                # ENTER (keycode 13) closes the loop.
                if cv2.waitKey(5) & 0xFF == 13:
                    break
        GestureController.cap.release()
        cv2.destroyAllWindows()
# Script entry point: opening the controller also opens the webcam; start()
# then blocks in the capture/recognition loop until ENTER is pressed.
if __name__ == '__main__':
    gc1 = GestureController()
    gc1.start()
| StarcoderdataPython |
8089956 | #!/usr/bin/env python
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Build image output_image_file from input_directory and properties_file.
Usage: build_image input_directory properties_file output_image_file
"""
import os
import os.path
import subprocess
import sys
import commands
import shutil
import tempfile
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
def RunCommand(cmd):
    """ Echo and run the given command

    Args:
        cmd: the command represented as a list of strings.
    Returns:
        The exit code.
    """
    print "Running: ", " ".join(cmd)
    p = subprocess.Popen(cmd)
    # No pipes are attached, so communicate() simply waits for completion.
    p.communicate()
    return p.returncode
def GetVerityTreeSize(partition_size):
    """Ask build_verity_tree how many bytes its hash tree will occupy.

    Args:
        partition_size: size in bytes of the partition to be verified.
    Returns:
        A (success, size) tuple; size is 0 on failure.
    """
    cmd = "build_verity_tree -s %d"
    cmd %= partition_size
    status, output = commands.getstatusoutput(cmd)
    if status:
        # Non-zero exit status: surface the tool's output and fail.
        print output
        return False, 0
    return True, int(output)
def GetVerityMetadataSize(partition_size):
    """Ask the verity metadata builder how many bytes its block will occupy.

    Args:
        partition_size: size in bytes of the partition to be verified.
    Returns:
        A (success, size) tuple; size is 0 on failure.
    """
    cmd = "system/extras/verity/build_verity_metadata.py -s %d"
    cmd %= partition_size
    status, output = commands.getstatusoutput(cmd)
    if status:
        # Non-zero exit status: surface the tool's output and fail.
        print output
        return False, 0
    return True, int(output)
def AdjustPartitionSizeForVerity(partition_size):
    """Modifies the provided partition size to account for the verity metadata.

    This information is used to size the created image appropriately.

    Args:
        partition_size: the size of the partition to be verified.
    Returns:
        The size of the partition adjusted for verity metadata, or 0 if
        either size query failed.
    """
    success, verity_tree_size = GetVerityTreeSize(partition_size)
    if not success:
        # (Fixed a stray trailing semicolon on this return.)
        return 0
    success, verity_metadata_size = GetVerityMetadataSize(partition_size)
    if not success:
        return 0
    # Reserve room for both the hash tree and the metadata block.
    return partition_size - verity_tree_size - verity_metadata_size
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
    """Build the dm-verity hash tree for a sparse image.

    Writes the tree to verity_image_path and, as a side effect, stores the
    resulting root hash and salt into prop_dict under "verity_root_hash"
    and "verity_salt".

    Returns:
        True on success, False if build_verity_tree failed.
    """
    cmd = ("build_verity_tree -A %s %s %s" % (FIXED_SALT, sparse_image_path, verity_image_path))
    print cmd
    status, output = commands.getstatusoutput(cmd)
    if status:
        print "Could not build verity tree! Error: %s" % output
        return False
    # On success the tool prints "<root-hash> <salt>".
    root, salt = output.split()
    prop_dict["verity_root_hash"] = root
    prop_dict["verity_salt"] = salt
    return True
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                        block_device, signer_path, key):
    """Generate and sign the verity metadata block via the helper script.

    Args:
        image_size: size of the image being verified.
        verity_metadata_path: output path for the metadata blob.
        root_hash, salt: values produced by BuildVerityTree.
        block_device: target block device recorded in the metadata.
        signer_path, key: signing tool and key used to sign the metadata.
    Returns:
        True on success, False if the helper script failed.
    """
    cmd = ("system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s" %
           (image_size,
            verity_metadata_path,
            root_hash,
            salt,
            block_device,
            signer_path,
            key))
    print cmd
    status, output = commands.getstatusoutput(cmd)
    if status:
        print "Could not build verity metadata! Error: %s" % output
        return False
    return True
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
    """Appends the unsparse image to the given sparse image.

    Args:
        sparse_image_path: the path to the (sparse) image
        unsparse_image_path: the path to the (unsparse) image
        error_message: prefix printed (with the tool's output) on failure.
    Returns:
        True on success, False on failure.
    """
    cmd = "append2simg %s %s"
    cmd %= (sparse_image_path, unsparse_image_path)
    print cmd
    status, output = commands.getstatusoutput(cmd)
    if status:
        print "%s: %s" % (error_message, output)
        return False
    return True
def BuildVerifiedImage(data_image_path, verity_image_path, verity_metadata_path):
    """Append the verity metadata, then the hash tree, onto the sparse image.

    Returns:
        True if both appends succeed; False as soon as either fails.
    """
    appends = (
        (verity_metadata_path, "Could not append verity metadata!"),
        (verity_image_path, "Could not append verity tree!"),
    )
    for blob_path, error_message in appends:
        if not Append2Simg(data_image_path, blob_path, error_message):
            return False
    return True
def UnsparseImage(sparse_image_path, replace=True):
    """Inflate a sparse image into "unsparse_<name>" in the same directory.

    Args:
        sparse_image_path: path of the sparse image to inflate.
        replace: when True, overwrite any pre-existing unsparse file; when
            False, reuse it without re-running simg2img.
    Returns:
        (True, unsparse_path) on success, (False, None) on failure.
    """
    directory, base_name = os.path.split(sparse_image_path)
    unsparse_path = os.path.join(directory, "unsparse_" + base_name)
    if os.path.exists(unsparse_path):
        if not replace:
            # Reuse the existing inflated copy.
            return True, unsparse_path
        os.unlink(unsparse_path)
    if RunCommand(["simg2img", sparse_image_path, unsparse_path]) != 0:
        # Don't leave a partial file behind on failure.
        os.remove(unsparse_path)
        return False, None
    return True, unsparse_path
def MakeVerityEnabledImage(out_file, prop_dict):
    """Creates an image that is verifiable using dm-verity.

    Args:
        out_file: the location to write the verifiable image at
        prop_dict: a dictionary of properties required for image creation and
            verification; BuildVerityTree adds "verity_root_hash" and
            "verity_salt" to it as a side effect.
    Returns:
        True on success, False otherwise.
    """
    # get properties
    image_size = prop_dict["partition_size"]
    block_dev = prop_dict["verity_block_device"]
    signer_key = prop_dict["verity_key"]
    signer_path = prop_dict["verity_signer_cmd"]
    # make a tempdir for the intermediate verity blobs
    tempdir_name = tempfile.mkdtemp(suffix="_verity_images")
    # A single try/finally replaces the rmtree call that was duplicated on
    # every early-return path of the original code.
    try:
        # get partial image paths
        verity_image_path = os.path.join(tempdir_name, "verity.img")
        verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
        # build the verity tree and get the root hash and salt
        if not BuildVerityTree(out_file, verity_image_path, prop_dict):
            return False
        # build the metadata blocks
        root_hash = prop_dict["verity_root_hash"]
        salt = prop_dict["verity_salt"]
        if not BuildVerityMetadata(image_size,
                                   verity_metadata_path,
                                   root_hash,
                                   salt,
                                   block_dev,
                                   signer_path,
                                   signer_key):
            return False
        # build the full verified image
        return BuildVerifiedImage(out_file,
                                  verity_image_path,
                                  verity_metadata_path)
    finally:
        shutil.rmtree(tempdir_name, ignore_errors=True)
def BuildImage(in_dir, prop_dict, out_file,
               fs_config=None,
               fc_config=None,
               block_list=None):
    """Build an image to out_file from in_dir with property prop_dict.

    Args:
        in_dir: path of input directory.
        prop_dict: property dictionary.
        out_file: path of the output image file.
        fs_config: path to the fs_config file (typically
            META/filesystem_config.txt). If None then the configuration in
            the local client will be used.
        fc_config: path to the SELinux file_contexts file. If None then
            the value from prop_dict['selinux_fc'] will be used.
        block_list: optional block-map output path passed to mkuserimg.sh (-B).
    Returns:
        True iff the image is built successfully.
    """
    build_command = []
    fs_type = prop_dict.get("fs_type", "")
    run_fsck = False
    is_verity_partition = "verity_block_device" in prop_dict
    verity_supported = prop_dict.get("verity") == "true"
    # adjust the partition size to make room for the hashes if this is to be verified
    if verity_supported and is_verity_partition:
        partition_size = int(prop_dict.get("partition_size"))
        adjusted_size = AdjustPartitionSizeForVerity(partition_size)
        if not adjusted_size:
            return False
        # Downstream tools see the shrunken size; the original is kept too.
        prop_dict["partition_size"] = str(adjusted_size)
        prop_dict["original_partition_size"] = str(partition_size)
    # Choose the mkfs helper based on the requested filesystem type.
    if fs_type.startswith("ext"):
        build_command = ["mkuserimg.sh"]
        if "extfs_sparse_flag" in prop_dict:
            build_command.append(prop_dict["extfs_sparse_flag"])
            # Sparse ext images can (and should) be fsck'd after inflation.
            run_fsck = True
        build_command.extend([in_dir, out_file, fs_type,
                              prop_dict["mount_point"]])
        build_command.append(prop_dict["partition_size"])
        if "timestamp" in prop_dict:
            build_command.extend(["-T", str(prop_dict["timestamp"])])
        if fs_config is not None:
            build_command.extend(["-C", fs_config])
        if block_list is not None:
            build_command.extend(["-B", block_list])
        # Explicit file_contexts wins over the one from the property dict.
        if fc_config is not None:
            build_command.append(fc_config)
        elif "selinux_fc" in prop_dict:
            build_command.append(prop_dict["selinux_fc"])
    elif fs_type.startswith("f2fs"):
        build_command = ["mkf2fsuserimg.sh"]
        build_command.extend([out_file, prop_dict["partition_size"]])
    else:
        # Fallback: yaffs2 image.
        build_command = ["mkyaffs2image", "-f"]
        if prop_dict.get("mkyaffs2_extra_flags", None):
            build_command.extend(prop_dict["mkyaffs2_extra_flags"].split())
        build_command.append(in_dir)
        build_command.append(out_file)
        if "selinux_fc" in prop_dict:
            build_command.append(prop_dict["selinux_fc"])
            build_command.append(prop_dict["mount_point"])
    exit_code = RunCommand(build_command)
    if exit_code != 0:
        return False
    # create the verified image if this is to be verified
    if verity_supported and is_verity_partition:
        if not MakeVerityEnabledImage(out_file, prop_dict):
            return False
    if run_fsck and prop_dict.get("skip_fsck") != "true":
        success, unsparse_image = UnsparseImage(out_file, replace=False)
        if not success:
            return False
        # Run e2fsck on the inflated image file
        e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
        exit_code = RunCommand(e2fsck_command)
        os.remove(unsparse_image)
    return exit_code == 0
def ImagePropFromGlobalDict(glob_dict, mount_point):
    """Build an image property dictionary from the global dictionary.

    Args:
        glob_dict: the global dictionary from the build system.
        mount_point: such as "system", "data" etc.
    Returns:
        A dict of per-image properties; values copied via glob_dict are
        stringified, while "timestamp" is passed through unchanged.
    """
    image_props = {}
    build_prop = glob_dict.get("build.prop", {})
    if "ro.build.date.utc" in build_prop:
        image_props["timestamp"] = build_prop["ro.build.date.utc"]

    def copy_prop(src_key, dest_key):
        # Copy (stringified) only when the source property exists.
        if src_key in glob_dict:
            image_props[dest_key] = str(glob_dict[src_key])

    for prop_name in ("extfs_sparse_flag",
                      "mkyaffs2_extra_flags",
                      "selinux_fc",
                      "skip_fsck",
                      "verity",
                      "verity_key",
                      "verity_signer_cmd"):
        copy_prop(prop_name, prop_name)
    image_props["mount_point"] = mount_point
    # Per-mount-point property mapping; within a list, later entries
    # override earlier ones (e.g. userdata_fs_type over the generic type).
    per_mount_props = {
        "system": [("fs_type", "fs_type"),
                   ("system_size", "partition_size"),
                   ("system_verity_block_device", "verity_block_device")],
        "data": [("fs_type", "fs_type"),
                 ("userdata_fs_type", "fs_type"),
                 ("userdata_size", "partition_size")],
        "cache": [("cache_fs_type", "fs_type"),
                  ("cache_size", "partition_size")],
        "vendor": [("vendor_fs_type", "fs_type"),
                   ("vendor_size", "partition_size"),
                   ("vendor_verity_block_device", "verity_block_device")],
        "oem": [("fs_type", "fs_type"),
                ("oem_size", "partition_size")],
    }
    for src_key, dest_key in per_mount_props.get(mount_point, []):
        copy_prop(src_key, dest_key)
    return image_props
def LoadGlobalDict(filename):
    """Load "name=value" pairs from filename.

    Blank lines and lines starting with '#' are skipped. Only the first
    '=' splits, so values may themselves contain '='.

    Args:
        filename: path of the property file to read.
    Returns:
        A dict mapping each name to its (string) value.
    """
    d = {}
    # Context manager ensures the file is closed even if parsing raises
    # (the original left the handle open on error).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            k, v = line.split("=", 1)
            d[k] = v
    return d
def main(argv):
    """CLI entry point: build the image named by its output filename.

    Args:
        argv: [input_directory, properties_file, output_image_file].
    """
    if len(argv) != 3:
        print __doc__
        sys.exit(1)
    in_dir = argv[0]
    glob_dict_file = argv[1]
    out_file = argv[2]
    glob_dict = LoadGlobalDict(glob_dict_file)
    image_filename = os.path.basename(out_file)
    # The mount point is inferred purely from the output file name.
    mount_point = ""
    if image_filename == "system.img":
        mount_point = "system"
    elif image_filename == "userdata.img":
        mount_point = "data"
    elif image_filename == "cache.img":
        mount_point = "cache"
    elif image_filename == "vendor.img":
        mount_point = "vendor"
    elif image_filename == "oem.img":
        mount_point = "oem"
    else:
        print >> sys.stderr, "error: unknown image file name ", image_filename
        exit(1)
    image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
    if not BuildImage(in_dir, image_properties, out_file):
        print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir)
        exit(1)
# Script entry point; strips the program name off sys.argv.
if __name__ == '__main__':
    main(sys.argv[1:])
| StarcoderdataPython |
11390258 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Execute the version module in this namespace to obtain __version__
# without importing the package. NOTE: execfile is Python-2-only.
execfile('src/cuisine_sweet/version.py')
setup(
    name = "cuisine_sweet",
    version = __version__,
    # pypi stuff
    author = "<NAME>",
    author_email = "<EMAIL>",
    description = "Sugar-coated declarative deployment recipes built on top of Fabric and Cuisine",
    license = "Revised BSD License",
    keywords = [ "fabric", "cuisine", "deployment" ],
    url = "http://github.com/dexterbt1/cuisine_sweet",
    # Source layout: packages live under src/.
    packages = find_packages('src/'),
    package_dir = {
        '': 'src',
    },
    scripts = [],
    install_requires = [
        'Fabric',
        'PyYAML',
        'cuisine==0.3.2',
        'distribute',
        'docutils',
        'decorator',
        'pexpect',
    ],
    # could also include long_description, download_url, classifiers, etc.
    download_url = 'https://github.com/dexterbt1/cuisine_sweet/tarball/%s' % __version__,
    classifiers = [
        "Programming Language :: Python",
        "Development Status :: 3 - Alpha",
        "Natural Language :: English",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Topic :: Utilities",
    ],
)
| StarcoderdataPython |
6424547 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from shaker.engine import utils
class TestUtils(testtools.TestCase):
    """Unit tests for shaker.engine.utils helper functions."""
    def setUp(self):
        super(TestUtils, self).setUp()
    # --- split_address -----------------------------------------------------
    def test_split_address_valid(self):
        self.assertEqual(('10.0.0.1', '6777'),
                         utils.split_address('10.0.0.1:6777'))
    def test_split_address_invalid(self):
        self.assertRaises(ValueError, utils.split_address, 'erroneous')
    # --- flatten_dict ------------------------------------------------------
    def test_flatten_dict(self):
        self.assertEqual({}, dict(utils.flatten_dict({})))
        self.assertEqual(
            {'pa_b': 1},
            dict(utils.flatten_dict({'a': {'b': 1}}, prefix='p', sep='_')))
        self.assertEqual(
            {'a': 1, 'b.c': 2, 'b.d': 3},
            dict(utils.flatten_dict({'a': 1, 'b': {'c': 2, 'd': 3}})))
    # --- make_help_options: os.walk and path resolution are mocked ---------
    @mock.patch('os.walk')
    @mock.patch('shaker.engine.utils.resolve_relative_path')
    def test_make_help_options(self, resolve_mock, walk_mock):
        base_dir = 'abc/def'
        abs_dir = '/files/' + base_dir
        walk_mock.side_effect = [
            [(abs_dir, [], ['klm.yaml']), (abs_dir, [], ['ijk.yaml'])],
        ]
        resolve_mock.side_effect = [abs_dir]
        # Options are listed sorted and without extensions.
        expected = 'List: "ijk", "klm"'
        observed = utils.make_help_options('List: %s', base_dir)
        self.assertEqual(expected, observed)
    @mock.patch('os.walk')
    @mock.patch('shaker.engine.utils.resolve_relative_path')
    def test_make_help_options_subdir(self, resolve_mock, walk_mock):
        base_dir = 'abc/def'
        abs_dir = '/files/' + base_dir
        walk_mock.side_effect = [
            [(abs_dir + '/sub', [], ['klm.yaml']),
             (abs_dir + '/sub', [], ['ijk.yaml'])],
        ]
        resolve_mock.side_effect = [abs_dir]
        # Sub-directory prefixes are preserved in the option names.
        expected = 'List: "sub/ijk", "sub/klm"'
        observed = utils.make_help_options('List: %s', base_dir)
        self.assertEqual(expected, observed)
    @mock.patch('os.walk')
    @mock.patch('shaker.engine.utils.resolve_relative_path')
    def test_make_help_options_with_filter(self, resolve_mock, walk_mock):
        base_dir = 'abc/def'
        abs_dir = '/files/' + base_dir
        walk_mock.side_effect = [
            [(abs_dir + '/sub', [], ['klm.yaml']),
             (abs_dir + '/sub', [], ['ijk.html']),
             (abs_dir + '/sub', [], ['mno.yaml'])],
        ]
        resolve_mock.side_effect = [abs_dir]
        # The .html file must be excluded by the type filter.
        expected = 'List: "sub/klm", "sub/mno"'
        observed = utils.make_help_options(
            'List: %s', base_dir, type_filter=lambda x: x.endswith('.yaml'))
        self.assertEqual(expected, observed)
    # --- algebraic_product -------------------------------------------------
    def test_algebraic_product_empty(self):
        expected = [{}]
        observed = list(utils.algebraic_product())
        self.assertEqual(expected, observed)
    def test_algebraic_product_string(self):
        expected = [{'a': 1, 'b': 'zebra'}, {'a': 2, 'b': 'zebra'}]
        observed = list(utils.algebraic_product(a=[1, 2], b='zebra'))
        self.assertEqual(expected, observed)
    def test_algebraic_product_number(self):
        expected = [{'a': 'x', 'b': 4}, {'a': 2, 'b': 4}]
        observed = list(utils.algebraic_product(a=['x', 2], b=4))
        self.assertEqual(expected, observed)
    # --- misc helpers ------------------------------------------------------
    def test_strict(self):
        self.assertEqual('some_01_string_a',
                         utils.strict('Some 01-string (brr!) + %% A'))
    def test_set_value_by_path(self):
        data = {}
        utils.set_value_by_path(data, 'jitter.avg', 10)
        self.assertEqual({'jitter': {'avg': 10}}, data)
    def test_set_value_by_path_with_update(self):
        data = {'jitter': {'min': 5}}
        utils.set_value_by_path(data, 'jitter.avg', 10)
        self.assertEqual({'jitter': {'avg': 10, 'min': 5}}, data)
    def test_get_value_by_path(self):
        data = {'jitter': {'min': 5}}
        self.assertEqual(5, utils.get_value_by_path(data, 'jitter.min'))
    def test_get_value_by_path_none(self):
        data = {'jitter': {'min': 5}}
        self.assertIsNone(utils.get_value_by_path(data, 'jitter.avg'))
    def test_copy_value_by_path(self):
        src = {'sum': {'jitter_ms': 7}}
        dst = {}
        res = utils.copy_value_by_path(src, 'sum.jitter_ms', dst, 'jitter.avg')
        self.assertEqual({'jitter': {'avg': 7}}, dst)
        self.assertTrue(res)
    def test_copy_value_by_path_src_not_found(self):
        src = {}
        dst = {}
        res = utils.copy_value_by_path(src, 'sum.jitter_ms', dst, 'jitter.avg')
        self.assertEqual({}, dst)
        self.assertFalse(res)
    def test_merge_dicts(self):
        src = [{'a': {1: 9, 2: 8}, 'b': {1: 3}}, {'a': {3: 7}, 'c': {4: 8}}]
        expected = {'a': {1: 9, 2: 8, 3: 7}, 'b': {1: 3}, 'c': {4: 8}}
        res = utils.merge_dicts(src)
        self.assertEqual(expected, res)
| StarcoderdataPython |
245584 | import logging
import os
import tempfile
import gym
import importlib_resources
import pytest
import pytest_notebook.nb_regression as nb
from smarts.core.agent import Agent, AgentSpec
from smarts.core.agent_interface import AgentInterface, AgentType
from smarts.core.sensors import Observation
from smarts.core.utils.episodes import episodes
# Verbose logging for the scenario run exercised by the notebook test.
logging.basicConfig(level=logging.INFO)
# Single agent id shared by the spec mapping and per-step lookups below.
AGENT_ID = "Agent-007"
class KeepLaneAgent(Agent):
    """Minimal agent whose policy is to always keep the current lane."""
    def act(self, obs: Observation):
        # The Laner agent interface accepts discrete action strings.
        return "keep_lane"
def run_scenario(
    scenarios, sim_name, headless, num_episodes, seed, max_episode_steps=None,
):
    """Drive a KeepLaneAgent through `num_episodes` episodes of the scenarios.

    Builds a single-agent hiway-v0 environment (headless SUMO, fixed 0.1 s
    timestep) and records per-episode scenario info and per-step results.
    """
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(
            AgentType.Laner, max_episode_steps=max_episode_steps
        ),
        agent_builder=KeepLaneAgent,
    )
    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=scenarios,
        agent_specs={AGENT_ID: agent_spec},
        sim_name=sim_name,
        headless=headless,
        visdom=False,
        timestep_sec=0.1,
        sumo_headless=True,
        seed=seed,
    )
    for episode in episodes(n=num_episodes):
        # Fresh agent per episode.
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)
        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step({AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)
    env.close()
@pytest.fixture(scope="module")
def notebook():
    """Yield a temporary .ipynb path populated with the packaged test notebook.

    The file is removed once the module's tests have finished.
    """
    # Keep the descriptor mkstemp returns and write through it; the original
    # discarded it (`_, tmppath = ...`), leaking an open OS-level fd.
    fd, tmppath = tempfile.mkstemp(suffix=".ipynb")
    with os.fdopen(fd, "w") as handle:
        import smarts.core.tests
        handle.write(
            importlib_resources.read_text(smarts.core.tests, "test_notebook.ipynb")
        )
    yield tmppath
    os.remove(tmppath)
def test_notebook1(nb_regression: nb.NBRegressionFixture, notebook):
    """Regenerate the notebook's expected outputs, then re-check against them."""
    ## Generate from the un-run notebook
    nb_regression.force_regen = True
    nb_regression.check(notebook, False)
    ## Run notebook against generated
    ## ignore output for now
    nb_regression.diff_ignore = ("/cells/*/outputs/*/text",)
    nb_regression.force_regen = False
    nb_regression.check(notebook)
| StarcoderdataPython |
11224831 | # Generated by Django 3.2.3 on 2021-07-18 08:21
import api.validators
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.2.3 (see file header).

    A batch of AlterField operations adjusting field options (help texts,
    validators, verbose names, max lengths) across the api app's models.
    Avoid editing by hand; regenerate via makemigrations instead.
    """
    dependencies = [
        ('api', '0062_auto_20210717_0033'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='articles/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='article',
            name='image_url',
            field=models.URLField(blank=True, help_text='Альтернативный способ загрузки изображения. Приоритет у файла.', max_length=192, null=True, verbose_name='Ссылка на изображение'),
        ),
        migrations.AlterField(
            model_name='article',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Статьи с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='article',
            name='pinned_full_size',
            field=models.BooleanField(default=False, help_text='Статья с этой меткой будет отображаться в полноразмерном формате вверху страницы.', verbose_name='Закрепить'),
        ),
        migrations.AlterField(
            model_name='book',
            name='url',
            field=models.URLField(verbose_name='Ссылка на книгу'),
        ),
        migrations.AlterField(
            model_name='booktype',
            name='slug',
            field=models.SlugField(unique=True, verbose_name='Слаг (Ссылка)'),
        ),
        migrations.AlterField(
            model_name='catalog',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='catalogs/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='catalog',
            name='image_url',
            field=models.URLField(help_text='Альтернативный способ загрузки изображения. Приоритет у файла.', max_length=192, verbose_name='Ссылка на изображение'),
        ),
        migrations.AlterField(
            model_name='catalog',
            name='raw_html',
            field=models.TextField(help_text='Поле для html кода страницы.', max_length=4000000, verbose_name='HTML'),
        ),
        migrations.AlterField(
            model_name='city',
            name='is_primary',
            field=models.BooleanField(default=False, help_text='Города с этой меткой будут отображаться в начале списка.', verbose_name='Приоритет вывода'),
        ),
        migrations.AlterField(
            model_name='diary',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='diaries/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='history',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='history/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='history',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Истории с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='history',
            name='raw_html',
            field=models.TextField(help_text='Поле для html кода страницы.', max_length=4000000, verbose_name='HTML'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='movies/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Фильмы с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='place',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='places/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='place',
            name='image_url',
            field=models.URLField(blank=True, help_text='Альтернативный способ загрузки изображения. Приоритет у файла.', null=True, verbose_name='Ссылка на изображение'),
        ),
        migrations.AlterField(
            model_name='place',
            name='moderation_flag',
            field=models.BooleanField(default=False, help_text='Места без этой метки не будут отображаться на сайте.', verbose_name='Отметка о модерации'),
        ),
        migrations.AlterField(
            model_name='place',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Места с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='question',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Вопросы с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='right',
            name='raw_html',
            field=models.TextField(help_text='Поле для html кода страницы.', max_length=4000000, verbose_name='HTML'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='category',
            field=models.CharField(choices=[('Книги', 'Книги'), ('Фильмы', 'Фильмы'), ('Места', 'Места'), ('Вопросы', 'Вопросы'), ('Права', 'Права'), ('Видеоролики', 'Видеоролики'), ('События', 'События')], max_length=50, verbose_name='Категория'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='name',
            field=models.CharField(max_length=50, verbose_name='Название'),
        ),
        migrations.AlterField(
            model_name='video',
            name='duration',
            field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(86400)], verbose_name='Длина видео в сек.'),
        ),
        migrations.AlterField(
            model_name='video',
            name='image',
            field=models.ImageField(blank=True, help_text='Поддерживаемые форматы jpg, jpeg, gif, png, bmp. Размер до 10М.', null=True, upload_to='videos/', validators=[api.validators.file_size_validator, api.validators.image_extension_validator], verbose_name='Изображение'),
        ),
        migrations.AlterField(
            model_name='video',
            name='output_to_main',
            field=models.BooleanField(default=False, help_text='Видео с этой меткой будут отображаться на главной странице сайта.', verbose_name='Отображать на главной странице'),
        ),
        migrations.AlterField(
            model_name='video',
            name='pinned_full_size',
            field=models.BooleanField(default=False, help_text='Видео с этой меткой будет отображаться в полноразмерном формате вверху страницы.', verbose_name='Закрепить'),
        ),
        migrations.AlterField(
            model_name='video',
            name='resource_group',
            field=models.BooleanField(default=False, help_text='Видео с этой меткой не будут показаны не авторизованным пользователям.', verbose_name='Ресурсная группа'),
        ),
    ]
| StarcoderdataPython |
5176602 | <gh_stars>1-10
import os
import config
import discord
from discord.ext import commands
from colorama import Fore, Style # Цветная консоль
from colorama import init # Цветная консоль
# Runtime configuration pulled from the local config module.
TOKEN = config.TOKEN
PREFIX = config.PREFIX
STATUS = config.STATUS
COLOR_ERROR = config.COLOR_ERROR
client = commands.Bot(command_prefix=PREFIX)
# Drop the built-in help command so a custom one can be registered by a cog.
client.remove_command('help')
# Initialise colorama so ANSI colours also work on Windows consoles.
init()
# Bot startup handler (runs once the gateway connection is ready)
@client.event
async def on_ready():
    """Print a startup banner and set the bot's default presence."""
    print(" ")
    print(Fore.CYAN + "===================================" + Style.RESET_ALL)
    print(
        Fore.CYAN + '|' + Style.RESET_ALL + f' Смена статуса на стандартный... ' + Fore.CYAN + '|' + Style.RESET_ALL)
    # Presence text comes from config.STATUS.
    await client.change_presence(activity=discord.Game(name=STATUS))
    print(
        Fore.CYAN + '|' + Style.RESET_ALL + f' Бот активирован! ' + Fore.CYAN + '|' + Style.RESET_ALL)
    print(Fore.CYAN + "===================================" + Style.RESET_ALL)
    print(f' Статус - {STATUS} ')
    print(f' Имя бота - {client.user.name}')
    print(f' ID бота - {client.user.id} ')
    print(Fore.CYAN + "===================================" + Style.RESET_ALL)
    print(" ")
@client.event
async def on_command_error(ctx, error):
    """Centralised command error handler: maps known errors to user-facing embeds.

    Unknown errors are logged to the console, echoed to the channel, and
    re-raised so the default traceback handling still runs.
    """
    # Unknown command: silently ignored (user-facing reply left commented out).
    if isinstance(error, commands.CommandNotFound):
        return # await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Команда не найдена!', color=COLOR_ERROR))
    elif isinstance(error, discord.Forbidden):
        return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, У бота недостаточно прав!\n'
                                                              f'❗️ Если это не модераторская команда: то значит у бота нету права управлением сообщениями или права на установку реакций.',
                                                  color=COLOR_ERROR))
    # NOTE(review): the discord.Forbidden half of this condition is dead code;
    # that type was already fully handled by the branch above.
    elif isinstance(error, commands.MissingPermissions) or isinstance(error, discord.Forbidden):
        return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, У вас недостаточно прав!', color=COLOR_ERROR))
    elif isinstance(error, commands.BadArgument):
        # Distinguish converter failures by the converter name in the message.
        if "Member" in str(error):
            return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Пользователь не найден!', color=COLOR_ERROR))
        if "Guild" in str(error):
            return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Сервер не найден!', color=COLOR_ERROR))
        else:
            return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Введён неверный аргумент!', color=COLOR_ERROR))
    elif isinstance(error, commands.MissingRequiredArgument):
        return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Пропущен аргумент с названием {error.param.name}!', color=COLOR_ERROR))
    elif isinstance(error, commands.CommandOnCooldown):
        await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Воу, Воу, Не надо так быстро прописывать команды.\n'
                                                       f'❗️ Подожди {error.retry_after:.2f} секунд и сможешь написать команду ещё раз.'))
    else:
        # Fallback: special-case int() conversion failures, log everything else.
        if "ValueError: invalid literal for int()" in str(error):
            return await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, Укажите число а не строку!', color=COLOR_ERROR))
        else:
            print(Fore.RED + f"[ERROR] " + Style.RESET_ALL + f"Команда: {ctx.message.content}")
            print(Fore.RED + f"[ERROR] " + Style.RESET_ALL + f"Сервер: {ctx.message.guild}")
            print(Fore.RED + f"[ERROR] " + Style.RESET_ALL + f"Ошибка: {error}")
            await ctx.send(embed=discord.Embed(description=f'❗️ {ctx.author.name}, \n**`ERROR:`** {error}', color=COLOR_ERROR))
            raise error
@client.event
async def on_message(message):
    """Global message hook: drop messages from muted members, refuse DMs.

    BUG FIX: the original evaluated ``message.author.roles`` before checking
    ``message.guild``. In DMs the author is a ``discord.User`` which has no
    ``roles`` attribute, so the handler raised AttributeError, and messages
    from bots fell through to the DM branch and tried to DM the bot.
    Guard order is now: ignore bots -> guild handling -> DM handling.
    """
    if message.author.bot:
        # Never react to bot messages (including our own).
        return
    if message.guild:
        mute_role = discord.utils.get(message.guild.roles, name="Muted")
        if mute_role in message.author.roles:
            # Muted members may not speak: delete their message outright.
            await message.delete()
        else:
            await client.process_commands(message)
    else:
        # Direct message: politely decline; author may have DMs closed.
        try:
            await message.author.send("Не пиши мне в лс <:blya_placat_budu:707670077926932520>")
        except Exception:
            pass
# Auto-load every cog in ./Modules: each *.py file is treated as a
# discord.py extension ("Modules.<name>"); [:-3] strips the ".py" suffix.
for filename in os.listdir('./Modules'):
    if filename.endswith('.py'):
        client.load_extension(f'Modules.{filename[:-3]}')
        print(Fore.YELLOW + "[F-COMM] " + Style.RESET_ALL + f"Загружен модуль - {filename[:-3]}")
# Blocking call: starts the bot's event loop with the configured token.
client.run(TOKEN)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
ImageDataExtractor
Microscopy image quantification.
<EMAIL>
~~~~~~~~~~~~~~~
"""
import logging
from .extract import *
from .figsplit import figsplit
# Package metadata (consumed by packaging tooling and introspection).
__title__ = 'ImageDataExtractor'
__version__ = '2.0.0'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT License'
# NOTE(review): configuring the root logger at import time also affects any
# application embedding this package — confirm this is intended.
logging.basicConfig(level=logging.INFO, format='%(asctime)s : %(levelname)s : %(message)s')
| StarcoderdataPython |
# Stitch up to 700 pre-rendered frames into an animated GIF.
import imageio
import os
images = []
i = 0
# NOTE(review): os.listdir('./frames') is used only to bound the iteration
# count — the filename itself is ignored and frames are read by index
# starting at frame300. This assumes the directory contains exactly the
# expected frame files; any extra/missing file shifts the count. Confirm.
for filename in os.listdir('./frames'):
    # print(filename)
    if i >= 700:
        break
    images.append(imageio.imread('./frames/frame{}.png'.format(i+300)))
    i += 1
# Write the collected frames out as a single looping GIF.
imageio.mimsave('../assets/example_breakout.gif', images)
| StarcoderdataPython |
8053010 | <filename>dfibers/examples/lorenz.py<gh_stars>1-10
"""
Fiber-based fixed point location in the Lorenz system
f(v)[0] = s*(v[1]-v[0])
f(v)[1] = r*v[0] - v[1] - v[0]*v[2]
f(v)[2] = v[0]*v[1] - b*v[2]
Reference:
http://www.emba.uvm.edu/~jxyang/teaching/Math266notes13.pdf
https://en.wikipedia.org/wiki/Lorenz_system
"""
import numpy as np
import matplotlib.pyplot as pt
import scipy.integrate as si
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
from mpl_toolkits.mplot3d import Axes3D
# Lorenz system dimension and parameters (classic chaotic regime).
N = 3
s, b, r = 10, 8./3., 28

def f(v):
    """Lorenz vector field, evaluated column-wise on a (3, m) array."""
    x, y, z = v[0, :], v[1, :], v[2, :]
    return np.array([
        s * (y - x),
        r * x - y - x * z,
        x * y - b * z,
    ])

def ef(v):
    """Constant per-coordinate error estimate of 0.001, shape (N, 1)."""
    return np.full((N, 1), 0.001)

def Df(v):
    """Jacobian of f at each of the m columns of v; returns shape (m, 3, 3)."""
    m = v.shape[1]
    J = np.zeros((m, 3, 3))
    # Row 0: derivative of s*(y - x).
    J[:, 0, 0] = -s
    J[:, 0, 1] = s
    # Row 1: derivative of r*x - y - x*z.
    J[:, 1, 0] = r - v[2]
    J[:, 1, 1] = -1
    J[:, 1, 2] = -v[0]
    # Row 2: derivative of x*y - b*z.
    J[:, 2, 0] = v[1]
    J[:, 2, 1] = v[0]
    J[:, 2, 2] = -b
    return J
if __name__ == "__main__":
    # Collect attractor points
    # Integrate the ODE for 40 time units to trace out the strange attractor.
    t = np.arange(0,40,0.01)
    v = np.ones((N,1))
    A = si.odeint(lambda v, t: f(v.reshape((N,1))).flatten(), v.flatten(), t).T
    # Set up fiber arguments
    v = np.zeros((N,1))
    # c = np.random.randn(N,1)
    # Fixed direction vector (a previously sampled random draw, kept for
    # reproducibility of the figure).
    c = np.array([[0.83736021, -1.87848114, 0.43935044]]).T
    fiber_kwargs = {
        "f": f,
        "ef": ef,
        "Df": Df,
        "compute_step_amount": lambda trace: (0.1, 0, False),
        "v": v,
        "c": c,
        "terminate": lambda trace: (np.fabs(trace.x[:N,:]) > 50).any(),
        "max_step_size": 1,
        "max_traverse_steps": 2000,
        "max_solve_iterations": 2**5,
    }
    print("using c:")
    print(c.T)
    # Visualize strange attractor
    ax = pt.gca(projection="3d")
    ax.plot(*A, color='gray', linestyle='-', alpha=0.5)
    # Analytic fixed points: origin plus the two "wings" at (+-sqrt(b(r-1)),
    # +-sqrt(b(r-1)), r-1).
    br1 = np.sqrt(b*(r-1))
    U = np.array([[0, 0, 0],[br1,br1,r-1],[-br1,-br1,r-1]]).T
    ax.scatter(*U, color='black')
    # Run and visualize fiber components, for each fxpt
    xlims, ylims, zlims = [-20,20], [-30,30], [-20,60]
    for fc in [0,2]:
        # start from current fxpt
        fiber_kwargs["v"] = U[:,[fc]]
        # ax.text(U[0,fc],U[1,fc],U[2,fc], str(fc))
        # Run in one direction
        solution = sv.fiber_solver(**fiber_kwargs)
        V1 = np.concatenate(solution["Fiber trace"].points, axis=1)[:N,:]
        z = solution["Fiber trace"].z_initial
        # Run in other direction (negate initial tangent)
        fiber_kwargs["z"] = -z
        solution = sv.fiber_solver(**fiber_kwargs)
        V2 = np.concatenate(solution["Fiber trace"].points, axis=1)[:N,:]
        # Join fiber segments, restrict to figure limits
        V = np.concatenate((np.fliplr(V1), V2), axis=1)
        # Subsample every 50th point to keep the quiver plot legible.
        V = V[:,::50]
        for i, (lo, hi) in enumerate([xlims, ylims, zlims]):
            V = V[:,(lo < V[i,:]) & (V[i,:] < hi)]
        C = f(V)
        # Visualize fiber
        ax.plot(*V, color='black', linestyle='-')
        ax.quiver(*np.concatenate((V,.1*C),axis=0),color='black')
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    ax.view_init(elev=15,azim=145)
    pt.tight_layout()
    pt.show()
| StarcoderdataPython |
11214870 | import numpy
import pandas
from phipkit.score import compute_scores
def test_basic():
    """End-to-end smoke test of compute_scores on a synthetic count matrix.

    Builds a mock counts DataFrame (beads/background samples plus two pull-down
    samples with spiked-in hits) and asserts the scorer ranks the spiked clones
    highest.

    BUG FIX: the fixture previously drew unseeded random counts, so the argmax
    assertions at the bottom could fail intermittently; the RNG is now seeded.
    """
    numpy.random.seed(0)  # make the randomized fixture deterministic
    counts_df = pandas.DataFrame(index=["clone_%d" % i for i in range(1000)])
    # Per-clone Poisson rate for the mock bead (background) samples.
    beads_lambda = numpy.random.randint(0, 10, len(counts_df))**2
    for i in range(8):
        counts_df["beads_%d" % i] = numpy.random.poisson(
            beads_lambda, len(counts_df))
    # Deliberate outliers in the background to stress robustness.
    counts_df["beads_1"] *= 1000
    counts_df.loc["clone_2", "beads_2"] = 1e6
    counts_df.loc["clone_2", "beads_3"] = 1e6
    counts_df["beads_uninformative_0"] = 0
    counts_df["sample_a"] = numpy.random.poisson(
        beads_lambda * 5, len(counts_df))
    counts_df["sample_b"] = numpy.random.poisson(
        beads_lambda * 0.5, len(counts_df))
    # Spike in unambiguous hits that the scorer must recover.
    counts_df.loc["clone_0", "sample_a"] = 1000
    counts_df.loc["clone_1", "sample_b"] = 1000
    counts_df.loc["clone_2", "sample_b"] = 1e9
    counts_df.loc["clone_bogus"] = 1e10
    print("COUNTS")
    print(counts_df)
    results = compute_scores(counts_df=counts_df)
    print("RESULTS")
    print(results)
    for col in results:
        if col.startswith("sample_"):
            print("Sorted values for %s" % col)
            sort_series = results[col].sort_values(ascending=False)
            print(sort_series)
            print("Counts:")
            print(counts_df.loc[sort_series.head(5).index])
    # The spiked clones must come out on top for their respective samples.
    max_scores = results.idxmax()
    assert max_scores["sample_a"] == "clone_0"
    assert max_scores["sample_b"] == "clone_1"
"""Populate catalog.db with the sample Acme catalog (one user, categories, items)."""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import User, Category, Item, Base

engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Changes are not persisted until session.commit().
session = DBSession()


def _save(obj):
    """Add *obj* to the session and commit immediately.

    Mirrors the original script's one-commit-per-object behaviour so insert
    order (and thus autoincrement ids) is unchanged.
    """
    session.add(obj)
    session.commit()
    return obj


# The single demo user that owns every catalog entry (user_id=1).
_save(User(name="<NAME>", email="<EMAIL>",
           picture="https://upload.wikimedia.org/wikipedia/en/thumb/3/3c/Wile_E._Coyote.svg/200px-Wile_E._Coyote.svg.png"))

# (category name, [(item name, item description), ...]) — kept in the exact
# insertion order of the original script.
CATALOG = [
    ("Transport", [
        ("Jet Bike Kit", "Like a motorcycle, but without the wheels. Travels up to 200mph."),
        ("Rocket Sled", "Powered by a spare Saturn-era rocket engine, this sled can accelerate to 300 mph in 5 seconds."),
        ("Foot Springs", "Attach a pair to your feet and put some spring in your step."),
        ("Roller Skis", "Now you can ski wherever you go! Not just on the snow, but everywhere!"),
        ("Jet-Propelled Pogo-Stick", "Like a pogo-stick, this one has jet-rocket attachments so that you'll get a much quicker result! Just watch where you're going though."),
    ]),
    ("Pest Removal", [
        ("Giant Mouse Trap", "Traps anything, especially Giant Rats!"),
        ("Super Bug Spray", "Guaranteed to work. They won't be back!"),
        ("Giant Fly Paper", "Traps anything, especially Giant Flies!"),
    ]),
    ("Infrastructure Creation", [
        ("Instant Road", "Lets you roll out an instant road with ease as it makes a good detour trick. Provided that you watch where you're rolling."),
        ("Artificial Rock", "Lets you become like a real rock out in the open."),
    ]),
    ("Defense", [
        ("Triple-Strength Battleship Steel Armor Plate", "Usually it would fend off most objects, but not Road Runners."),
        ("Boom-Erang", "Like a regular boomerang but better. Guaranteed to return."),
        ("Indestructo Steel Ball", "Lets you roll in a ball that's literally indestructable."),
    ]),
    ("Disguise", [
        ("<NAME>", "Disguise yourself as a cactus and scare others."),
        ("Artificial Rock", "Lets you become like a real rock out in the open."),
    ]),
    ("Super Nutrition", [
        ("Super Speed Vitamins", "Lets you run fast. Super fast."),
        ("Earthquake Pills", "Why wait? Make your own earthquakes. Loads of fun!"),
        ("Hi-Speed Tonic", "Contains vitamins R P + M"),
        ("Triple-Strength Fortified Leg Muscle Vitamins", "Gives your legs the vitamins it needs to run faster than ever before!"),
    ]),
    ("Just Add Water", [
        ("Tornado Seeds", "Part of the tornado kit. Contains one thousand seeds."),
        ("Dehydrated Boulders", "Makes instant boulders with just a drop of water."),
    ]),
]

for category_name, items in CATALOG:
    category = _save(Category(user_id=1, name=category_name))
    for item_name, description in items:
        _save(Item(user_id=1, name=item_name, description=description,
                   category=category))

# BUG FIX: the original used the Python 2 `print` statement, a SyntaxError
# under Python 3; the parenthesised form behaves identically on both.
print("Acme catalog successfully populated!!!")
| StarcoderdataPython |
4847527 | from data import get_csv
from collections import deque
from math import exp, log
from datetime import datetime
from statistics import median
def parse():
    """Replay the blockchain CSV dumps and build per-block and per-month
    metric tables.

    Returns (block_output, month_output): each is a list of columns, where
    the first element of a column is its key name and the remaining elements
    are the per-block / per-month values in chronological order.
    """
    from template import by_block, by_month
    #setup output list with keys as first element
    block_output = [[i] for i in list(by_block.keys())]
    month_output = [[i] for i in list(by_month.keys())]
    #import data from csv
    # Reversed so .pop() walks each series in chronological order.
    blocks = get_csv("blocks")[::-1]
    price = get_csv("price")[::-1]
    reddit = get_csv("reddit")[::-1]
    price_btc = get_csv("price_btc")[::-1]
    price_xmr = get_csv("price_xmr")[::-1]
    by_month['timestamp'] = blocks[-1][0]
    #initiate price
    p = price.pop()
    p_btc = price_btc.pop()
    p_xmr = price_xmr.pop()
    #stacks for fast processing
    day = deque([])
    month = deque([])
    year = deque([])
    nonces = deque([])
    #nonce uniformity constants
    #there are 2160 nonces and 2160 bins
    max_nonce=2**32
    bin_size = int((max_nonce)/(360*3))
    #we count how many nonces are in each bin
    bin_count = [0 for i in range((2**32)//(bin_size)+1)]
    #the number of bins with exactly 1 nonce should be about 1/e
    singleton_bins = 0
    locked_txs=[]
    #block reward over 1y
    reward_1y = 0
    block_count = 0
    while blocks:
        #block data
        x = blocks.pop()
        # Periodic progress print (roughly every percent of remaining blocks).
        if blocks and len(blocks)%int(blocks[0][1]/100)==0:
            print(int(x[1]))
        #locked_txs
        # x[10] holds newly time-locked outputs as (amount, unlock) pairs;
        # unlock > 500000000 is interpreted as a timestamp, smaller values as
        # a block-count countdown — NOTE(review): inferred from the branches
        # below, confirm against the CSV producer.
        if len(x[10])>0:
            locked_txs = locked_txs + [list(y) for y in x[10]]
            by_block['supply_locked'] += sum([y[0] for y in x[10]])
        new_locked_txs = []
        while locked_txs:
            tx = locked_txs.pop()
            if tx[1] > 500000000 and tx[1] < x[0]:
                by_block['supply_locked'] -= tx[0]
            elif tx[1] == 1:
                by_block['supply_locked'] -= tx[0]
            elif tx[1]< by_block['timestamp']:
                tx[1] = int(tx[1] - 1)
                new_locked_txs.append(tx)
            else:
                new_locked_txs.append(tx)
        locked_txs=new_locked_txs
        #no-calculation data
        by_block['timestamp'] = x[0]
        by_block['block_height'] = int(x[1])
        by_block['version'] = str(int(x[8]))+"."+str(int(x[9]))
        by_block['supply_total'] += x[3]
        by_block['supply_circulating'] = by_block['supply_total']-by_block['supply_locked']
        by_block['block_size_cum'] += x[6]
        by_block['block_difficulty'] = x[2]
        by_block['block_reward'] = x[3]
        # Rolling 24h block count via a timestamp deque.
        day.append(x[0])
        by_block['block_count_24h'] += 1
        while day[0] < x[0] - 24*60*60:
            head = day.popleft()
            by_block['block_count_24h'] -= 1
        #price
        # Advance each price series to the latest quote at or before this block.
        while p[0] < x[0] and price:
            p = price.pop()
        by_block['price'] = p[1]
        if(by_block['price'] != None):
            by_block['marketcap']=by_block['supply_total']*by_block['price']
            if(by_month['miner_revenue'] == None):
                by_month['miner_revenue'] = 0
            by_month['miner_revenue'] += (x[3]+x[5])*by_block['price']
        else:
            by_month['miner_revenue'] = None
        #btc_price
        while p_btc[0] < x[0] and price_btc:
            p_btc = price_btc.pop()
        by_block['price_btc'] = p_btc[1]
        if(by_block['price_btc'] != None):
            by_block['marketcap_btc']=by_block['supply_total']*by_block['price_btc']
        #xmr_price
        while p_xmr[0] < x[0] and price_xmr:
            p_xmr = price_xmr.pop()
        by_block['price_xmr'] = p_xmr[1]
        if( by_block['price_xmr'] != None):
            by_block['marketcap_xmr']=by_block['supply_total']*by_block['price_xmr']
        # Monthly accumulators.
        by_month['fee'] += x[5]
        by_month['tx'] += x[4]
        by_month['volume'] += x[13]/(1.0*10**12)
        by_month['block_size'] += x[6]
        by_month['inputs'] += x[11]
        by_month['outputs'] += x[12]
        by_month['outputs_inputs'] += (x[12]-x[11])
        #reddit
        while reddit and reddit[-1][0] < x[0] :
            submission = reddit.pop()
            by_month['reddit_posts'] += 1
            by_month['reddit_comments'] += submission[1]
        #inflation 1y %
        year.append([x[0],x[3]])
        reward_1y += x[3]
        while year[0][0] < x[0] - 365*24*60*60:
            amt = year.popleft()
            reward_1y -= amt[1]
        by_block['inflation_1Y'] = reward_1y / by_block['supply_total']
        #nonce uniformity
        #calculate which bin the nonce belongs in like a histogram
        if by_block['block_height']>=1146200:
            #nonce uniformity constants
            #there are 2160 nonces and 2160 bins
            # Hard fork widened the nonce field to 64 bits at this height.
            max_nonce = 2**64
            bin_size = int((max_nonce)/(360*3))
        nonce = int(x[7])//bin_size
        #add to memory
        nonces.append(nonce)
        #put nonce into bin
        bin_count[nonce] += 1
        #recalculate bins with exactly one
        if bin_count[nonce] == 1:
            singleton_bins += 1
        elif bin_count[nonce] == 2:
            singleton_bins -= 1
        if len(nonces)> 360*3:
            #remove nonce and recalculate
            nonce = nonces.popleft()
            bin_count[nonce] -= 1
            if bin_count[nonce] == 1:
                singleton_bins += 1
            elif bin_count[nonce] == 0:
                singleton_bins -= 1
        #assuming the nonces are uniformly random, singleton_bins / 2160 == 1 / e
        #thus e * singleton_bins / 2160 == 1
        by_block['nonce_dist'] = (exp(1)*singleton_bins/(360*3))
        by_block['nonce'] = int(x[7])/max_nonce
        for i in range(len(block_output)):
            block_output[i].append(list(by_block.values())[i])
        # by month
        # Flush the monthly row when the calendar month changes (or at the
        # final block), then reset the accumulators.
        if datetime.fromtimestamp(x[0]).strftime('%B') != datetime.fromtimestamp(by_month['timestamp']).strftime('%B') or len(blocks)==0:
            if(by_month['tx']>0):
                by_month['tx_avg']=by_month['volume']/by_month['tx']
                by_month['fee']=by_month['fee']/by_month['tx']
            for i in range(len(month_output)):
                month_output[i].append(list(by_month.values())[i])
            by_month = {
                'timestamp' : x[0],
                'block_size' : 0,
                'fee' : 0,
                'fee_usd' : 0,
                'inputs' : 0,
                'miner_revenue' : 0,
                'outputs' : 0,
                'outputs_inputs' : 0,
                'reddit_posts' : 0,
                'reddit_comments' : 0,
                'tx' : 0,
                'tx_avg' : 0,
                'volume' : 0,
            }
        else:
            by_month['timestamp'] = x[0]
    return block_output,month_output
| StarcoderdataPython |
6612080 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 29 23:07:14 2021
@author: bartelsaa
"""
import re, os, io
from glob import glob
import pandas as pd
import sys
import pytz
# from sqlite3 import Error
from models import Sensor
from datetime import datetime, timezone
import pathlib
from maad.rois import (select_rois, create_mask)
from maad.features import (centroid_features)
from maad.sound import (load, resample, spectrogram, remove_background, median_equalizer,
remove_background_morpho, remove_background_along_axis,
sharpness, spectral_snr, trim, write, smooth)
from maad.util import (power2dB, plot2d, dB2power,format_features, overlay_rois,
overlay_centroid, crop_image)
# setup django settings and configs
import django
# Setup django env and add to models
sys.path.append("/app")
os.environ['DJANGO_SETTINGS_MODULE'] = 'pams.settings'
django.setup()
from django.utils.timezone import make_aware
from django import db
print(db.connections.databases)
print("DB NAME ")
from pams.models.audio_clip import AudioClip
output_folder = "data/processed_data/"
pd.set_option('display.max_columns', None)
print("STARTING INSERT")
from audio_processing.utils import things_found
import sqlite3
# from pams.
boat_hits = things_found.boat
critter_hits = things_found.critter
background = things_found.background
def __temp_insert_labels_to_db():
    """Label AudioClip rows from the hand-curated hit lists and copy each
    clip's audio to data/processed_data/<label>/<id>/chip.wav.

    BUG FIX: the original assigned ``temp_last_row.label`` but never called
    ``save()``, so labels were never persisted to the database. The three
    near-identical per-label loops are also collapsed into one.
    """
    labelled_ids = (
        [(clip_id, "boat") for clip_id in boat_hits]
        + [(clip_id, "critter") for clip_id in critter_hits]
        + [(clip_id, "background") for clip_id in background]
    )
    for clip_id, label in labelled_ids:
        print('updating key {} to value {}'.format(clip_id, label))
        clip = AudioClip.objects.get(id=clip_id)
        clip.label = label
        clip.save()  # persist the label (missing in the original)
        # Mirror the clip's audio into the per-label output tree.
        clip_path = pathlib.Path(os.path.join(output_folder, str(label), str(clip_id)))
        clip_path.mkdir(parents=True, exist_ok=True)
        audio, fs = load(os.path.join(clip.audio_path, 'chip.wav'))
        write(os.path.join(clip_path, 'chip.wav'), 8000, audio)
    return None


if __name__ == "__main__":
    __temp_insert_labels_to_db()
| StarcoderdataPython |
# FoxDot live-coding session: chromatic scale, root 0, 120 BPM.
Scale.default = Scale.chromatic
Root.default = 0
Clock.bpm = 120
# Chord-root pattern cycling 1,5,0,3 every 8 beats.
var.ch = var(P[1,5,0,3],8)
# Percussion and sample players.
~p1 >> play('m', amp=.8, dur=PDur(3,8), rate=[1,(1,2)])
~p2 >> play('-', amp=.5, dur=2, hpf=2000, hpr=linvar([.1,1],16), sample=1).often('stutter', 4, dur=3).every(8, 'sample.offadd', 1)
~p3 >> play('{ ppP[pP][Pp]}', amp=.8, dur=.5, sample=PRand(7), rate=PRand([.5,1,2]))
~p4 >> play('V', amp=.8, dur=1)
~p5 >> play('#', amp=1.2, dur=16, drive=.1, chop=128, formant=1)
# Melodic/chordal synth players built on var.ch.
~s1 >> glass(var.ch+(0,5,12), amp=1, dur=8, coarse=8)
~s2 >> piano(var.ch+(0,[5,5,3,7],12), amp=1, dur=8, delay=(0,.25,.5))
# Performance transitions (these lines were executed live, in sequence).
Group(p1, p2, p3).stop()
p4.lpf = linvar([4000,10],[32,0])
p4.stop()
s2.stop()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:24], dur=PDur(3,8), scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:20], dur=PDur(5,8), scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:64], dur=.25, scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~p4 >> play('V', amp=.5, dur=1, room=1, lpf=1200).every(7, 'stutter', cycle=16)
~p6 >> play('n', amp=.5, dur=1, delay=.5, room=1, hpf=linvar([2000,4000],16), hpr=.1)
s1.oct = 4
s1.formant = 1
~p3 >> play('{ ppP[pP][Pp]}', amp=.5, dur=.5, sample=PRand(7), rate=PRand([.5,1,2]), room=1, mix=.25)
Group(p6, s3).stop()
~s2 >> piano(var.ch+([12,0],[5,5,3,7],[0,12]), amp=1, dur=8, delay=(0,.25,.5), room=1, mix=.5, drive=.05, chop=32, echo=[1,2,1,4])
Group(p3, s1).stop()
# Stop all scheduled players and clear the clock.
Clock.clear()
9771903 | import pandas as pd
import numpy as np
import os
import json
import requests
from bs4 import BeautifulSoup
from io import StringIO
# def get_current_players():
# rootdir = '../resources/players/'
# player_names = []
# for subdir, dirs, files in os.walk(rootdir):
# for file in files:
# data_path = os.path.join(subdir)
# name = data_path.replace(rootdir, "")
# player_names.append(name)
#
# filename = "../resources/scraped_players.csv"
# # opening the file with w+ mode truncates the file
# f = open(filename, "w+")
# f.close()
# for path, subdirs, files in os.walk(rootdir):
# for name in files:
# if name == 'gw.csv':
# trainFile = os.path.join(path, name)
# pwd = os.getcwd()
# os.chdir(os.path.dirname(trainFile))
# df = pd.read_csv(os.path.basename(trainFile), sep=',', skiprows=[0], header=None, encoding='utf-8')
# os.chdir(pwd)
# with open(filename, 'a') as f:
# df.to_csv(f, header=False)
#
# def merge_ids():
# get_current_players()
# player_df = pd.read_csv('../resources/scraped_players.csv', sep=',', encoding='utf-8', header=None)
# id_file = '../resources/player_idlist.csv'
# ids = pd.read_csv(id_file, sep=',', encoding='utf-8')
#
# player_df['season'] = '2017/2018'
# player_df.columns = ['round', 'assists', 'attempted_passes', 'big_chances_created',
# 'big_chances_missed', 'bonus', 'bps', 'clean_sheets',
# 'clearances_blocks_interceptions', 'completed_passes', 'creativity',
# 'dribbles', 'ea_index', 'element', 'errors_leading_to_goal',
# 'errors_leading_to_goal_attempt', 'fixture', 'fouls', 'goals_conceded',
# 'goals_scored', 'ict_index', 'id', 'influence', 'key_passes',
# 'kickoff_time', 'kickoff_time_formatted', 'loaned_in', 'loaned_out',
# 'minutes', 'offside', 'open_play_crosses', 'opponent_team', 'own_goals',
# 'penalties_conceded', 'penalties_missed', 'penalties_saved',
# 'recoveries', 'red_cards', 'round', 'saves', 'selected', 'tackled',
# 'tackles', 'target_missed', 'team_a_score', 'team_h_score', 'threat',
# 'total_points', 'transfers_balance', 'transfers_in', 'transfers_out',
# 'value', 'was_home', 'winning_goals', 'yellow_cards', 'season']
# player_df.drop(['id'], axis=1, inplace=True)
# player_df.rename(columns={'element': 'id'}, inplace=True)
#
# players = player_df.merge(ids, how='left', on=['id'])
# players.to_csv('../resources/BaseData2017-18.csv', sep=',', encoding='utf-8')
#
# def team_data():
# merge_ids()
# raw_file = '../resources/players_raw.csv'
# players_raw = pd.read_csv(raw_file, sep=',', encoding='utf-8')
# teams = '../resources/team_codes.csv'
# team_codes = pd.read_csv(teams, sep=',', encoding='utf-8')
# team_codes.rename(columns={'team_code': 'team'}, inplace=True)
# all_teams = players_raw.merge(team_codes, how='left', on=['team'])
# new = all_teams[['first_name', 'second_name', 'team', 'team_name']].copy()
#
# cuurent_players_file = '../resources/BaseData2017-18.csv'
# current_players = pd.read_csv(cuurent_players_file, sep=',', encoding='utf-8')
#
# merged_players = current_players.merge(new, how='left', on=['first_name', 'second_name'])
#
# opponent_team_codes = team_codes.copy()
# opponent_team_codes.rename(columns={'team': 'opponent_team'}, inplace=True)
# data = merged_players.merge(opponent_team_codes, how='left', on=['opponent_team'])
# data.rename(columns={'team_name_x': 'team_name', 'team_name_y': 'opponent_team_name'}, inplace=True)
# data.drop(['Unnamed: 0', 'winning_goals'], axis=1, inplace=True)
# data.to_csv('../resources/BeforeCreatedFeatures2017-18.csv', sep=',', encoding='utf-8')
def merge_league_ranks():
    """Join FPL player gameweek data with Premier League table positions.

    Reads the pre-built player CSV, scrapes cumulative league standings for
    gameweeks 1-28 from the Premier League API, and writes the merged result
    (player rows annotated with their team's and opponent's league position)
    to ../resources/league_ranks_joined_with_opp.csv.
    """
    # team_data()
    CurrentPlayers = pd.read_csv('../resources/BeforeCreatedFeatures2017-18.csv', sep=',', encoding='utf-8')
    # Drop columns not used downstream.
    CurrentPlayers.drop(['Unnamed: 0', 'team', 'attempted_passes', 'big_chances_missed', 'bps', 'big_chances_created',
                         'clearances_blocks_interceptions', 'completed_passes', 'dribbles', 'round',
                         'errors_leading_to_goal', 'errors_leading_to_goal_attempt', 'fouls',
                         'kickoff_time', 'kickoff_time_formatted', 'loaned_in', 'loaned_out', 'offside',
                         'open_play_crosses','own_goals', 'penalties_conceded', 'penalties_missed', 'penalties_saved',
                         'recoveries', 'red_cards', 'selected', 'tackled', 'tackles', 'target_missed',
                         'transfers_balance', 'transfers_in', 'transfers_out', 'yellow_cards', 'ea_index'],
                        axis=1, inplace=True)
    CurrentPlayers.rename(columns={'team_name': 'team', 'opponent_team_name': 'opponents', 'second_name': 'name',
                                   'round.1':'round'}, inplace=True)
    # Normalize team names to match the Premier League API's naming.
    CurrentPlayers.replace(['Bournmouth', 'Brighton', 'Huddersfield'], ['AFC Bournemouth', 'Brighton and Hove Albion',
                                                                       'Huddersfield Town'], inplace=True)
    # Running index counters for the per-row DataFrames built below.
    a = 0
    b = 1
    c = 2
    df_list = []
    for i in range(1, 29):
        # Cumulative standings after gameweeks 1..i (compSeasons=79 is 2017/18).
        url = "https://footballapi.pulselive.com/football/standings?compSeasons=79&altIds=true&detail=2&FOOTBALL_COMPETITION=1&gameweekNumbers=1-" + str(
            i)
        # Browser-like headers required by the API.
        r = requests.get(url, headers={"Content-Type": "application/x-www-form-urlencoded", "Connection": "keep-alive",
                                       "Accept": "*/*",
                                       "Accept-Encoding": "gzip, deflate, br", "Accept-Language": "en-US, en; q=0.9",
                                       "Host": "footballapi.pulselive.com", "Origin": "https://www.premierleague.com",
                                       "Referer": "https://www.premierleague.com/tables?co=1&se=79&ha=-1",
                                       "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
                                       })
        data = json.loads(r.text)
        for x in data['tables']:
            y = x['entries']
            for j in range(0, 20):
                rank_data = y[j]
                position = rank_data["position"]
                team = rank_data['team']
                team_name = team['name']
                df = pd.DataFrame({'gameweek': i, 'position': position, 'name': team_name}, index=[a, b, c])
                df_list.append(df)
                a = a + 1
                b = b + 1
                c = c + 1
    result = pd.concat(df_list)
    result = result.drop_duplicates()
    result.rename(columns={'gameweek': 'round'}, inplace=True)
    result.rename(columns={'name': 'team'}, inplace=True)
    # Attach each player's own team position, then the opponent's position.
    df = pd.merge(CurrentPlayers, result, how='left', left_on=['round', 'team'], right_on=['round', 'team'])
    opponent_ranks = result.rename(columns={'team': 'opponents', 'position': 'opponent_position'})
    merged = pd.merge(df, opponent_ranks, how='left', left_on=['round', 'opponents'], right_on=['round', 'opponents'])
    # NOTE(review): dropna() discards any player row without a rank match —
    # confirm losing unmatched rows is intended.
    merged = merged.dropna()
    merged.to_csv('../resources/league_ranks_joined_with_opp.csv', sep=',', encoding='utf-8')
def create_features():
    """Build the per-player modelling feature set and write it to disk.

    Reads ``../resources/team_goals.csv`` (one row per player appearance with
    the goals scored/conceded by the player's team that round) and writes
    ``../resources/Dfeatures.csv`` with these added columns:

    * ``team_pot`` / ``concede_pot`` — running average of goals scored /
      conceded by the player's team, tracked separately for home and away
      fixtures.
    * ``opp_pot`` / ``opp_concede_pot`` — the same two statistics for the
      round's opponent.
    * ``form_points``, ``ict_form``, ``save_form``, ``long_form`` — rolling
      per-player means over 3 (or 5) appearances.
    * ``prev_points`` — the player's points from the previous appearance.

    Fixes over the previous revision: the opponent statistics were attached
    via 20 hard-coded copy-pasted team-name blocks (two of which contained
    anonymized ``'<NAME>'`` placeholders and therefore never matched, so one
    team's opponent stats were silently dropped); they are now driven by the
    sorted team list itself.  The ``prev_points`` shift, previously recomputed
    for every player × row (O(n^2) full-frame shifts), is computed once.
    """
    merged = pd.read_csv('../resources/team_goals.csv', sep=',', index_col=0, encoding='utf-8')
    team_array = sorted(merged.team.unique())

    def _running_split_average(fixtures, goal_col, out_col):
        """Fill `out_col` of `fixtures` (one row per round, sorted by round)
        with the running mean of `goal_col`, kept separately for home and
        away fixtures.  Returns a round -> value lookup dict."""
        home_count = 0
        home_goals = 0
        away_count = 0
        away_goals = 0
        for index, row in fixtures.iterrows():
            if row.was_home == True:
                home_count += 1
                home_goals += row[goal_col]
                fixtures.loc[index, out_col] = home_goals / home_count
            else:
                away_count += 1
                away_goals += row[goal_col]
                fixtures.loc[index, out_col] = away_goals / away_count
        return dict(zip(fixtures['round'], fixtures[out_col]))

    # Per-team scoring/conceding potency, broadcast to every player row.
    df_array = []
    for current_team in team_array:
        tempDF = merged.loc[merged['team'] == current_team]
        # One representative row per round, in round order, for the running averages.
        fixtures = tempDF.copy(deep=True)
        fixtures.drop_duplicates(subset='round', inplace=True)
        fixtures.sort_values('round', ascending=True, inplace=True)
        for goal_col, out_col in (('team_goals', 'team_pot'),
                                  ('opposition_goals', 'concede_pot')):
            per_round = _running_split_average(fixtures, goal_col, out_col)
            for index, row in tempDF.iterrows():
                if row['round'] in per_round:
                    tempDF.loc[index, out_col] = per_round[row['round']]
        df_array.append(tempDF)
        print(current_team)

    Players = pd.concat(df_array)
    Players.drop_duplicates(subset=['id', 'round'], inplace=True)
    Players.sort_values(['id', 'round'], ascending=True, inplace=True)

    # Round -> potency lookup tables per team; df_array[i] corresponds to
    # team_array[i] because df_array was built by iterating team_array.
    team_pots = []
    concede_pots = []
    for team in df_array:
        per_round = team.drop_duplicates(subset='round').sort_values('round', ascending=True)
        team_pots.append(dict(zip(per_round['round'], per_round['team_pot'])))
        concede_pots.append(dict(zip(per_round['round'], per_round['concede_pot'])))

    # Attach the opponent's potency statistics to every player row.
    for team_idx, opponent_name in enumerate(team_array):
        pots = team_pots[team_idx]
        concede = concede_pots[team_idx]
        for index, row in Players.iterrows():
            if Players.loc[index, 'opponents'] == opponent_name:
                rnd = Players.loc[index, 'round']
                if rnd in pots:
                    Players.loc[index, 'opp_pot'] = pots[rnd]
                if rnd in concede:
                    Players.loc[index, 'opp_concede_pot'] = concede[rnd]

    # Only appearances with actual playing time contribute to form features.
    Players = Players[Players['minutes'] > 0]

    # Rolling per-player form features (window in appearances, not rounds).
    for out_col, src_col, window in (('form_points', 'total_points', 3),
                                     ('ict_form', 'ict_index', 3),
                                     ('save_form', 'saves', 3),
                                     ('long_form', 'total_points', 5)):
        # w=window binds the loop variable now (avoids late-binding closure).
        Players[out_col] = Players.groupby('id')[src_col].apply(
            lambda x, w=window: x.rolling(center=False, window=w).mean())
        Players = Players.fillna(0)

    # Previous appearance's points, per player; first appearance gets 0.
    temp = Players.copy()
    temp['prev_points'] = temp.groupby('id')['total_points'].shift()
    temp = temp.fillna(0)
    temp.to_csv('../resources/Dfeatures.csv', sep=',', encoding='utf-8')
if __name__ == '__main__':
    # Guard the pipeline entry point so importing this module for reuse of
    # create_features() does not trigger the full (slow, file-writing) run.
    create_features()
| StarcoderdataPython |
4864858 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class UpResBlock(nn.Module):
    """Residual block that doubles the spatial resolution of its input.

    Main path: BN -> ReLU -> 2x upsample -> 3x3 conv -> BN -> ReLU -> 3x3 conv.
    Skip path: 2x upsample -> 3x3 conv.  Channel count is unchanged.

    Fixes: ``forward`` previously called a bare ``upsample(...)`` (NameError —
    the method lives on the class); the second BatchNorm's constant init was
    applied to ``bn0`` twice instead of ``bn1``; ``nn.init.normal_(w, 0.02)``
    set the *mean* to 0.02 (std defaulted to 1) where the conventional DCGAN
    init is N(0, 0.02); ``F.upsample`` is deprecated in favor of
    ``F.interpolate`` (same default nearest-neighbor mode).
    """

    def __init__(self, ch):
        super(UpResBlock, self).__init__()
        self.c0 = nn.Conv2d(ch, ch, 3, 1, 1)
        nn.init.normal_(self.c0.weight, 0.0, 0.02)
        self.c1 = nn.Conv2d(ch, ch, 3, 1, 1)
        nn.init.normal_(self.c1.weight, 0.0, 0.02)
        # Skip-connection conv.
        self.cs = nn.Conv2d(ch, ch, 3, 1, 1)
        nn.init.normal_(self.cs.weight, 0.0, 0.02)
        self.bn0 = nn.BatchNorm2d(ch)
        nn.init.constant_(self.bn0.weight, 1.0)
        nn.init.constant_(self.bn0.bias, 0.0)
        self.bn1 = nn.BatchNorm2d(ch)
        nn.init.constant_(self.bn1.weight, 1.0)  # was bn0 (copy-paste bug)
        nn.init.constant_(self.bn1.bias, 0.0)

    @classmethod
    def upsample(cls, x):
        """Nearest-neighbor upsample of a (N, C, H, W) tensor to (2H, 2W)."""
        h, w = x.shape[2:]
        return F.interpolate(x, size=(h * 2, w * 2))

    def forward(self, x):
        h = self.c0(self.upsample(F.relu(self.bn0(x))))
        h = self.c1(F.relu(self.bn1(h)))
        hs = self.cs(self.upsample(x))
        return h + hs
class ResNetGenerator(nn.Module):
    """GAN generator: latent vector -> (3, bottom_width*8, bottom_width*8) image.

    A linear layer reshapes z into a (ch*4, bottom_width, bottom_width) feature
    map, three UpResBlocks each double the resolution, and a final 3x3 conv +
    tanh produces the image in [-1, 1].

    Fixes: ``nn.init.normal_(w, s)`` sets the *mean* to ``s`` (std defaults
    to 1); the intended scales are now passed as std with mean 0.  ``F.tanh``
    is deprecated — replaced with ``torch.tanh`` (identical output).
    """

    def __init__(self, ch=64, dim_z=128, bottom_width=4):
        """
        Args:
            ch: base channel multiplier (feature maps use ch * 4 channels).
            dim_z: dimensionality of the latent input.
            bottom_width: spatial size of the initial feature map.
        """
        super(ResNetGenerator, self).__init__()
        self.bottom_width = bottom_width
        self.dim_z = dim_z
        self.ch = ch
        self.l0 = nn.Linear(dim_z, (bottom_width ** 2) * ch * 4)
        # Scale 1/sqrt(dim_z) keeps the pre-activation variance ~1.
        nn.init.normal_(self.l0.weight, 0.0, math.sqrt(1.0 / dim_z))
        self.r0 = UpResBlock(ch * 4)
        self.r1 = UpResBlock(ch * 4)
        self.r2 = UpResBlock(ch * 4)
        self.bn2 = nn.BatchNorm2d(ch * 4)
        nn.init.constant_(self.bn2.weight, 1.0)
        nn.init.constant_(self.bn2.bias, 0.0)
        self.c3 = nn.Conv2d(ch * 4, 3, 3, 1, 1)
        nn.init.normal_(self.c3.weight, 0.0, 0.02)

    def forward(self, x):
        """Map a (batch, dim_z) latent batch to a (batch, 3, H, W) image batch."""
        h = F.relu(self.l0(x))
        h = h.view(-1, self.ch * 4, self.bottom_width, self.bottom_width)
        h = self.r0(h)
        h = self.r1(h)
        h = self.r2(h)
        h = self.bn2(F.relu(h))
        h = self.c3(h)
        h = torch.tanh(h)
        return h
| StarcoderdataPython |
8052760 | import logging
import logging.config
from zmq.log.handlers import PUBHandler
class ProxyLogger(object):
    """Classmethod helpers that configure the process-wide (root) logger."""

    # Shared record format used by the file and stream handlers below.
    formatter = logging.Formatter("%(asctime)s - %(name)-30s - %(levelname)-8s - %(message)s")

    @classmethod
    def init_proxy_logger(cls, config):
        """Set up logging from a config dict.

        A user-supplied logging config file ("logger_config") wins outright;
        otherwise the root logger level ("log_level") is applied and records
        go to "log_file" if given, else to a plain stream handler.
        """
        if config["logger_config"]:
            # disable_existing_loggers=False keeps loggers that modules grabbed
            # at import time (i.e. before this call) functional.
            logging.config.fileConfig(config["logger_config"], disable_existing_loggers=False)
            return
        # Build the configuration from the individual settings instead.
        root_logger = logging.getLogger()
        root_logger.setLevel(getattr(logging, config["log_level"].upper()))
        if config["log_file"]:
            cls.register_file_handler(config["log_file"])
        else:
            cls.register_stream_handler()

    @classmethod
    def register_zmq_handler(cls, zmq_socket):  # pragma: no cover
        """Publish log records over the given ZeroMQ socket, topic 'logger'."""
        zmq_handler = PUBHandler(zmq_socket)
        zmq_handler.root_topic = "logger"
        logging.getLogger().addHandler(zmq_handler)

    @classmethod
    def register_file_handler(cls, filename):  # pragma: no cover
        """Append records to `filename` (UTF-8) using the shared formatter."""
        file_handler = logging.FileHandler(filename, encoding="utf8")
        file_handler.setFormatter(cls.formatter)
        logging.getLogger().addHandler(file_handler)

    @classmethod
    def register_stream_handler(cls):  # pragma: no cover
        """Send records to the default stream using the shared formatter."""
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(cls.formatter)
        logging.getLogger().addHandler(stream_handler)

    @classmethod
    def get_logger(cls, name):  # pragma: no cover
        """Return the named logger (thin wrapper over logging.getLogger)."""
        return logging.getLogger(name)
| StarcoderdataPython |
1907865 | <gh_stars>0
from injector import inject
from app.climates.models import Climate
from app.climates.repositories import ClimateRepository
from instance.resources.helpers import read_elements, climates_csv
class ClimatePopulationService:
    """Populates the climate repository from the bundled CSV resource."""

    @inject
    def __init__(self, climate_repository: ClimateRepository):
        self.climate_repository = climate_repository

    def db_populate(self):
        """Insert every climate parsed from the CSV into the repository."""
        for parsed_climate in self.read_climates():
            self.climate_repository.add(parsed_climate)

    def read_climates(self):
        """Return Climate models built from the rows of the climates CSV."""
        return read_elements(climates_csv(), self.build_climate)

    @staticmethod
    def build_climate(row):
        """Map one CSV row to a Climate model (column 0 holds the name)."""
        return Climate(name=row[0])
| StarcoderdataPython |
3416864 | <filename>array/baseball_game.py
def cal_points(ops):
    """Return the total score of a baseball-points round.

    Each token in `ops` is applied to a running record of scores:
    a number string records that score, '+' records the sum of the last
    two scores, 'D' records double the last score, and 'C' cancels the
    last recorded score.  The result is the sum of all recorded scores.
    """
    record = []
    for token in ops:
        if token == 'C':
            record.pop()
        elif token == 'D':
            record.append(record[-1] * 2)
        elif token == '+':
            record.append(record[-2] + record[-1])
        else:
            record.append(int(token))
    return sum(record)


print(cal_points(["5", "2", "C", "D", "+"]))
print(cal_points(["5", "-2", "4", "C", "D", "9", "+", "+"]))
print(cal_points(["1"]))
241108 | <reponame>Z2PackDev/TBModels
#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
    """Load the unsymmetrized InAs tight-binding model test fixture."""
    return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
    """Load the stored list of symmetry operations / groups for InAs."""
    return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
    models_close,
    input_model,  # pylint: disable=redefined-outer-name
    symmetries,  # pylint: disable=redefined-outer-name
    sample,
):
    """
    Test the 'symmetrize' method.

    Applies every stored symmetry (whole groups and single operations) in
    sequence and compares the result against a stored reference model.
    """
    model_res = input_model
    for sym in symmetries:
        # Entries carrying a 'full_group' attribute describe a whole symmetry
        # group; plain entries are single operations (full_group=False).
        if hasattr(sym, "full_group"):
            model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
        else:
            model_res = model_res.symmetrize([sym], full_group=False)
    model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
    models_close(model_res, model_reference)
def test_position_tolerance(
    models_close,
    input_model,  # pylint: disable=redefined-outer-name
    symmetries,  # pylint: disable=redefined-outer-name
    sample,
):
    """
    Test the 'position_tolerance' argument in the 'symmetrize' method.

    Perturbs one orbital position by 0.01: without 'position_tolerance' the
    symmetrization must raise; with a tolerance of 0.05 it must succeed and
    reproduce the (equally perturbed) reference model.
    """
    model_in = copy.deepcopy(input_model)
    model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
    # Shift the first orbital position slightly off its symmetric location
    # in both the input and the reference.
    model_in.pos[0] += 0.01
    model_reference.pos[0] += 0.01
    # First run without 'position_tolerance' argument - this should raise
    with pytest.raises(tbmodels.exceptions.TbmodelsException):
        model_res = model_in
        for sym in symmetries:
            if hasattr(sym, "full_group"):
                model_res = model_res.symmetrize(
                    sym.symmetries, full_group=sym.full_group
                )
            else:
                model_res = model_res.symmetrize([sym], full_group=False)
    # Adding the 'position_tolerance' argument suppresses the error
    model_res = model_in
    for sym in symmetries:
        if hasattr(sym, "full_group"):
            model_res = model_res.symmetrize(
                sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
            )
        else:
            model_res = model_res.symmetrize(
                [sym], full_group=False, position_tolerance=0.05
            )
    models_close(model_res, model_reference)
| StarcoderdataPython |
8107716 | import torch
def gumbel_sigmoid(logits: torch.Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10) -> torch.Tensor:
    """Differentiable (relaxed) Bernoulli sample via the Gumbel-sigmoid trick.

    Adds logistic noise (a difference of two Gumbel samples) to `logits`,
    applies a temperature-scaled sigmoid, and — if `hard` — binarizes the
    result in the forward pass while keeping the soft gradient
    (straight-through estimator).
    """
    # Two independent U(0, 1) draws per logit, stacked on a leading axis of 2.
    uniform = logits.new_empty([2]+list(logits.shape)).uniform_(0,1)
    # Logistic noise: -log(log(u1)/log(u0)) = (-log(-log u0)) - (-log(-log u1)),
    # i.e. the difference of two Gumbel(0, 1) samples; eps guards log(0).
    noise = -((uniform[1] + eps).log() / (uniform[0] + eps).log() + eps).log()
    res = torch.sigmoid((logits + noise) / tau)
    if hard:
        # Straight-through: hard 0/1 values forward, sigmoid gradient backward.
        res = ((res > 0.5).type_as(res) - res).detach() + res
    return res
def sigmoid(logits: torch.Tensor, mode: str = "simple", tau: float = 1, eps: float = 1e-10):
    """Dispatch between a plain sigmoid and the Gumbel-sigmoid relaxations.

    Args:
        logits: input tensor of unnormalized log-odds.
        mode: "simple" for a plain sigmoid, "soft"/"hard" for the
            (optionally straight-through) Gumbel-sigmoid.
        tau: Gumbel-sigmoid temperature (ignored for "simple").
        eps: numerical-stability constant (ignored for "simple").

    Raises:
        ValueError: if `mode` is not one of "simple", "soft", "hard".
            (Previously `assert False`, which is silently stripped under
            `python -O`.)
    """
    if mode == "simple":
        return torch.sigmoid(logits)
    if mode in ("soft", "hard"):
        return gumbel_sigmoid(logits, tau, hard=mode == "hard", eps=eps)
    raise ValueError("Invalid sigmoid mode: %s" % mode)
| StarcoderdataPython |
56763 | <filename>run_defense.py
#from __future__ import print_function
import sys, argparse
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from lib.utils.data_utils import *
from lib.utils.model_utils import *
from lib.attacks.nn_attacks import *
from lib.defenses.nn_defenses import *
#-----------------------------------------------------------------------------#
def main(argv):
    """
    Entry point of run_defense.py.

    Creates adversarial examples for the configured model/dataset and
    evaluates the attack, then applies the configured defense over a sweep
    of reduced dimensions and re-evaluates the same attack (the attack is
    not aware of the defense).
    """
    # Parameters
    batchsize = 500  # Fixing batchsize
    # NOTE(review): batchsize is unused in this function body — confirm it is
    # consumed implicitly (e.g. via globals) or remove it.
    no_of_mags = 50  # No. of deviations to consider
    dev_list = np.linspace(0.1, 5.0, no_of_mags)
    # Reduced dimensions used
    rd_list = [784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]

    # Create model_dict from arguments
    model_dict = model_dict_create()

    # Load and parse specified dataset into numpy arrays
    print('Loading data...')
    dataset = model_dict['dataset']
    # NOTE(review): a dataset other than MNIST/GTSRB/HAR leaves X_train etc.
    # unbound and fails with NameError below — consider an explicit error.
    if (dataset == 'MNIST') or (dataset == 'GTSRB'):
        X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)
    elif dataset == 'HAR':
        X_train, y_train, X_test, y_test = load_dataset(model_dict)
        X_val = None
        y_val = None

    data_dict, test_prediction, dr_alg, X_test, input_var, target_var = \
        model_setup(model_dict, X_train, y_train, X_test, y_test, X_val, y_val)

    # Running attack and saving samples
    print('Creating adversarial samples...')
    adv_x_ini, output_list = attack_wrapper(model_dict, data_dict, input_var,
                                            target_var, test_prediction, dev_list,
                                            X_test, y_test)
    print_output(model_dict, output_list, dev_list)
    save_images(model_dict, data_dict, X_test, adv_x_ini, dev_list)

    # Run defense: sweep over reduced dimensions for the selected method.
    defense = model_dict['defense']
    if defense != None:
        for rd in rd_list:
            if defense == 'recons':
                recons_defense(model_dict, data_dict, input_var, target_var,
                               test_prediction, dev_list, adv_x_ini, rd,
                               X_train, y_train, X_test, y_test)
            elif defense == 'retrain':
                retrain_defense(model_dict, dev_list, adv_x_ini, rd, X_train,
                                y_train, X_test, y_test, X_val, y_val)
#-----------------------------------------------------------------------------#


#-----------------------------------------------------------------------------#
if __name__ == '__main__':
    main(sys.argv[1:])
#-----------------------------------------------------------------------------#
| StarcoderdataPython |
5025849 | <gh_stars>0
# Minimal smoke-test script: prints a greeting when executed.
print('hello to Every one !!')
| StarcoderdataPython |
11374492 | import pytest
from django.utils.text import slugify
from ..models import Board
pytestmark = pytest.mark.django_db
def test_create_board_via_factory(board):
    """Smoke test: the `board` factory fixture builds without raising."""
    pass
def test_generated_slug_is_based_on_slugifed_title(board):
    """The auto-generated slug starts with the slugified board title
    (a uniqueness suffix may follow, hence startswith, not equality)."""
    assert board.slug.startswith(slugify(board.title))
def test_fields_exist():
    """The Board model exposes exactly the expected set of fields."""
    expected = {
        "id",
        "slug",
        "title",
        "description",
        "public",
        "created",
        "modified",
        "organization",
    }
    actual = {field.name for field in Board._meta.get_fields()}
    assert actual == expected
| StarcoderdataPython |
8127220 | import pandas as pd
import argparse
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn import linear_model
class Portfolio(object):
def __init__(self, sec1mean, sec2mean, sec1vol, sec2vol, corr, rebalance_threshold):
self.numberOfStocks = 2
self.initprices = np.asarray([5, 5])
self.prices = self.initprices
self.initholdings = [10, 10]
self.holdings = self.initholdings
self.inittotal = 100
self.total = self.inittotal
self.initweightings = [.5, .5]
self.weightings = self.initweightings
# input
self.means = np.asarray([sec1mean, sec2mean])
self.corr = corr
self.sec1vol = sec1vol
self.sec2vol = sec2vol
self.dailymeans = self.means / 252
self.dailysec1vol = self.sec1vol / np.sqrt(252)
self.dailysec2vol = self.sec2vol / np.sqrt(252)
dailycov = self.dailysec1vol * self.dailysec2vol * self.corr
self.dailycovmat = np.asarray([[self.dailysec1vol ** 2, dailycov], [dailycov, self.dailysec2vol ** 2]])
self.rebalance_threshold = rebalance_threshold
# simulate price movements
def Brownian(self, periods):
dt = 1
# standard brownian increment = multivariate_normal distribution * sqrt of dt
b = np.random.multivariate_normal((0., 0.), ((1., 0.), (0., 1.)), int(periods)) * np.sqrt(dt)
# standard brownian motion for two variables ~ N(0,t)
W = np.cumsum(b, axis=0)
W = np.insert(W, 0, (0., 0.), axis=0)
W = np.asarray(W)
return W
# So: initial stock price
# W: brownian motion
# T: time period
def GBM(self, W, T):
S = []
# divide time axis from 0 to 1 into T pieces,
t = np.linspace(0, T, T + 1)
L = np.linalg.cholesky(self.dailycovmat)
var = self.dailycovmat.diagonal()
for i in range(T + 1):
drift = (self.dailymeans - (0.5 * var)) * t[i]
diffusion = np.dot(L, W[i])
S.append(self.initprices * np.exp(drift + diffusion))
S = np.asarray(S)
return S
def PriceMove(self, periods):
W = self.Brownian(periods)
return self.GBM(W, periods)
# simulate portfolio performance
def Simulate(self, paths, tcost, periods, seed):
cost = 0
trade = 0
nRebalance = 0
decreaseReturn = 0
fig, ax = plt.subplots(nrows=1, ncols=1)
np.random.seed(seed)
for i in range(paths):
pricemovements = self.PriceMove(periods)
print("path %d: " % (i + 1))
tradePath, costPath, nRebalancePath, decreaseReturnPath = self.Rebalance(pricemovements, tcost, periods)
cost += costPath
trade += tradePath
nRebalance += nRebalancePath
decreaseReturn += decreaseReturnPath
t = np.linspace(0, periods, periods + 1)
image, = ax.plot(t, pricemovements[:, 0], label="stock1")
image, = ax.plot(t, pricemovements[:, 1], label="stock2", ls='--')
plt.ylabel('stock price, $')
plt.xlabel('time, day')
plt.title('correlated brownian simulation')
plt.draw()
fig.savefig("simulate.png")
averageRebalance = nRebalance / paths
averageDollarTraded = trade / paths
averageTcost = cost / paths
averageDecreaseReturn = decreaseReturn / paths
print(
"average number of rebalances: %.3f\naverage dollars traded: %.3f$\naverage transaction cost as percentage of book value: %.3f%%\nexpected transaction costs: %.3f%%"
% (averageRebalance, averageDollarTraded, averageTcost * 100, averageDecreaseReturn * 100))
def Rebalance(self, pricemovements, tcost, periods):
trades = []
priceSpread = []
costs = []
nRebalance = 0
# len(pricemovements) = periods + 1
for i in range(1, periods + 1):
newPrices = pricemovements[i]
# update prices, dollar value, and weightings of a portfolio each time prices change
self.updatePrices(newPrices)
difference = np.subtract(self.weightings, self.initweightings)
# max returns a (positive) percentage difference between the actual weigntings and the desired weightings
if max(difference) >= self.rebalance_threshold:
# change the holdings so that the actual weightings are as desired
self.updateHoldings()
# difference in weightings * total = change of the amount of dollar invested in two stocks
trade = np.sum(np.absolute(difference * self.total))
trades.append(trade)
costs.append(trade * tcost)
priceSpread.append(np.round(self.prices, 2))
nRebalance += 1
# pandaframe
data = {"price spread, $": priceSpread,
"size of the trade, $": trades,
"transaction cost, $": costs}
df = pd.DataFrame(data=data, index=range(1, nRebalance + 1))
df.index.name = "#rebalancing"
print(df)
# return metrics
tradeTotal = sum(trades)
costTotal = tradeTotal * tcost
annualizedPeriods = periods / 252
annualizedReturn = (self.total / self.inittotal) ** (1 / annualizedPeriods) - 1
postcost = ((self.total - costTotal) / self.inittotal) ** (1 / annualizedPeriods) - 1
decreaseReturn = annualizedReturn - postcost
costTotalPer = costTotal / self.total
# set parameters back to initial value
self.reset()
return tradeTotal, costTotalPer, nRebalance, decreaseReturn
def reset(self):
self.weightings = self.initweightings
self.holdings = self.initholdings
self.prices = self.initprices
self.total = self.inittotal
def updatePrices(self, newPrices):
self.prices = newPrices
# dot product of the number of shares and price per share
self.total = np.dot(self.holdings, newPrices)
# the weight of stocks after stock prices change = (number of share * price of stock per share)/total amount of asset
self.weightings = [holding * price / self.total for price, holding in zip(self.prices, self.holdings)]
def updateHoldings(self):
self.holdings = [self.total * initWeight / price for initWeight, price in zip(self.initweightings, self.prices)]
self.weightings = [price * holding / self.total for holding, price in zip(self.holdings, self.prices)]
# compute how tcost vary with respect to other variables
def decreaseReturn(self, pricemovements, tcost, periods):
costTotal = 0
for i in range(1, len(pricemovements)):
newPrices = pricemovements[i]
self.updatePrices(newPrices)
difference = np.subtract(self.weightings, self.initweightings)
if max(difference) >= self.rebalance_threshold:
self.updateHoldings()
trade = np.sum(np.absolute(difference * self.total))
costTotal += trade * tcost
annualizedPeriods = periods / 252
annualizedReturn = (self.total / self.inittotal) ** (1 / annualizedPeriods) - 1
postcost = ((self.total - costTotal) / self.inittotal) ** (1 / annualizedPeriods) - 1
decreaseReturn = annualizedReturn - postcost
self.reset()
return decreaseReturn
def Tests(self, paths, tcost, periods, step, seed):
meanDecrease = []
totalDecrease = 0
fig, ax = plt.subplots(nrows=1, ncols=1)
np.random.seed(seed)
for i in range(1, paths + 1):
pricemovements = self.PriceMove(periods)
decreaseReturn = self.decreaseReturn(pricemovements, tcost, periods)
totalDecrease += decreaseReturn * 100
if (i % step == 0):
meanDecrease.append(totalDecrease / i)
print("when seed = %d, paths = %d, the average transaction cost is: %f%%" % (seed, paths, meanDecrease[-1]))
t = np.linspace(1, paths, len(meanDecrease))
image, = ax.plot(t, meanDecrease)
plt.ylabel('sample mean transaction cost (%)')
plt.xlabel('number of paths')
plt.title('convergence test (seed = %d)' % (seed))
plt.draw()
fig.savefig("convergence test (seed=%d).png" % (seed))
def updateCorr(self, corr):
dailycov = self.dailysec1vol * self.dailysec2vol * corr
self.dailycovmat = np.asarray([[self.dailysec1vol ** 2, dailycov], [dailycov, self.dailysec2vol ** 2]])
def updateSec1Vol(self, sec1vol):
self.sec1vol = sec1vol
self.dailysec1vol = sec1vol / np.sqrt(252)
dailycov = self.dailysec1vol * self.dailysec2vol * self.corr
self.dailycovmat = np.asarray([[self.dailysec1vol ** 2, dailycov], [dailycov, self.dailysec2vol ** 2]])
def updateThreshold(self, threshold):
self.rebalance_threshold = threshold
def updateSec1Mean(self, sec1mean):
self.means[0] = sec1mean
self.dailymeans = self.means / 252
def solveCorr(self, paths, tcost, periods, seed):
start = 0
end = 1
x = np.linspace(0, 1, 11)
y = []
for i in range(len(x)):
totalDecrease = 0
self.updateCorr(x[i])
np.random.seed(seed)
for i in range(paths):
pricemovements = self.PriceMove(periods)
decreaseReturn = self.decreaseReturn(pricemovements, tcost, periods)
totalDecrease += decreaseReturn * 100
meanDecrease = np.round(totalDecrease / paths, 1)
y.append(meanDecrease)
fig, ax = plt.subplots(nrows=1, ncols=1)
image, = ax.plot(x, y)
plt.ylabel('transaction cost (%)')
plt.xlabel('correlation coefficient')
plt.title('corr - tcost graph')
plt.draw()
fig.savefig('corr-tcost graph')
print(
'corr-tcost:\nseed=%d\nsec1vol=%.2f\nsec2vol=%.2f\ncorr=%.2f-%.2f\nsec1mean=%.2f\nsec2mean=%.2f\nthreshold=%.2f'
% (seed, self.sec1vol, self.sec2vol, start, end, self.means[0], self.means[1], self.rebalance_threshold))
print('coeff:', np.polyfit(x, y, 1))
'''reg = linear_model.Lasso(alpha = 0.1)
reg.fit(x,y)
print('lasso coeff:',reg.coef_)
print('lasso intercept',reg.intercept_)'''
def solveSec1Vol(self, paths, tcost, periods, seed):
start = .01
end = .51
x = np.linspace(start, end, 11)
y = []
for i in range(len(x)):
totalDecrease = 0
self.updateSec1Vol(x[i])
np.random.seed(seed)
for i in range(paths):
pricemovements = self.PriceMove(periods)
decreaseReturn = self.decreaseReturn(pricemovements, tcost, periods)
totalDecrease += decreaseReturn * 100
meanDecrease = np.round(totalDecrease / paths, 1)
y.append(meanDecrease)
fig, ax = plt.subplots(nrows=1, ncols=1)
image, = ax.plot(x, y)
plt.ylabel('transaction cost (%)')
plt.xlabel('security 1 volatility')
plt.title('sec1vol - tcost graph')
plt.draw()
fig.savefig('sec1vol-tcost graph')
print(
'sec1vol_tcost:\nseed=%d\nsec1vol=%.2f-%.2f\nsec2vol=%.2f\ncorr=%.2f\nsec1mean=%.2f\nsec2mean=%.2f\nthreshold=%.2f'
% (seed, start, end, self.sec2vol, self.corr, self.means[0], self.means[1], self.rebalance_threshold))
print("coeff:", np.polyfit(x, y, 1))
def solveSec1Mean(self, paths, tcost, periods, seed):
    """Sweep security 1's mean return and plot mean transaction cost.

    BUG FIX: the original hard-coded `paths = 500`, silently ignoring the
    `paths` argument that every sibling solve* method honors; the parameter
    is now respected.
    """
    start = 0
    end = .5
    means = np.linspace(start, end, 11)
    mean_decreases = []
    for mean in means:  # distinct loop names; the original shadowed `i`
        totalDecrease = 0
        self.updateSec1Mean(mean)
        np.random.seed(seed)
        for _ in range(paths):
            pricemovements = self.PriceMove(periods)
            decreaseReturn = self.decreaseReturn(pricemovements, tcost, periods)
            totalDecrease += decreaseReturn * 100
        mean_decreases.append(np.round(totalDecrease / paths, 1))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(means, mean_decreases)
    plt.ylabel('transaction cost (%)')
    plt.xlabel('security 1 return')
    plt.title('sec1mean - tcost graph')
    plt.draw()
    fig.savefig('sec1mean-tcost graph')
    print(
        'sec1mean-tcost:\nseed=%d\nsec1vol=%.2f\nsec2vol=%.2f\ncorr=%.2f\nsec1mean=%.2f-%.2f\nsec2mean=%.2f\nthreshold=%.2f'
        % (seed, self.sec1vol, self.sec2vol, self.corr, start, end, self.means[1], self.rebalance_threshold))
    print('coef:', np.polyfit(means, mean_decreases, 1))
def solveThreshold(self, paths, tcost, periods, seed):
    """Sweep the rebalance threshold (%) and plot mean transaction cost.

    Threshold values in [start, end] percent are converted to fractions
    before being applied; otherwise same Monte Carlo protocol as solveCorr.
    """
    start = 1
    end = 10
    thresholds = np.linspace(start, end, 11)
    mean_decreases = []
    for threshold in thresholds:  # distinct loop names; the original shadowed `i`
        totalDecrease = 0
        self.updateThreshold(threshold / 100)  # percent -> fraction
        np.random.seed(seed)
        for _ in range(paths):
            pricemovements = self.PriceMove(periods)
            decreaseReturn = self.decreaseReturn(pricemovements, tcost, periods)
            totalDecrease += decreaseReturn * 100
        mean_decreases.append(np.round(totalDecrease / paths, 1))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(thresholds, mean_decreases)
    plt.ylabel('transaction cost (%)')
    plt.xlabel('rebalance threshold (%)')
    plt.title('threshold - tcost graph')
    plt.draw()
    fig.savefig("threshold-tcost graph")
    print(
        "threshold-tcost:\nseed=%d\nsec1vol=%.2f\nsec2vol=%.2f\ncorr=%.2f\nsec1mean=%.2f\nsec2mean=%.2f\nthreshold=%.2f-%.2f"
        % (seed, self.sec1vol, self.sec2vol, self.corr, self.means[0], self.means[1], start, end))
    print('coef:', np.polyfit(thresholds, mean_decreases, 1))
def main():
    """Parse CLI options, build a Portfolio and dispatch the chosen analysis.

    The mode flags are checked in order: simulate, convergence_test,
    solveCorr, solveVol, solveThreshold, solveReturn; only the first one
    set is run.

    BUG FIX: the mode flags used `type=bool`, which turns ANY non-empty
    string (even "False") into True; `action="store_true"` is the correct
    way to expose boolean flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--sec1vol", help="annualized volatility of security 1", type=float, default=.4)
    parser.add_argument("--sec2vol", help="annualized volatility of security 2", type=float, default=.3)
    parser.add_argument("--corr", help="correlation between security 1 and 2", type=float, default=.8)
    parser.add_argument("--sec1mean", help="annualized return of security 1", type=float, default=.05)
    parser.add_argument("--sec2mean", help="annualized return of security 2", type=float, default=.1)
    parser.add_argument("--paths", help="number of monte carlo iterations", type=int, default=500)
    parser.add_argument("--periods", help="number of days", type=int, default=252)
    parser.add_argument("--tcost", help="transaction cost per trade", type=float, default=.1)
    parser.add_argument("--rebalance_threshold", help="the minimal divergence that causes rebalance", type=float,
                        default=.01)
    parser.add_argument("--seed", help="set seed for the simulation", type=int, default=5)
    parser.add_argument("--simulate",
                        help="plot price movements of two stocks and print information about their transaction costs",
                        action="store_true")
    parser.add_argument("--convergence_test", help="test convergence of transaction cost", action="store_true")
    parser.add_argument("--step", help="set the step for convergence test", type=int, default=10)
    parser.add_argument("--solveCorr", help="solve transaction cost with respect to correlation coefficient",
                        action="store_true")
    parser.add_argument("--solveVol", help="solve transaction cost with respect to the volatity of a security",
                        action="store_true")
    parser.add_argument("--solveReturn", help="solve transaction cost with respect to the return of a security",
                        action="store_true")
    parser.add_argument("--solveThreshold", help="solve transaction cost with respect to the rebalance threshold",
                        action="store_true")
    args = parser.parse_args()
    portfolio = Portfolio(args.sec1mean, args.sec2mean,
                          args.sec1vol, args.sec2vol, args.corr, args.rebalance_threshold)
    if args.simulate:
        portfolio.Simulate(args.paths, args.tcost, args.periods, args.seed)
    elif args.convergence_test:
        portfolio.Tests(args.paths, args.tcost, args.periods, args.step, args.seed)
    elif args.solveCorr:
        portfolio.solveCorr(args.paths, args.tcost, args.periods, args.seed)
    elif args.solveVol:
        portfolio.solveSec1Vol(args.paths, args.tcost, args.periods, args.seed)
    elif args.solveThreshold:
        portfolio.solveThreshold(args.paths, args.tcost, args.periods, args.seed)
    elif args.solveReturn:
        portfolio.solveSec1Mean(args.paths, args.tcost, args.periods, args.seed)
# Only run when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6654008 | <filename>setup.py<gh_stars>0
import os

from setuptools import setup, find_packages

__version__ = '0.0.1'

# We use the README as the long_description.  Read it inside a context
# manager so the file handle is closed (the old open(...).read() leaked it)
# and decode explicitly as UTF-8.
readme_path = os.path.join(os.path.dirname(__file__), "README.md")
with open(readme_path, encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name='insights-analytics-collector',
    version=__version__,
    url='http://github.com/slemrmartin/insights-analytics-collector/',
    author='<NAME>',
    author_email='<EMAIL>',
    description='TODO',
    long_description=long_description,
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    keywords='TODO',
    license='Apache',
    zip_safe=False,
    packages=find_packages(),
    include_package_data=True,
    install_requires=['django'],
    tests_require=['pytest'],
)
| StarcoderdataPython |
import geopandas as gpd

# NOTE: NetworkX is very slow for this job
# (original Dutch comment: "Networkx werkt erg traag").
GEOJSON_PATH = r"C:\Users\bruno\Downloads\snelwegen_provincie.geojson"

# Load the provincial highway features into a GeoDataFrame.
gdf = gpd.read_file(GEOJSON_PATH)
gdf
| StarcoderdataPython |
8166687 | <gh_stars>1-10
"""
Custom exceptions for errors related to Linear Algebra.
"""
class LinearAlgebraError(Exception):
    """Base class for every error related to Linear Algebra.

    Catch this type to handle any linear-algebra failure generically.
    """
class InconsistentDataSetError(LinearAlgebraError):
    """
    Exception raised when the data sets of two vectors are inconsistent
    (i.e. the vectors have different numbers of elements).

    Attributes:
        vec1 (list of int or float, or sympy Mul): a vector
            which caused the error.
        vec2 (list of int or float, or sympy Mul): a vector
            which caused the error.
        message (str): Explanation of the error.
    """

    def __init__(self, vec1: list, vec2: list) -> None:
        self.vec1 = vec1
        self.vec2 = vec2
        self.message = (
            "Inconsistent data set of two vectors. "
            + "The number of elements of "
            + f"vector 1 [{len(self.vec1)}] and vector 2 [{len(self.vec2)}] are different."
        )
        # IMPROVEMENT: populate Exception.args via the base initializer so the
        # message survives pickling and appears in tracebacks even without
        # the __str__ override.
        super().__init__(self.message)

    def __str__(self) -> str:
        return self.message
| StarcoderdataPython |
1977925 | <filename>test/test_del_contact.py
from model.contact import Contact
import random
def test_delete_random_contact(app, db, check_ui):
    """Delete a randomly chosen contact and verify the DB (and optionally UI) state."""
    if not db.get_contact_list():
        # Make sure there is at least one contact to delete.
        app.contact.add(Contact(firstname="new", middlename="new", lastname="new", nickname="new", title="new",
                                company="new", address="new", homenumber="000987987", mobilenumber="000876876",
                                worknumber="000765765", fax="000654654", email="<EMAIL>",
                                email2="<EMAIL>", email3="<EMAIL>",
                                homepage="www.somethingnew.com", bday="13", bmonth="October", byear="1955",
                                aday="14", amonth="July", ayear="2002", address2="new2", phone2="000543543",
                                notes="new"))
    contacts_before = db.get_contact_list()
    victim = random.choice(contacts_before)
    app.contact.delete_contact_by_id(victim.id)
    contacts_after = db.get_contact_list()
    # Expected state: everything that was there before, minus the victim.
    contacts_before.remove(victim)
    assert sorted(contacts_before, key=Contact.id_or_max) == sorted(contacts_after, key=Contact.id_or_max)
    if check_ui:
        assert sorted(contacts_after, key=Contact.id_or_max) == sorted(app.contact.get_contacts_list(),
                                                                       key=Contact.id_or_max)
| StarcoderdataPython |
346882 | <filename>job_crawler/config.py<gh_stars>0
import os
import json
from enum import Enum
from typing import Dict, List
from dotenv import load_dotenv
class CrawlerType(Enum):
    """Supported crawler payload formats; the member value is the
    lowercase spelling used in the configuration file."""

    JSON = "json"
class CrawlerConfig:
    """Typed view over one crawler entry from the JSON configuration file."""

    name: str
    type: CrawlerType
    url: str
    base_path_parts: List[str]
    params: Dict[str, str]
    result: List[str]

    def __init__(self, config: Dict[str, any]):
        self.name = config["name"]
        # The config file stores the type lowercase ("json"); enum member
        # names are uppercase, hence the .upper() before the name lookup.
        self.type = CrawlerType[config["type"].upper()]
        self.url = config["url"]
        # "a/b/c" -> ["a", "b", "c"] for path traversal.
        self.base_path_parts = config["base_path"].split("/")
        self.params = config["params"]
        self.result = config["result"]
load_dotenv()  # load from .env file if present

config_file = "config.json"
telegram_token = os.getenv("TELEGRAM_TOKEN")
db_connection = os.getenv("DB_CONNECTION")

# Parse the crawler definitions once at import time.  BUG FIX: the original
# json.load(open(config_file, "r")) never closed the file handle; the
# context manager closes it, and the encoding is now explicit.
with open(config_file, "r", encoding="utf-8") as config_fp:
    __config = json.load(config_fp)
crawlers_config: List[CrawlerConfig] = list(map(CrawlerConfig, __config["crawlers"]))
| StarcoderdataPython |
8150084 | import pickle
from os import path
from ctypes import Structure, windll, c_uint, sizeof, byref
import time
import schedule
from rm_sync import get_files_from_zotero_storage
from rm_sync import sync
from config import config
'''
Periodically checks whether the Zotero storage has changed (interval taken
from the configuration).
If a change is detected:
    wait until the user has been idle long enough,
    then run the sync,
    and update the current file_log.
'''
# Load user settings once at import time; read by check() and monitor().
CONFIG = config()
class LASTINPUTINFO(Structure):
    """ctypes mirror of the Win32 LASTINPUTINFO structure.

    Field names, order and types must match the Windows ABI exactly;
    used below with user32.GetLastInputInfo.
    """
    _fields_ = [
        ('cbSize', c_uint),  # size of this structure, set via sizeof() before the call
        ('dwTime', c_uint),  # tick count of the last input event (compared to GetTickCount)
    ]
def get_idle_duration():
    """Return the number of seconds since the last user input (Windows only).

    Queries user32.GetLastInputInfo and compares its tick stamp against the
    current kernel32.GetTickCount value.
    """
    info = LASTINPUTINFO()
    info.cbSize = sizeof(info)
    windll.user32.GetLastInputInfo(byref(info))
    elapsed_millis = windll.kernel32.GetTickCount() - info.dwTime
    return elapsed_millis / 1000.0
def wait_for_idle(n_seconds, wait_interval):
    """Block until the user has been idle for at least ``n_seconds`` seconds.

    Polls get_idle_duration() every ``wait_interval`` seconds, printing the
    current idle time while waiting.
    """
    while get_idle_duration() < n_seconds:
        print(f'Idle: {get_idle_duration()}')
        time.sleep(wait_interval)
def check():
    """Run a sync iff the set of Zotero PDFs changed since the last run.

    The stem names of the current Zotero storage files are compared against
    the pickled listing from the previous run ('./file_log.pkl').  On any
    difference (or when no log exists yet) it waits for the user to go idle,
    runs sync(), and rewrites the log.
    """
    file_log_name = './file_log.pkl'
    zotero_path = CONFIG['path_to_local_zotero_storage']
    zotero_pdfs = [entry.stem for entry in get_files_from_zotero_storage(zotero_path)]

    if not path.exists(file_log_name):
        # First run: nothing logged yet, always sync.
        start_sync = True
    else:
        with open(file_log_name, 'rb') as f:
            logged_pdfs = pickle.load(f)
        start_sync = zotero_pdfs != logged_pdfs

    if start_sync:
        wait_for_idle(CONFIG['wait_for_n_seconds_idle'], 1)
        sync()
        with open(file_log_name, 'wb') as f:
            pickle.dump(zotero_pdfs, f)
def monitor():
    """Schedule check() every N minutes (from CONFIG) and loop forever."""
    schedule.every(CONFIG['check_log_every_n_minutes']).minutes.do(check)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    monitor()
3520350 | <gh_stars>1-10
import tensorflow as tf
import argparse
import os, re
import numpy as np
import skimage as ski
import skimage.data
import skimage.transform
import cv2
import tensorflow.contrib.layers as layers
from tensorflow.contrib.framework import arg_scope
import losses
import eval_helper
#import datasets.reader_rgbd_depth as reader
import datasets.reader_rgbd as reader
#import datasets.reader as reader
FLAGS = tf.app.flags.FLAGS
MODEL_DEPTH = 50
#MEAN_RGB = [75.2051479, 85.01498926, 75.08929598]
MEAN_BGR = [75.08929598, 85.01498926, 75.2051479]
#MEAN_BGR = [103.939, 116.779, 123.68]
def evaluate(name, sess, epoch_num, run_ops, dataset, data):
#TODO iIOU
loss_val, accuracy, iou, recall, precision = eval_helper.evaluate_segmentation(
sess, epoch_num, run_ops, dataset.num_examples())
if iou > data['best_iou'][0]:
data['best_iou'] = [iou, epoch_num]
data['iou'] += [iou]
data['acc'] += [accuracy]
data['loss'] += [loss_val]
def plot_results(train_data, valid_data):
eval_helper.plot_training_progress(os.path.join(FLAGS.train_dir, 'stats'),
train_data, valid_data)
def print_results(data):
print('Best validation IOU = %.2f (epoch %d)' % tuple(data['best_iou']))
def init_eval_data():
train_data = {}
valid_data = {}
train_data['lr'] = []
train_data['loss'] = []
train_data['iou'] = []
train_data['acc'] = []
train_data['best_iou'] = [0, 0]
valid_data['best_iou'] = [0, 0]
valid_data['loss'] = []
valid_data['iou'] = []
valid_data['acc'] = []
return train_data, valid_data
def normalize_input(img, depth):
return img - MEAN_BGR, depth - 33.0
#"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
#with tf.name_scope('input'), tf.device('/cpu:0'):
# #rgb -= MEAN_RGB
# red, green, blue = tf.split(3, 3, rgb)
# bgr = tf.concat(3, [blue, green, red])
# #bgr -= MEAN_BGR
# return bgr
bn_params = {
# Decay for the moving averages.
#'decay': 0.999,
'decay': 0.9,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
#'epsilon': 0.001,
'epsilon': 1e-5,
# None to force the updates
'updates_collections': None,
'is_training': True
}
def layer(net, num_filters, name, is_training):
with tf.variable_scope(name):
net = tf.contrib.layers.batch_norm(net, **bn_params)
net = tf.nn.relu(net)
net = layers.convolution2d(net, num_filters, kernel_size=3)
#if is_training:
#net = tf.nn.dropout(net, keep_prob=0.8)
return net
def dense_block(net, size, r, name, is_training):
with tf.variable_scope(name):
outputs = []
for i in range(size):
if i < size - 1:
x = net
net = layer(net, r, 'layer'+str(i), is_training)
outputs += [net]
net = tf.concat([x, net], 3)
else:
net = layer(net, r, 'layer'+str(i), is_training)
outputs += [net]
net = tf.concat(outputs, 3)
return net
def downsample(net, name, is_training):
with tf.variable_scope(name):
net = tf.contrib.layers.batch_norm(net)
net = tf.nn.relu(net)
num_filters = net.get_shape().as_list()[3]
net = layers.convolution2d(net, num_filters, kernel_size=1)
#if is_training:
# net = tf.nn.dropout(net, keep_prob=0.8)
net = layers.max_pool2d(net, 2, stride=2, padding='SAME')
return net
def upsample(net, shape, name):
with tf.variable_scope(name):
#return tf.image.resize_nearest_neighbor(net, shape[1:3], name='resize_logits_middle')
return tf.image.resize_bilinear(net, shape[1:3], name='resize_logits_biliner')
#def upsample(net, name):
# with tf.variable_scope(name):
# num_filters = net.get_shape().as_list()[3]
# net = tf.contrib.layers.convolution2d_transpose(net, num_filters, kernel_size=3, stride=2)
# return net
def _build(image, depth, is_training):
bn_params['is_training'] = is_training
weight_decay = 1e-4
#init_func = layers.variance_scaling_initializer(mode='FAN_OUT')
init_func = layers.variance_scaling_initializer()
cfg = {
#5: [4,5,7,10,12,15],
5: [2,3,4,5,6,8],
#5: [3,3,3,3,3,3],
#5: [3,3,3],
#5: [2,2],
}
block_sizes = [2,3,4,5,6,7]
#block_sizes_back = [4,3,3,2,2]
block_sizes_back = [2,2,2,2,2]
#block_sizes_back = [4,4,4,4,4]
r = 16
r_up = 32
#r_up = 16
#r = 12
with arg_scope([layers.convolution2d, layers.convolution2d_transpose],
stride=1, padding='SAME', activation_fn=None,
normalizer_fn=None, normalizer_params=None,
weights_initializer=init_func, biases_initializer=None,
weights_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.convolution2d(image, 48, 3, scope='conv0')
#depth = tf.Print(depth, [tf.reduce_mean(depth)], message='depth = ')
net = tf.concat([depth, net], 3)
block_outputs = []
for i, size in enumerate(block_sizes):
print(i, size)
x = net
net = dense_block(net, size, r, 'block'+str(i), is_training)
net = tf.concat([x, net], 3)
print(net)
if i < len(block_sizes) - 1:
block_outputs += [net]
net = downsample(net, 'block'+str(i)+'_downsample', is_training)
#logits_mid = layers.convolution2d(net, FLAGS.num_classes, 1,
# biases_initializer=tf.zeros_initializer, scope='logits_middle')
#logits_mid = tf.image.resize_bilinear(logits_mid, [FLAGS.img_height, FLAGS.img_width],
# name='resize_logits_middle')
# TODO add 5x5
#net = tf.nn.relu(net)
#num_filters = net.get_shape().as_list()[3]
#net = layers.convolution2d(net, num_filters, kernel_size=1)
#for i, size in reversed(list(enumerate(block_sizes[:-1]))):
for i, size in enumerate(block_sizes_back):
print(i, size)
#net = upsample(net, 'block'+str(i)+'_back_upsample')
#net = upsample(net, tf.shape(block_outputs[i]), 'block'+str(i)+'_back_upsample')
skip_input = block_outputs[-1-i]
net = upsample(net, tf.shape(skip_input), 'block'+str(i)+'_back_upsample')
print(skip_input)
net = tf.concat([skip_input, net], 3)
print(net)
net = dense_block(net, size, r_up, 'block'+str(i)+'_back', is_training)
print(net)
logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None, scope='logits')
#logits = tf.image.resize_bilinear(logits, [FLAGS.img_height, FLAGS.img_width],
# name='resize_logits')
return logits, None
def build(dataset, is_training, reuse=False):
with tf.variable_scope('', reuse=reuse):
x, labels, weights, depth, img_names = reader.inputs(dataset, is_training=is_training, num_epochs=FLAGS.max_epochs)
x, depth = normalize_input(x, depth)
logits, logits_mid = _build(x, depth, is_training)
total_loss = _loss(logits, logits_mid, labels, weights, is_training)
if is_training:
return [total_loss], None, None
else:
return [total_loss, logits, labels, img_names]
def _loss(logits, logits_mid, labels, weights, is_training=True):
#def loss(logits, labels, weights, is_training=True):
xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, max_weight=10)
#xent_loss += losses.weighted_cross_entropy_loss(logits_mid, labels, weights)
#xent_loss /= 2
#xent_loss = losses.weighted_cross_entropy_loss(logits, labels)
#xent_loss += losses.weighted_cross_entropy_loss(logits_mid, labels)
#loss_tf = tf.contrib.losses.softmax_cross_entropy()
#loss_val = losses.weighted_hinge_loss(logits, labels, weights, num_labels)
#loss_val = losses.flip_xent_loss(logits, labels, weights, num_labels)
#loss_val = losses.flip_xent_loss_symmetric(logits, labels, weights, num_labels)
#all_losses = [depth_loss, xent_loss]
all_losses = [xent_loss]
# get losses + regularization
total_loss = losses.total_loss_sum(all_losses)
if is_training:
loss_averages_op = losses.add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def minimize(loss, global_step, num_batches):
decay_steps = int(num_batches * FLAGS.num_epochs_per_decay) * 2
# Decay the learning rate exponentially based on the number of steps.
global lr
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate, global_step, decay_steps,
FLAGS.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', lr)
print('Using optimizer: Adam')
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(loss)
all_vars = tf.contrib.framework.get_variables()
for v in all_vars:
print(v.name)
train_op = opt.apply_gradients(grads, global_step=global_step)
return train_op
def train_step(sess, run_ops):
return sess.run(run_ops)
def num_batches(dataset):
return reader.num_examples(dataset)
def num_examples(dataset):
return reader.num_examples(dataset)
| StarcoderdataPython |
class LsvmInterface:
    """Abstract interface for an LSVM backend.

    Concrete implementations must override calc(), decrypt() and
    get_labels(); the base class deliberately keeps no state.
    """

    def __init__(self, model_name):
        # Subclasses are expected to load/handle `model_name` themselves.
        pass

    def calc(self) -> list:
        """Run the computation; must be provided by a subclass."""
        raise NotImplementedError

    def decrypt(self, encrypted_labels) -> list:
        """Decrypt the given encrypted labels; must be provided by a subclass."""
        raise NotImplementedError

    def get_labels(self) -> list:
        """Return the labels; must be provided by a subclass."""
        raise NotImplementedError
| StarcoderdataPython |
1778622 | <reponame>thanhhvnqb/detectron2
import torch
import torch.nn.functional as F
from torch import nn
import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.backbone.resnet import build_resnet_backbone
from .mobilenetv3 import build_mobilenetv3_rw_backbone
class LastLevelP6P7DW(nn.Module):
    """This module is used in FCOS to generate extra layers, P6 and P7 from P5 feature.

    Depthwise-separable variant: each extra level is a stride-2 depthwise
    3x3 conv (groups == channels) followed by a pointwise 1x1 conv, each
    with GroupNorm and ReLU.  NOTE: the module creation/init order below
    determines state_dict keys and RNG consumption -- do not reorder.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        norm = "GN"
        # P6 branch: depthwise 3x3 stride-2, then pointwise 1x1.
        # `norm` is truthy here, so bias=not norm evaluates to False.
        conv_fcn = []
        conv_fcn.append(Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=1, bias=not norm,
                               groups=in_channels, norm=get_norm(norm, in_channels), activation=F.relu))
        conv_fcn.append(Conv2d(in_channels, out_channels, kernel_size=1, bias=not norm,
                               norm=get_norm(norm, out_channels), activation=F.relu))
        self.add_module('p6', nn.Sequential(*conv_fcn))
        # P7 branch: same depthwise+pointwise pair, applied on top of P6.
        conv_fcn = []
        conv_fcn.append(Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=not norm,
                               groups=out_channels, norm=get_norm(norm, out_channels), activation=F.relu))
        conv_fcn.append(Conv2d(out_channels, out_channels, kernel_size=1, bias=not norm,
                               norm=get_norm(norm, out_channels), activation=F.relu))
        self.add_module('p7', nn.Sequential(*conv_fcn))
        # Initialize every conv with Caffe2-style MSRA fill.
        for layer in [*self.p6, *self.p7]:
            weight_init.c2_msra_fill(layer)

    def forward(self, p5):
        """Return [P6, P7] computed from the P5 feature map (P7 from P6)."""
        p6 = self.p6(p5)
        p7 = self.p7(p6)
        return [p6, p7]
class LastLevelP6DW(nn.Module):
    """This module is used in FCOS to generate one extra layer, P6, from the P5 feature.

    Depthwise-separable, single-level variant: a stride-2 depthwise 5x5
    conv followed by a pointwise 1x1 conv, each with GroupNorm and ReLU.
    NOTE: module creation/init order determines state_dict keys and RNG
    consumption -- do not reorder.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 1
        self.in_feature = "p5"
        norm = "GN"
        # Depthwise 5x5 stride-2 (padding=2 keeps the "same" spatial
        # alignment), then pointwise 1x1.  `norm` is truthy -> bias=False.
        conv_fcn = []
        conv_fcn.append(Conv2d(in_channels, in_channels, kernel_size=5, stride=2, padding=2, bias=not norm,
                               groups=in_channels, norm=get_norm(norm, in_channels), activation=F.relu))
        conv_fcn.append(Conv2d(in_channels, out_channels, kernel_size=1, bias=not norm,
                               norm=get_norm(norm, out_channels), activation=F.relu))
        self.add_module('p6', nn.Sequential(*conv_fcn))
        # Caffe2-style MSRA initialization for each conv.
        for layer in self.p6:
            weight_init.c2_msra_fill(layer)

    def forward(self, p5):
        """Return [P6] computed from the P5 feature map."""
        p6 = self.p6(p5)
        return [p6]
class LastLevelP6P7(nn.Module):
    """Generates the extra FCOS levels P6 and P7 from the P5 feature map
    using plain stride-2 3x3 convolutions."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        # Creation/init order matters for state_dict keys and RNG draws:
        # p6 first, then p7, then xavier fill in the same order.
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for conv in (self.p6, self.p7):
            weight_init.c2_xavier_fill(conv)

    def forward(self, p5):
        """Return [P6, P7]; only P7's input passes through a ReLU."""
        p6 = self.p6(p5)
        return [p6, self.p7(F.relu(p6))]
@BACKBONE_REGISTRY.register()
def build_fcos_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a ResNet-FPN backbone with the extra P6/P7 levels used by FCOS.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    fpn_channels = cfg.MODEL.FPN.OUT_CHANNELS
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=fpn_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7(fpn_channels, fpn_channels),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_mb3_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a MobileNetV3-FPN backbone with a single extra depthwise P6 level.

    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_mobilenetv3_rw_backbone(cfg, input_shape)
    fpn_channels = cfg.MODEL.FPN.OUT_CHANNELS
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=fpn_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6DW(fpn_channels, fpn_channels),
        # top_block=LastLevelP6P7DW(fpn_channels, fpn_channels),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
| StarcoderdataPython |
12808149 | <reponame>lematt1991/RecLab<gh_stars>10-100
"""
The package for the Autorec recommender.
See https://doi.org/10.1145/2740908.2742726 for details.
"""
from .autorec import Autorec
| StarcoderdataPython |
1629790 | <reponame>gda2048/rest<gh_stars>1-10
from django.contrib import admin
from chat_room.models import Room, Message
@admin.register(Room)
class RoomAdmin(admin.ModelAdmin):
    """Admin configuration for chat rooms."""

    list_display = ("creator", "invited_user", "date")
    filter_horizontal = ('invited',)

    def invited_user(self, obj):
        """Render the room's invited users as one newline-separated string."""
        usernames = (user.username for user in obj.invited.all())
        return "\n".join(usernames)
@admin.register(Message)
class ChatAdmin(admin.ModelAdmin):
    """Chat message admin (list shows room, author, text and date)."""
    list_display = ("room", "user", "text", "date")
| StarcoderdataPython |
12853180 | <reponame>nokia/minifold<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
import sys
from pprint import pformat
# Log severity levels (ordered; Log.log_level suppresses anything below it).
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3

# Shell colors
DEFAULT = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7

# Shell style
# NOTE(review): DEFAULT is re-assigned here (same value, 0), and
# HIGHLIGHTED shares value 7 with GRAY above -- colors and styles are
# separate namespaces only by convention (3x vs 4x prefixes in start_style).
DEFAULT = 0
BOLD = 1
UNDERLINED = 4
BLINKING = 5
HIGHLIGHTED = 7
class Log:
    """Minimal leveled logger with optional ANSI coloring.

    Class-level switches:
      enable_print -- master on/off switch for all output.
      with_color   -- color flag (NOTE(review): currently not consulted by
                      print(); coloring is always applied).
      log_level    -- messages with a level below this are dropped.
    """
    enable_print = False
    # TODO: The following static parameters should be load from ~/.minifoldrc
    # TODO: dark / light colors
    with_color = True
    log_level = 0

    # Header word printed in front of each message, per severity level.
    message_header = {
        DEBUG: "DEBUG",
        INFO: "INFO",
        WARNING: "WARNING",
        ERROR: "ERROR",
    }

    # Foreground color used per severity level.
    message_color = {
        DEBUG: CYAN,
        INFO: GREEN,
        WARNING: YELLOW,
        ERROR: RED,
    }

    @staticmethod
    def start_style(
        fg_color: int = None,
        bg_color: int = None,
        styles: tuple = ()  # FIX: was a mutable default (list()); a tuple is safe
    ) -> str:
        """Return the ANSI escape prefix for the requested foreground color
        (3x), background color (4x) and extra style codes, or "" when
        nothing is requested."""
        styling = []
        if fg_color is not None:  # FIX: identity check instead of `!= None`
            styling.append("3%d" % fg_color)
        if bg_color is not None:
            styling.append("4%d" % bg_color)
        if styles:
            styling += list(styles)
        return "\033[%sm" % ";".join(styling) if styling else ""

    @staticmethod
    def default_style() -> str:
        """Return the ANSI escape that resets all styling."""
        return "\033[0m"

    @classmethod
    def print(cls, message_type: int, message: str, file=sys.stderr):
        """Write `message` with the level's header and color to `file`,
        unless printing is disabled or the level is below cls.log_level.

        Non-string messages are pretty-printed via pprint.pformat.
        """
        if cls.enable_print and message_type >= cls.log_level:
            color = cls.message_color[message_type]
            header = cls.message_header[message_type]
            body = message if isinstance(message, str) else pformat(message)
            print(
                "%(start_style)s%(message)s%(end_style)s" % {
                    "start_style": cls.start_style(fg_color=color),
                    "message": " ".join([header, body]),
                    "end_style": cls.default_style()
                },
                file=file
            )

    @classmethod
    def debug(cls, s):
        """Log `s` at DEBUG level."""
        cls.print(DEBUG, s)

    @classmethod
    def info(cls, s):
        """Log `s` at INFO level."""
        cls.print(INFO, s)

    @classmethod
    def warning(cls, s):
        """Log `s` at WARNING level."""
        cls.print(WARNING, s)

    @classmethod
    def error(cls, s):
        """Log `s` at ERROR level."""
        cls.print(ERROR, s)
| StarcoderdataPython |
8025189 | from flask import render_template, Blueprint
from models.estudiante import Estudiante
perfil = Blueprint('perfil', __name__)


@perfil.route('/perfil/<nombre_usuario>/')
def detail(nombre_usuario):
    """Render the profile page for the student with the given username."""
    estudiante = Estudiante.get(nombre_usuario=nombre_usuario)
    return render_template('detail.html', persona=estudiante)
| StarcoderdataPython |
4921475 | from libmineshaft.blocks import Block, MultipleStateBlock, NoIDBlock
class Air(Block):
    """Air block (id 0).

    resistance/breaktime of -1 presumably mark it unbreakable -- the same
    sentinel values Bedrock uses.
    """

    id = 0
    name = "Air"
    imagecoords = (64, 176)
    resistance = -1
    falls = False
    breaktime = -1
class StoneBlock(NoIDBlock):
    """Concrete stone state without its own id (wrapped by Stone below)."""

    name = "Stone"
    imagecoords = (16, 0)
    resistance = 10
    falls = False
    breaktime = 15
class Stone(MultipleStateBlock):
    """Stone (id 1): a multi-state block whose only state is StoneBlock."""

    id = 1
    blocks = [StoneBlock]
class Grass(Block):
    """Grass block (id 2)."""

    id = 2
    name = "Grass Block"
    imagecoords = (48, 0)
    resistance = 0
    falls = False
    breaktime = 2
class Dirt(Block):
    """Dirt block (id 3)."""

    id = 3
    imagecoords = (32, 0)
    resistance = 0
    name = "Dirt"
    # BUG FIX: this attribute was misspelled "fallse", so Dirt never defined
    # the `falls` flag that every sibling block declares.
    falls = False
    breaktime = 2
class Cobblestone(Block):
    """Cobblestone block (id 4)."""

    id = 4
    imagecoords = (
        0,
        0,
    )  # Temporary placeholder, since there is no cobble texture right now
    resistance = 10
    # CONSISTENCY FIX: every other block class declares `name`; Cobblestone
    # was missing it.
    name = "Cobblestone"
    falls = False
    breaktime = 15
class Bedrock(Block):
    """Bedrock (id 7); resistance/breaktime of -1 presumably mark it
    unbreakable."""

    id = 7
    name = "Bedrock"
    imagecoords = (16, 16)
    resistance = -1
    falls = False
    breaktime = -1
# Registry mapping numeric block id -> block class; keys mirror each class's `id`.
BLOCKS = {0: Air, 1: Stone, 2: Grass, 3: Dirt, 4: Cobblestone, 7: Bedrock}
| StarcoderdataPython |
215611 | <reponame>JDatPNW/faceTrack<filename>src/clInitializer.py<gh_stars>0
import os
from .Initializer import Initializer
class clInitializer(Initializer):
    """Command-line variant of Initializer: interactively prompts for every
    run parameter and clamps the numeric choices into their valid ranges."""

    def getInput(self):
        """Prompt for all settings and return them as a 10-tuple:
        (visualize, inputfile, experiment, threshold, sampling, tracker,
        logger, visualizer, loader, archiver).

        NOTE(review): only `visualize` is always converted to int; the other
        numeric answers stay *strings* unless one of the clamping branches
        below reassigns them to an int -- callers must tolerate both types.
        """
        self.visualize = input('Enable visualization? [1=Yes/0=No]: ')
        self.visualize = int(self.visualize)
        self.inputfile = input(
            'Enter the name of the file(folder) containing the YouTube URLs/images/Video : ')
        # Resolve the answer relative to <repo>/input/.
        self.inputfile = "/input/" + self.inputfile
        self.inputfile = os.path.dirname(
            os.path.abspath(__file__)) + "/.." + self.inputfile
        self.experiment = input(
            'Enter the name of the directory in which the video folders should be saved in: ')
        self.experiment = self.experiment + "/"
        # Default would be 0 - Cuts off after lower certanties
        self.threshold = input('Enter treshhold: ')
        self.sampling = input('Enter sampling: ')  # 1 works well
        self.logger = input(
            'Choose between Command Line Logging[1 - faster] and GUI Logging[0]: ')  # 1 works well
        self.loader = input(
            'Do you want to load a YouTube video[0], a folder of images[1], or a video file [2]?: ')  # 1 works well
        self.tracker = input(
            'Choose between dlib[1 - Recommended] and cv2[0] tracking: ')  # 1 works well
        self.visualizer = input(
            'Choose between the cv2[1 - fatser] and GUI[0] Visualizer: ')  # 1 works well
        self.archiver = input(
            'Do you want to safe the results as a .npy[2], .jpg[1] or as a .csv[0]: ')  # 1 works well
        # Clamp visualize into {0, 1}.
        # NOTE(review): `int(self.visualize < 0)` converts the *comparison*
        # (a bool) to int -- it behaves like `self.visualize < 0`, but was
        # probably meant to read `int(self.visualize) < 0`.  The (0, 1)
        # range branch below can never fire for integer input.
        if(int(self.visualize < 0)):
            self.visualize = 0
        elif(int(self.visualize) > 0 and int(self.visualize) < 1):
            self.visualize = 1
        elif(int(self.visualize) > 1):
            self.visualize = 1
        # Floor threshold/sampling at 0 (they remain strings when valid).
        if(int(self.threshold) < 0):
            self.threshold = 0
        if(int(self.sampling) < 0):
            self.sampling = 0
        # Clamp loader into {0, 1, 2}; the fractional-range branches are
        # unreachable for integer answers.
        if(int(self.loader) < 0):
            self.loader = 0
        elif(int(self.loader) > 0 and int(self.loader) < 1):
            self.loader = 1
        elif(int(self.loader) > 1 and int(self.loader) < 2):
            self.loader = 1
        elif(int(self.loader) > 2):
            self.loader = 2
        # Clamp logger into {0, 1}.
        if(int(self.logger) < 0):
            self.logger = 0
        elif(int(self.logger) > 0 and int(self.logger) < 1):
            self.logger = 1
        elif(int(self.logger) > 1):
            self.logger = 1
        # Clamp tracker into {0, 1}.
        if(int(self.tracker) < 0):
            self.tracker = 0
        elif(int(self.tracker) > 0 and int(self.tracker) < 1):
            self.tracker = 1
        elif(int(self.tracker) > 1):
            self.tracker = 1
        # Clamp visualizer into {0, 1}.
        if(int(self.visualizer) < 0):
            self.visualizer = 0
        elif(int(self.visualizer) > 0 and int(self.visualizer) < 1):
            self.visualizer = 1
        elif(int(self.visualizer) > 1):
            self.visualizer = 1
        # Clamp archiver into {0, 1, 2}.
        # NOTE(review): unlike the others, there is no branch for values > 2.
        if(int(self.archiver) < 0):
            self.archiver = 0
        elif(int(self.archiver) > 0 and int(self.archiver) < 1):
            self.archiver = 1
        elif(int(self.archiver) > 1 and int(self.archiver) < 2):
            self.archiver = 2
        return (self.visualize,
                self.inputfile, self.experiment, self.threshold, self.sampling, self.tracker,
                self.logger, self.visualizer, self.loader, self.archiver)
| StarcoderdataPython |
8144610 | <gh_stars>1-10
class Solution:
    """Count the integers in [1, num] whose decimal digit sum is even."""

    def countEven(self, num: int) -> int:
        count = 0
        for value in range(1, num + 1):
            # Digit sum via arithmetic instead of string conversion.
            digit_sum = 0
            remaining = value
            while remaining:
                digit_sum += remaining % 10
                remaining //= 10
            if digit_sum % 2 == 0:
                count += 1
        return count
| StarcoderdataPython |
5071439 | import os
from distutils.debug import DEBUG
class Config:
    '''
    General configuration parent class: News API endpoint templates and
    keys shared by all environments.
    '''
    # NOTE(review): API keys are hard-coded in source (one active, one
    # commented out and also embedded in NEWS_BASE_URL); they should be
    # moved to environment variables and the exposed keys rotated.
    NEWS_BASE_URL = 'https://newsapi.org/v2/{}?q=Apple&from=2022-01-25&sortBy=popularity&apiKey=11319835f3f642b08ffc5ed98495e990'
    NEWS_API_KEY = '0aa9f5a46444443fb64afbece6ada52b'
    # NEWS_API_KEY = '11319835f3f642b08ffc5ed98495e990'
    TOP_HEADLINES_URL = 'https://newsapi.org/v2/top-headlines?country=us&apiKey={}'
    SOURCE_URL = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey={}'
    CATEGORIES_BASE_URL = 'https://newsapi.org/v2/top-headlines?country=us&category={}&apiKey={}'
    SEARCH_NEWS_URL = 'https://newsapi.org/v2/everything?q={}&apiKey={}'
class ProdConfig(Config):
    '''
    Production configuration child class; inherits everything from Config
    unchanged.

    Args:
        Config: The parent configuration class with General configuration settings
    '''
class DevConfig(Config):
    '''
    Development configuration child class; turns on the DEBUG flag.

    Args:
        Config: The parent configuration class with General configuration settings
    '''

    DEBUG = True
# Maps an environment name to its configuration class; looked up by the
# application at start-up to pick the active configuration.
config_options = {
    'development': DevConfig,
    'production': ProdConfig
}
| StarcoderdataPython |
322363 | <filename>011-testunit/testFileMyName.py<gh_stars>0
import unittest
from surveyTest import AnonymousSurvey
class TestSurvey(unittest.TestCase):
    """Tests for AnonymousSurvey.

    NOTE(review): both tests look broken as written:
      * they assert membership in ``self.my_survey.store_response`` -- the
        bound *method* object -- rather than in the survey's stored
        responses (presumably a ``responses`` attribute; confirm against
        AnonymousSurvey before fixing);
      * the first test stores "English" but asserts "English1";
      * the second test passes a *list* to store_response while the first
        passes a single string.
    """

    def setUp(self):
        # Create one survey object shared by every test method.
        question = "what language did you fitst learn to speak"
        self.my_survey = AnonymousSurvey(question)

    def test_store_single_response(self):
        """Test that a single answer is stored."""
        self.my_survey.store_response("English")
        # NOTE(review): mismatched value and wrong container -- see class docstring.
        self.assertIn("English1", self.my_survey.store_response)

    def test_store_response(self):
        # Test that multiple answers are stored.
        self.my_survey.store_response(["wanger", "lisi"])
        # NOTE(review): wrong container -- see class docstring.
        self.assertIn("lisi", self.my_survey.store_response)
# Only run the test suite when executed as a script, not on import.
if __name__ == '__main__':
    unittest.main()
9635485 | """The Tesla Powerwall integration base entity."""
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MANUFACTURER, MODEL
class PowerWallEntity(CoordinatorEntity):
    """Base class for powerwall entities."""

    def __init__(
        self, coordinator, site_info, status, device_type, powerwalls_serial_numbers
    ):
        """Initialize the sensor."""
        super().__init__(coordinator)
        self._site_info = site_info
        self._device_type = device_type
        self._version = status.version
        # Powerwall serial numbers are unique per site, so their
        # concatenation makes a stable unique id.
        self.base_unique_id = "_".join(powerwalls_serial_numbers)

    @property
    def device_info(self) -> DeviceInfo:
        """Powerwall device info."""
        model_name = f"{MODEL} ({self._device_type.name})"
        return DeviceInfo(
            identifiers={(DOMAIN, self.base_unique_id)},
            manufacturer=MANUFACTURER,
            model=model_name,
            name=self._site_info.site_name,
            sw_version=self._version,
        )
| StarcoderdataPython |
3368817 | <filename>vispy/scene/visuals/modular_visual.py
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, print_function
import numpy as np
from ... import gloo
from .visual import Visual
from ..shaders import ModularProgram, Function, FunctionChain, Variable
from ..components import (VisualComponent, XYPosComponent, XYZPosComponent,
UniformColorComponent, VertexColorComponent)
"""
- Should have swappable input component to allow a variety of different
vertex inputs:
2d attribute + z uniform
3d attribute
2d attribute + z uniform + index
3d attribute + index
1d attribute + x/y ranges (surface plot)
(and any other custom input component the user might come up with)
- Should have swappable / chainable fragment components:
Per-vertex normals (for smooth surfaces)
Per-face normals (for faceted surfaces)
Colors per-vertex, per-face
Materials - phong, etc.
Textures - color, bump map, spec map, etc
Wireframe rendering (note this might require vertex modification)
- Make base shaders look like:
vertex_input => vertex_adjustment, transform_to_nd, post_hook
color_input => color_adjustment
- For efficiency, the vertex inputs should allow both pre-index and
unindexed arrays. However, many fragment shaders may require pre-indexed
arrays. For example, drawing faceted surfaces is not possible with
unindexed arrays since the normal vector changes each time a vertex is
visited.
=> this means that input components need a way to convert their data
and suggest a different input component (?)
=> More generally, we need to be able to map out all of the available
pathways and choose the most efficient one based on the input
data format (to avoid unnecessary conversions) and the requirements
of individual components (including indexed/unindexed, whether
geometry shaders are available, ...)
- Fragment shaders that do not need normals should obviously not compute them
- Some materials require a normal vector, but there may be any number of
ways to generate a normal: per-vertex interpolated, per-face, bump maps,
etc. This means we need a way for one material to indicate that it requires
normals, and a way to tell the component which normal-generating component
it should use.
=> Likewise with colors. In fact, normals and colors are similar enough
that they could probably share most of the same machinery..
=> Color chain \
===> Material chain
=> Normal chain /
Examples:
Color input / filters:
uniform color
color by vertex, color by face
texture color
float texture + colormap
color by height
grid contours
wireframe
Normal input:
normal per vertex
normal per face
texture bump map
texture normal map
Material composition:
shaded / facets
shaded / smooth
phong shading
"""
class ComponentProgram(ModularProgram):
    """
    Temporary class to bridge differences between current ModularProgram
    and old ModularProgram.

    Adds named FunctionChain "hooks" that components can append callbacks
    to, plus a __setitem__ that routes template variables to whichever
    shader (vertex first, then fragment) accepts them.
    """
    def __init__(self, vert, frag):
        # Maps hook name -> the FunctionChain registered under that name.
        self._chains = {}
        ModularProgram.__init__(self, Function(vert), Function(frag))

    def add_chain(self, var):
        """
        Create a new ChainFunction and attach to $var on the appropriate
        main function.
        """
        chain = FunctionChain(var, [])
        self._chains[var] = chain
        self[var] = chain

    def add_callback(self, hook, func):
        # Append *func* to the end of the chain registered under *hook*.
        self._chains[hook].append(func)

    def remove_callback(self, hook, func):
        # Remove *func* from the chain registered under *hook*.
        self._chains[hook].remove(func)

    def __setitem__(self, name, val):
        # Try the vertex shader first, then the fragment shader; fall back
        # to the base-class behavior if neither accepts the variable.
        # NOTE(review): any exception type is treated as "not this shader".
        try:
            self.vert[name] = val
        except Exception:
            try:
                self.frag[name] = val
            except Exception:
                ModularProgram.__setitem__(self, name, val)
class ModularVisual(Visual):
    """
    Abstract modular visual. This extends Visual by implementing a system
    of attachable components that change the input and output behaviors of
    the visual.

    * A modular GLSL program with a standard set of vertex and
      fragment shader hooks
    * A mechanism for adding and removing components
      that affect the vertex position (pos_components) and fragment
      color (color_components)
    * A default draw() method that:
        * activates each of the attached components
        * negotiates a buffer mode (pre-indexed or unindexed) supported by
          all components
        * Requests an index buffer from components (if needed)
        * Instructs the program to draw using self.primitive
    * A simple set_data() method intended to serve as an example for
      subclasses to follow.
    """

    VERTEX_SHADER = """
        void main(void) {
            $local_pos = $local_position();
            vec4 nd_pos = $map_local_to_nd($local_pos);
            gl_Position = nd_pos;
            $vert_post_hook();
        }
    """

    FRAGMENT_SHADER = """
        // Fragment shader consists of only a single hook that is usually defined
        // by a chain of functions, each which sets or modifies the curren
        // fragment color, or discards it.
        void main(void) {
            gl_FragColor = $frag_color();
        }
    """

    def __init__(self, parent=None, **kwds):
        Visual.__init__(self, parent, **kwds)

        # [default_preset_or_None, {'GL_FLAG': bool / 'glFuncName': args}]
        # Both entries are applied every time the Visual is drawn; this
        # provides a simple way for the user to customize appearance.
        self._gl_options = [None, {}]

        self._program = ComponentProgram(self.VERTEX_SHADER,
                                         self.FRAGMENT_SHADER)
        self._program.vert['local_pos'] = Variable('local_pos',
                                                   vtype='', dtype='vec4')

        # Generic chains for attaching post-processing functions
        self._program.add_chain('local_position')
        self._program.add_chain('vert_post_hook')
        self._program.add_chain('frag_color')

        # Components for plugging different types of position and color input.
        self._pos_components = []
        self._color_components = []

        # Primitive, default is GL_TRIANGLES
        self._primitive = gloo.gl.GL_TRIANGLES

    @property
    def primitive(self):
        """
        The GL primitive used to draw this visual.
        """
        return self._primitive

    @property
    def vertex_index(self):
        """
        Returns the IndexBuffer (or None) that should be used when drawing
        this Visual.
        """
        # TODO: What to do here? How do we decide which component should
        # generate the index?
        return self.pos_components[0].index

    def set_data(self, pos=None, index=None, z=0.0, color=None):
        """
        Default set_data implementation is only used for a few visuals..

        *pos* must be array of shape (..., 2) or (..., 3).
        *z* is only used in the former case.
        """
        # select input component based on pos.shape
        if pos is not None:
            if pos.shape[-1] == 2:
                comp = XYPosComponent(xy=pos.astype(np.float32),
                                      z=z, index=index)
                self.pos_components = [comp]
            elif pos.shape[-1] == 3:
                comp = XYZPosComponent(pos=pos.astype(np.float32), index=index)
                self.pos_components = [comp]
            else:
                raise Exception("Can't handle position data: %s" % pos)

        if color is not None:
            if isinstance(color, tuple):
                self.color_components = [UniformColorComponent(color)]
            elif isinstance(color, np.ndarray):
                self.color_components = [VertexColorComponent(color)]
            else:
                # Fixed: the original message was truncated ("…data:") and
                # never reported what it was given.
                raise Exception("Can't handle color data: %r" % (color,))

    def set_gl_options(self, default=-1, **kwds):
        """
        Set all GL options for this Visual. Most common arguments are
        'translucent', 'opaque', and 'additive'.
        See gloo.set_state() for more information.

        These options are invoked every time the Visual is drawn.
        """
        # -1 is the "not given" sentinel (None is a meaningful preset).
        # Fixed: use '!=' rather than 'is not' -- identity checks against
        # an int literal are implementation-dependent and warn on
        # CPython >= 3.8.
        if default != -1:
            self._gl_options[0] = default
        self._gl_options[1] = kwds

    def update_gl_options(self, default=-1, **kwds):
        """
        Update GL options rather than replacing all. See set_gl_options().
        """
        if default != -1:
            self._gl_options[0] = default
        # Fixed: merge into the options dict at index 1. The enclosing
        # *list* has no update() method, so the original line raised
        # AttributeError whenever keyword options were passed.
        self._gl_options[1].update(kwds)

    def gl_options(self):
        """
        Return the GL options in use for this Visual.
        See set_gl_options().
        """
        return self._gl_options[0], self._gl_options[1].copy()

    @property
    def pos_components(self):
        return self._pos_components[:]

    @pos_components.setter
    def pos_components(self, comps):
        for comp in self._pos_components:
            try:
                comp._detach()
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit /
                # KeyboardInterrupt pass through untouched; the offending
                # component is still reported before re-raising.
                print(comp)
                raise
        self._pos_components = comps
        for comp in self._pos_components:
            comp._attach(self)
        self.events.update()

    @property
    def color_components(self):
        return self._color_components[:]

    @color_components.setter
    def color_components(self, comps):
        for comp in self._color_components:
            try:
                comp._detach()
            except Exception:
                # See pos_components.setter for why this is not a bare
                # 'except:'.
                print(comp)
                raise
        self._color_components = comps
        for comp in self._color_components:
            comp._attach(self)
        self.events.update()

    def update(self):
        """
        This method is called whenever the Visual must be redrawn.
        """
        self.events.update()

    def draw(self, event=None):
        """
        Draw this visual now.

        The default implementation configures GL flags according to the
        contents of self._gl_options
        """
        self._activate_gl_options()
        mode = self._draw_mode()
        self._activate_components(mode, event)
        self._program.draw(self.primitive, self.vertex_index)

    # todo: should this be called "buffer_mode" ?
    def _draw_mode(self):
        """
        Return the mode that should be used to draw this visual
        (DRAW_PRE_INDEXED or DRAW_UNINDEXED)
        """
        modes = set([VisualComponent.DRAW_PRE_INDEXED,
                     VisualComponent.DRAW_UNINDEXED])
        for comp in self._color_components + self.pos_components:
            modes &= comp.supported_draw_modes

        if len(modes) == 0:
            for c in self._color_components:
                print(c, c.supported_draw_modes)
            raise Exception("Visual cannot draw--no mutually supported "
                            "draw modes between components.")

        # TODO: pick most efficient draw mode!
        return list(modes)[0]

    def _activate_gl_options(self):
        gloo.set_state(self._gl_options[0], **self._gl_options[1])

    def _activate_components(self, mode, event):
        """
        This is called immediately before drawing to inform all components
        that a draw is about to occur and to let them assign program
        variables.
        """
        if len(self._pos_components) == 0:
            raise Exception("Cannot draw visual %s; no position components"
                            % self)
        if len(self._color_components) == 0:
            raise Exception("Cannot draw visual %s; no color components"
                            % self)
        comps = self._pos_components + self._color_components
        all_comps = set(comps)
        # Walk dependencies breadth-first so every transitive dependency
        # ends up in all_comps exactly once.
        while len(comps) > 0:
            comp = comps.pop(0)
            comps.extend(comp._deps)
            all_comps |= set(comp._deps)

        self._activate_transform(event)

        for comp in all_comps:
            comp.activate(self._program, mode)

    def _activate_transform(self, event=None):
        # TODO: this must be optimized.
        # Allow using as plain visual or in a scenegraph
        t = self.transform if (event is None) else event.render_transform
        self._program['map_local_to_nd'] = t.shader_map()
| StarcoderdataPython |
5083160 | import argparse
import collections
import datetime
import logging
import re
import sys
import mechanize
from bs4 import BeautifulSoup
BEAUTIFUL_SOUP_PARSER = 'html.parser'
# NYC Department of Finance landing page that embeds the violation-search
# form inside an iframe.
VIOLATIONS_URL = ('http://www1.nyc.gov/assets/finance/jump/'
                  'pay_parking_camera_violations.html')
# Marker strings searched for in the raw response HTML.
DELETED_VIOLATION_PATTERN = 'Violation Entered has been flagged as deleted'
INVALID_VIOLATION_NUMBER_PATTERN = 'Invalid Violation Number'
NO_RESULTS_FOR_PLATE = 'The plate number was not found.'

# One parking/camera violation row parsed from the results page.
Violation = collections.namedtuple(
    'Violation', 'description number plate issue_date amount')
class TicketCheckerQueryException(Exception):
  """Raised when the violation-search site reports an error for a query."""
  pass
class InvalidViolationNumberException(TicketCheckerQueryException):
  """Raised when the site rejects the supplied violation number as invalid."""
  pass
class _Query(object):
def __init__(self,
state=None, plate_type=None, plate_number=None, violation_number=None):
self.state = state
self.plate_type = plate_type
self.plate_number = plate_number
self.violation_number = violation_number
@classmethod
def byViolationNumber(cls, violation_number):
return cls(violation_number=violation_number)
@classmethod
def byPlate(cls, state, plate_type, plate_number):
return cls(state=state, plate_type=plate_type, plate_number=plate_number)
class TicketChecker(object):
  """Scrapes NYC's parking/camera violation search site via mechanize.

  Public entry points are getByViolationNumber() and getByPlate(); both
  return a list of Violation namedtuples (empty when nothing is found) or
  raise TicketCheckerQueryException / InvalidViolationNumberException.
  """

  def __init__(self, debug=False):
    self._br = mechanize.Browser()
    if debug:
      # Log debug information about HTTP redirects and Refreshes.
      logger = logging.getLogger("mechanize")
      logger.addHandler(logging.StreamHandler(sys.stdout))
      logger.setLevel(logging.DEBUG)
      self._br.set_debug_http(True)
      self._br.set_debug_responses(True)
      self._br.set_debug_redirects(True)

  def getByViolationNumber(self, violation_number):
    """Look up tickets for a single violation number."""
    return self._parseViolations(_Query.byViolationNumber(violation_number))

  def getByPlate(self, plate_number, state=None, plate_type=None):
    """Look up tickets for a license plate (state/type optional)."""
    return self._parseViolations(
        _Query.byPlate(state, plate_type, plate_number))

  def _parseViolations(self, query):
    """Drive the site's search form for *query* and parse the result page."""
    # Get first URL
    self._br.open(VIOLATIONS_URL)
    # Follow redirect contained in iframe src
    soup = BeautifulSoup(self._br.response().read(), BEAUTIFUL_SOUP_PARSER)
    self._br.open(soup.body.iframe['src'])
    # Set query parameters to form
    self._br.select_form(nr=0) # Form has no `name`
    # Because there is both a non-mobile and mobile version on the page, we need
    # to find the first one and set it.
    if query.violation_number:
      self._br.find_control(
          name='args.VIOLATION_NUMBER_NOL', nr=0).value = query.violation_number
    elif query.plate_number:
      self._br.find_control(name='args.PLATE', nr=0).value = query.plate_number
      if query.state:
        self._br.find_control(name='args.STATE', nr=0).value = [query.state,]
      if query.plate_type:
        self._br.find_control(
            name='args.TYPE', nr=0).value = [query.plate_type,]
    # Remove duplicate form controls, otherwise we get an error from the server.
    form_names_set = set([])
    for control in self._br.form.controls[:]:
      if control.name in form_names_set:
        self._br.form.controls.remove(control)
      else:
        form_names_set.add(control.name)
    # Submit form
    self._br.submit()
    # Look for violation response text
    html = self._br.response().read()
    # NOTE(review): the response body is read twice (html above, soup
    # below) -- confirm mechanize's response object supports re-reading,
    # otherwise the soup may be built from an empty string.
    soup = BeautifulSoup(self._br.response().read(), BEAUTIFUL_SOUP_PARSER)
    # Errors are put into a class `global-violation-prompt` div tag.
    error_tags = soup.find_all(class_='global-violation-prompt')
    # Common cases when violation paid or non-existent
    if DELETED_VIOLATION_PATTERN in html:
      return []
    elif NO_RESULTS_FOR_PLATE in html:
      return []
    elif INVALID_VIOLATION_NUMBER_PATTERN in html:
      raise InvalidViolationNumberException()
    if error_tags:
      raise TicketCheckerQueryException(str([tag.string for tag in error_tags]))
    else:
      match = re.search(r'No matches found for your violation search', html)
      if match:
        return [] # No tickets found
      else:
        # Parse list of violations found:
        return TicketChecker.get_violations(soup)

  @staticmethod
  def get_violations(soup):
    """Extract Violation tuples from a parsed results page."""
    violation_list = []
    violations = soup.find_all(class_='violation-group-detail')
    for violation_tag in violations:
      # The five value cells appear in a fixed order on the page:
      # number, plate, description, issue date (m/d/Y), amount.
      violation_values = violation_tag.find_all(
          class_='violation-details-single-value1')
      violation = Violation(
          number=violation_values[0].string,
          plate=violation_values[1].string,
          description=violation_values[2].string,
          issue_date=datetime.datetime.strptime(
              violation_values[3].string, '%m/%d/%Y').date(),
          # image_url_on_click = violation_tag.find(class_='nav-link').a['onclick']
          amount=violation_values[4].string)
      violation_list.append(violation)
    return violation_list
def main():
  # NOTE: this script is Python 2 (print statements below).
  parser = argparse.ArgumentParser()
  parser.add_argument('--violation', help='Violation #')
  parser.add_argument('--plate', help='License Plate #')
  parser.add_argument(
      '--state', help='2-letter State of the license plate, defaults to "NY"')
  parser.add_argument(
      '--plate_type',
      help='3-character type of the license plate, defaults to "PAS" (passenger)')
  parser.add_argument(
      '--debug', action='store_true', help='Turns on debugging of HTTP calls.')
  args = parser.parse_args()
  # Exactly one of --violation / --plate must be supplied.
  if args.violation and args.plate or not args.violation and not args.plate:
    print 'Specify either a violation # or license plate #.'
    sys.exit(1)
  ticket_checker = TicketChecker(debug=args.debug)
  if(args.violation):
    violations = ticket_checker.getByViolationNumber(
        violation_number=args.violation)
  else:
    violations = ticket_checker.getByPlate(
        plate_number=args.plate,
        plate_type=args.plate_type,
        state=args.state)
  if not violations:
    print 'No tickets found for ' + str(args)
  else:
    # NOTE(review): .format() is applied to the *concatenated* string, so
    # the "{}" placeholder must not appear inside str(args) -- confirm.
    print ('Found {} ticket(s) for ' + str(args)).format(len(violations))
    print 'Got tickets:'
    for i, violation in enumerate(violations):
      print '\t{}. {}: violation # {} for plate {} on {} for ${}'.format(
          i + 1,
          violation.description,
          violation.number,
          violation.plate,
          violation.issue_date,
          violation.amount)
main() | StarcoderdataPython |
3404222 | <filename>tests/tests_cli_common.py
"""
tests.tests_cli_common.py
~~~~~~~~~~~~~~~~~~~~~~~~~
Testing common cli functionality.
:copyright: (c) 2019 by <NAME>.
:license: Apache2, see LICENSE for more details.
"""
# -- Imports -------------------------------------------------------------------
import pytest
from .utils import Page
# -- Tests ---------------------------------------------------------------------
@pytest.mark.parametrize('dsn', [
    "ssh://127.0.0.1:21",
    "mailto:<EMAIL>",
])
def test_default_dsn_with_issues(runner, dsn):
    """ Passing unsupported DSN string. """
    # Unsupported URL schemes must abort with exit code 2 and name the
    # offending URL in the error message.
    result = runner([dsn])
    assert result['code'] == 2
    assert "Error: URL {} is not valid".format(dsn) in result['output']
def test_default_url_no_scheme_issue(server, runner):
    """ no domain cli execution test """
    # Strip the scheme off the test server's address, run the CLI against
    # the bare host, and verify the fully-qualified URL (scheme restored)
    # shows up in the output.
    # NOTE(review): '--fiff' looks like a typo for '--no-fiff' or similar --
    # confirm against the CLI's option list.
    address = server.router({'^/$': Page("").exists()})
    scheme, address = address.split("//")
    args = [address, '-r1', '-s', 'failed', '--no-progress', '--fiff']
    result = runner(args)
    assert result['code'] == 0
    assert "{}//{}".format(scheme, address) in result['output']
def test_help(runner):
    # The default exporter's option-section header must appear in --help.
    result = runner(['--help'])
    from deadlinks.exporters import Default
    section, _ = Default.options()
    assert section in result['output']
| StarcoderdataPython |
12802186 | from .actor import CategoricalPolicy, DeterministicPolicy, StateDependentGaussianPolicy, StateIndependentGaussianPolicy
from .base import MLP
from .conv import DQNBody, SACDecoder, SACEncoder, SLACDecoder, SLACEncoder
from .critic import (
ContinuousQFunction,
ContinuousQuantileFunction,
ContinuousVFunction,
DiscreteImplicitQuantileFunction,
DiscreteQFunction,
DiscreteQuantileFunction,
)
from .misc import (
ConstantGaussian,
CumProbNetwork,
Gaussian,
SACLinear,
make_quantile_nerwork,
make_stochastic_latent_variable_model,
)
| StarcoderdataPython |
1915159 | import win32gui
from win32con import *
from win32gui import ShowWindow, SetWindowPos, GetWindowLong, SetWindowLong, SetLayeredWindowAttributes
from win32gui import PostMessage, PostMessage, GetWindowRect, SetCapture, ReleaseCapture, GetCursorPos
from win32api import GetAsyncKeyState
from common.window import BaseWindow
from common.message import subscriber, subscribe
from common.constant import *
from timer import set_timer, kill_timer
from utils import get_message_name
@subscriber
class OverlayWindow(BaseWindow):
    """Borderless, semi-transparent, topmost overlay window (Win32).

    Message handlers are registered declaratively via @subscribe. The
    window translates non-client hit-test traffic into ordinary mouse
    messages so the borderless surface can still be dragged.

    NOTE(review): uses the bare `reduce` builtin and integer division in
    `255 * 90 / 100`, i.e. this appears to target Python 2 -- confirm.
    """
    def __init__(self):
        BaseWindow.__init__(self)
        # Bitmask of mouse/modifier buttons seen on the previous hit-test.
        self._modifier = 0

    def check_button_state(self, modifier, mask):
        # Compare one button bit against the remembered state:
        # 0 = unchanged, 1 = newly pressed, 2 = newly released.
        if (modifier & mask) == (self._modifier & mask):
            return 0
        elif modifier & mask != 0:
            return 1
        else:
            return 2

    @subscribe
    def on_message(self, hwnd, message, wparam, lparam):
        # Catch-all trace of every window message, for debugging.
        print("[%d] message=%-24s wparam=0x%08x lparam=0x%08x" % (hwnd, get_message_name(message), wparam, lparam))

    @subscribe(WM_CREATE)
    def on_create(self, hwnd, message, wparam, lparam):
        # Self-destruct after 30 s (see on_timer), then make the window a
        # layered, topmost tool window and show it at 200x200.
        set_timer(30000, self.on_timer)
        SetWindowLong(hwnd, GWL_STYLE, WS_POPUP | WS_CHILD)
        SetWindowLong(hwnd, GWL_EXSTYLE, WS_EX_LAYERED | WS_EX_TOPMOST | WS_EX_TOOLWINDOW)
        SetWindowPos(self.hwnd, 0, 0, 0, 200, 200, SWP_NOMOVE | SWP_NOZORDER)
        SetWindowPos(self.hwnd, HWND_TOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE)
        SetWindowPos(self.hwnd, 0, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOZORDER)
        ShowWindow(self.hwnd, SW_SHOW)

    @subscribe(WM_STYLECHANGED)
    def on_styled(self, hwnd, message, wparam, lparam):
        # Once the layered style is in effect, set 90% opacity.
        # NOTE(review): 255 * 90 / 100 yields a float on Python 3 --
        # confirm the intended Python version.
        if GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_LAYERED:
            SetLayeredWindowAttributes(hwnd, 0, 255 * 90 / 100, LWA_ALPHA);

    @subscribe(WM_NCHITTEST, WM_NCLBUTTONDOWN, WM_NCRBUTTONDOWN, WM_NCMBUTTONDOWN, WM_NCLBUTTONUP, WM_NCRBUTTONUP, WM_NCMBUTTONUP, WM_NCMOUSEMOVE)
    def on_hit(self, hwnd, message, wparam, lparam):
        # Convert a non-client event into a client-area mouse message with
        # window-relative coordinates packed into lparam.
        x, y = (lparam & 0xFFFF, lparam >> 16)
        left, top, right, bottom = GetWindowRect(hwnd)
        dx, dy = (x - left, y - top)
        a = dx | (dy << 16)
        # Fold the async key states into an MK_*-style modifier bitmask.
        modifier = reduce(lambda x, y: x << 1 | (1 if GetAsyncKeyState(y) != 0 else 0), reversed([
            VK_LBUTTON,
            VK_RBUTTON,
            VK_SHIFT,
            VK_CONTROL,
            VK_MBUTTON ]), 0)
        button_spec = [
            (MK_LBUTTON, (WM_MOUSEMOVE, WM_LBUTTONDOWN, WM_LBUTTONUP)),
            (MK_RBUTTON, (WM_MOUSEMOVE, WM_RBUTTONDOWN, WM_RBUTTONUP)),
            (MK_MBUTTON, (WM_MOUSEMOVE, WM_MBUTTONDOWN, WM_MBUTTONUP))]
        # Post the message matching the first button whose state changed;
        # otherwise treat the event as a plain mouse-move.
        # NOTE(review): the loop variable `message` shadows the handler
        # parameter of the same name.
        for mask, message in button_spec:
            p = self.check_button_state(modifier, mask)
            if p:
                PostMessage(hwnd, message[p], modifier, a)
                break
        else:
            PostMessage(hwnd, WM_MOUSEMOVE, modifier, a)
        # NOTE(review): stores wparam rather than the computed `modifier`
        # -- confirm which value check_button_state should compare against.
        self._modifier = wparam
        print("[%d] Hit (%d, %d)" % (hwnd, x, y))

    @subscribe(WM_LBUTTONDOWN, WM_RBUTTONDOWN, WM_MBUTTONDOWN)
    def on_mousedown(self, hwnd, message, wparam, lparam):
        # Remember the click offset inside the window and start a drag.
        self._dx, self._dy = (lparam & 0xFFFF, lparam >> 16)
        if wparam & MK_LBUTTON:
            SetCapture(hwnd)

    @subscribe(WM_LBUTTONUP, WM_RBUTTONUP, WM_MBUTTONUP)
    def on_mouseup(self, hwnd, message, wparam, lparam):
        # Release the capture once no mouse button remains pressed.
        if wparam & (MK_LBUTTON | MK_MBUTTON | MK_RBUTTON) == 0:
            ReleaseCapture()

    @subscribe(WM_MOUSEMOVE)
    def on_mousemove(self, hwnd, message, wparam, lparam):
        # While the left button is held, move the window so the cursor
        # keeps its original offset inside it.
        if wparam & MK_LBUTTON:
            x0, y0 = GetCursorPos()
            SetWindowPos(hwnd, 0, x0 - self._dx, y0 - self._dy, 0, 0, SWP_NOSIZE | SWP_NOZORDER | SWP_NOSENDCHANGING)

    @subscribe(WM_PAINT)
    def do_paint(self, hwnd, message, wparam, lparam):
        # No custom painting; the layered attributes handle appearance.
        pass

    @subscribe(WM_DESTROY)
    def on_destroy(self, hwnd, message, wparam, lparam):
        self.application.quit()

    def on_timer(self, timer_id, time):
        # One-shot timer set in on_create: tear the window down.
        kill_timer(timer_id)
        self.destroyWindow()
| StarcoderdataPython |
6529938 | #!/usr/bin/env python3
# Applies a commit or commits on a branch or branches
# USAGE:
# patch.py -c <commit-list> -b <branch-list> [-p] [-t]
# - <commit-list>: list of commit SHAs to apply.
# - <branch-list>: branches where the commits should be applied. * can be used as a wildcard
# - p: push the changes to <branch-list>
# - t: increment version number and create a tag
| StarcoderdataPython |
9731163 | """Pruned ResNetV1bs, implemented in Gluon."""
from __future__ import division
import json
import os
from mxnet.context import cpu
from mxnet.gluon import nn
from mxnet import ndarray
from ..resnetv1b import ResNetV1b
from ..resnetv1b import BasicBlockV1b
from ..resnetv1b import BottleneckV1b
# Public factory functions exported by this module.
__all__ = ['resnet18_v1b_89', 'resnet50_v1d_86', 'resnet50_v1d_48', 'resnet50_v1d_37',
           'resnet50_v1d_11', 'resnet101_v1d_76', 'resnet101_v1d_73'
           ]
def prune_gluon_block(net, prefix, params_shapes, params=None, pretrained=False, ctx=cpu(0)):
    """
    Recursively resize *net*'s layers to the pruned channel shapes and,
    optionally, load the pruned pretrained parameters.

    :param params_shapes: dictionary of shapes of convolutional weights
    :param prefix: prefix of the original resnet50_v1d
    :param pretrained: Boolean specifying if the pretrained model parameters needs to be loaded
    :param net: original network that is required to be pruned
    :param params: dictionary of parameters for the pruned network. Size of the parameters in
    this dictionary tells what
    should be the size of channels of each convolution layer.
    :param ctx: cpu(0)
    :return: "net"
    """
    # Walk the block's immediate children (private Gluon API). Checkpoint
    # keys use the reference prefix "resnetv1d", so each layer name is
    # mapped from this net's prefix onto it before lookup.
    for _, layer in net._children.items():
        if pretrained:
            if isinstance(layer, nn.BatchNorm):
                # Resize and load all four BN statistics from the checkpoint.
                params_layer = layer._collect_params_with_prefix()
                for param_name in ['beta', 'gamma', 'running_mean', 'running_var']:
                    param_val = params[layer.name.replace(prefix, "resnetv1d") + "_" + param_name]
                    layer.params.get(param_name)._shape = param_val.shape
                    params_layer[param_name]._load_init(param_val, ctx=ctx)
        if isinstance(layer, nn.Conv2D):
            # Shrink the conv's output channels to the pruned weight shape.
            param_shape = params_shapes[layer.name.replace(prefix, "resnetv1d") + "_weight"]
            layer._channels = param_shape[0]
            layer._kwargs['num_filter'] = param_shape[0]
            params_layer = layer._collect_params_with_prefix()
            for param_name in ['weight']:
                param_shape = params_shapes[
                    layer.name.replace(prefix, "resnetv1d") + "_" + param_name]
                layer.params.get(param_name)._shape = param_shape
                if pretrained:
                    param_val = params[layer.name.replace(prefix, "resnetv1d") + "_" + param_name]
                    params_layer[param_name]._load_init(param_val, ctx=ctx)
        if isinstance(layer, nn.Dense):
            # Resize the classifier's input width to match the pruned trunk.
            layer._in_units = params_shapes[layer.name.replace(prefix, "resnetv1d") + "_weight"][1]
            params_layer = layer._collect_params_with_prefix()
            for param_name in ['weight', 'bias']:
                param_shape = params_shapes[
                    layer.name.replace(prefix, "resnetv1d") + "_" + param_name]
                layer.params.get(param_name)._shape = param_shape
                if pretrained:
                    param_val = params[layer.name.replace(prefix, "resnetv1d") + "_" + param_name]
                    params_layer[param_name]._load_init(param_val, ctx=ctx)
        else:
            # NOTE(review): this 'else' pairs with the Dense check above, so
            # recursion also runs for Conv2D/BatchNorm leaves (harmless --
            # they have no children) -- confirm this is intended.
            prune_gluon_block(layer, prefix, params_shapes, params, pretrained, ctx)
def resnet18_v1b_89(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1b-18_2.6x model. Uses resnet18_v1b construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b_', **kwargs)
    dirname = os.path.dirname(__file__)
    # Pruned (2.6x-compressed) channel shapes ship as JSON next to this module.
    json_filename = os.path.join(dirname, 'resnet%d_v%db_%.1fx' % (18, 1, 2.6) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file('resnet%d_v%db_%.1fx' % (18, 1, 2.6), tag=pretrained,
                                     root=root)
        prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
    else:
        # Resize layers only; weights stay randomly initialized.
        prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)
    if pretrained:
        # Attach ImageNet-1k class metadata for pretrained checkpoints.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet50_v1d_86(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-50_1.8x model. Uses resnet50_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True,
                      name_prefix='resnetv1d_', **kwargs)
    dirname = os.path.dirname(__file__)
    # Pruned (1.8x-compressed) channel shapes ship as JSON next to this module.
    json_filename = os.path.join(dirname, 'resnet%d_v%dd_%.1fx' % (50, 1, 1.8) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file('resnet%d_v%dd_%.1fx' % (50, 1, 1.8), tag=pretrained,
                                     root=root)
        prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
    else:
        # Resize layers only; weights stay randomly initialized.
        prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)
    if pretrained:
        # Attach ImageNet-1k class metadata for pretrained checkpoints.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet50_v1d_48(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-50_3.6x model. Uses resnet50_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True,
                      name_prefix='resnetv1d_', **kwargs)
    dirname = os.path.dirname(__file__)
    # Pruned (3.6x-compressed) channel shapes ship as JSON next to this module.
    json_filename = os.path.join(dirname, 'resnet%d_v%dd_%.1fx' % (50, 1, 3.6) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file('resnet%d_v%dd_%.1fx' % (50, 1, 3.6), tag=pretrained,
                                     root=root)
        prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
    else:
        # Resize layers only; weights stay randomly initialized.
        prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)
    if pretrained:
        # Attach ImageNet-1k class metadata for pretrained checkpoints.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet50_v1d_37(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-50_5.9x model. Uses resnet50_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True,
                      name_prefix='resnetv1d_', **kwargs)
    dirname = os.path.dirname(__file__)
    # Pruned (5.9x-compressed) channel shapes ship as JSON next to this module.
    json_filename = os.path.join(dirname, 'resnet%d_v%dd_%.1fx' % (50, 1, 5.9) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file('resnet%d_v%dd_%.1fx' % (50, 1, 5.9), tag=pretrained,
                                     root=root)
        prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
    else:
        # Resize layers only; weights stay randomly initialized.
        prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)
    if pretrained:
        # Attach ImageNet-1k class metadata for pretrained checkpoints.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet50_v1d_11(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-50_8.8x model. Uses resnet50_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True,
                      name_prefix='resnetv1d_', **kwargs)
    dirname = os.path.dirname(__file__)
    # Pruned (8.8x-compressed) channel shapes ship as JSON next to this module.
    json_filename = os.path.join(dirname, 'resnet%d_v%dd_%.1fx' % (50, 1, 8.8) + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file('resnet%d_v%dd_%.1fx' % (50, 1, 8.8), tag=pretrained,
                                     root=root)
        prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
    else:
        # Resize layers only; weights stay randomly initialized.
        prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)
    if pretrained:
        # Attach ImageNet-1k class metadata for pretrained checkpoints.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
def resnet101_v1d_76(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-101_1.9x (pruned) model.

    Builds a resnet101_v1d backbone (see resnetv1b.py) and prunes its channels
    down to the shapes stored in the JSON file shipped next to this module.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    stem = 'resnet%d_v%dd_%.1fx' % (101, 1, 1.9)
    net = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True,
                    name_prefix='resnetv1d_', **kwargs)
    # Per-layer pruned parameter shapes live in a JSON file beside this module.
    shape_file = os.path.join(os.path.dirname(__file__), stem + ".json")
    with open(shape_file, "r") as fh:
        shapes = json.load(fh)
    if pretrained:
        from ..model_store import get_model_file
        weights = get_model_file(stem, tag=pretrained, root=root)
        prune_gluon_block(net, net.name, shapes, params=ndarray.load(weights),
                          pretrained=True, ctx=ctx)
        # Attach ImageNet class metadata for pretrained models.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    else:
        prune_gluon_block(net, net.name, shapes, params=None, pretrained=False, ctx=ctx)
    return net
def resnet101_v1d_73(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1d-101_2.2x (pruned) model.

    Builds a resnet101_v1d backbone (see resnetv1b.py) and prunes its channels
    down to the shapes stored in the JSON file shipped next to this module.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    stem = 'resnet%d_v%dd_%.1fx' % (101, 1, 2.2)
    net = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True,
                    name_prefix='resnetv1d_', **kwargs)
    # Per-layer pruned parameter shapes live in a JSON file beside this module.
    shape_file = os.path.join(os.path.dirname(__file__), stem + ".json")
    with open(shape_file, "r") as fh:
        shapes = json.load(fh)
    if pretrained:
        from ..model_store import get_model_file
        weights = get_model_file(stem, tag=pretrained, root=root)
        prune_gluon_block(net, net.name, shapes, params=ndarray.load(weights),
                          pretrained=True, ctx=ctx)
        # Attach ImageNet class metadata for pretrained models.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    else:
        prune_gluon_block(net, net.name, shapes, params=None, pretrained=False, ctx=ctx)
    return net
| StarcoderdataPython |
5193555 | <gh_stars>0
#! /usr/bin/env python
import os
import sys
import fnmatch
import time
import shutil
import subprocess
import stat
def readList(file):
    """Read *file* and return its lines as a list, skipping comment lines.

    Lines starting with '#' are dropped.  Blank lines are dropped too: the
    previous implementation indexed ``line[0]`` unconditionally and raised
    IndexError on an empty line.  A list comprehension is used instead of
    ``filter`` so the result is a real (indexable) list on both Python 2
    and Python 3, and the file handle is closed deterministically.
    """
    with open(file) as fh:
        lines = fh.read().splitlines()
    return [line for line in lines if line and line[0] != "#"]
def cleanup(path):
    """Ensure *path* exists as a fresh, empty directory tree."""
    # Wipe any previous contents first, then recreate the directory.
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)
    os.makedirs(path)
def remove(path):
    """Delete the directory tree at *path*; a no-op if it does not exist."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
# --- main script body --------------------------------------------------------
# info/directories.txt: its first non-comment line names the benchmark
# target directory.
directoriesPath = "info/directories.txt"
directoriesList = readList(directoriesPath)
benchmarksPath = directoriesList[0]
# NOTE(review): paths below are joined by plain string concatenation, so
# benchmarksPath is assumed to end with '/' -- confirm in directories.txt.
tempPath = "temp_/"
# Start from an empty benchmark directory.
cleanup(benchmarksPath)
# Download and unpack the SAT Competition 2017 "Main" benchmark archive.
subprocess.call("wget --no-check-certificate https://baldur.iti.kit.edu/sat-competition-2017/benchmarks/Main.zip -P " + tempPath, shell=True)
subprocess.call("unzip " + tempPath + "Main -d " + benchmarksPath, shell=True)
remove(tempPath)
# Flatten the extracted "NoLimits" folder into the benchmark root.
files = os.listdir(benchmarksPath + "NoLimits")
for file in files:
    shutil.move(benchmarksPath + "NoLimits/" + file, benchmarksPath)
remove(benchmarksPath + "NoLimits")
| StarcoderdataPython |
3372945 | <reponame>adonaifariasdev/cursoemvideo-python3
# Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores ‘M’ ou ‘F’.
# Caso esteja errado, peça a digitação novamente até ter um valor correto.
# Keep asking until the first character of the answer is a valid marker.
# BUGFIX: the original indexed [0] on the raw answer, so pressing Enter on
# an empty line raised IndexError instead of re-prompting.
sexo = ''
while sexo not in ('M', 'F'):
    resposta = input('Qual o sexo? [M/F]: ').strip().upper()
    if resposta:
        sexo = resposta[0]
    if sexo not in ('M', 'F'):
        print('Opção inválida. Tente novamente!')
print('Sexo {} registrado com sucesso.'.format(sexo))
1988740 | <reponame>jmjacquet/IronWeb<filename>pyafipws/pyrece.py<gh_stars>0
#!usr/bin/python
# -*- coding: utf-8-*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Aplicativo AdHoc Para generación de Facturas Electrónicas"
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright (C) 2009-2017 <NAME>"
__license__ = "GPL 3.0"
__version__ = "1.31a"
from datetime import datetime
from decimal import Decimal, getcontext, ROUND_DOWN
import os
import sys
import wx
import gui
import unicodedata
import traceback
from ConfigParser import SafeConfigParser
import wsaa, wsfev1, wsfexv1
from utils import SimpleXMLElement, SoapClient, SoapFault, date
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
#from PyFPDF.ejemplos.form import Form
from pyfepdf import FEPDF
# Formatos de archivos:
from formatos import formato_xml, formato_csv, formato_dbf, formato_txt, formato_json
try:
from numeros import conv_text
except:
conv_text = lambda num: str(num)
HOMO = False
DEBUG = '--debug' in sys.argv
CONFIG_FILE = "rece.ini"
ACERCA_DE = u"""
PyRece: Aplicativo AdHoc para generar Facturas Electrónicas
Copyright (C) 2008-2015 <NAME> <EMAIL>
Este progarma es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional y descargas ver:
http://www.sistemasagiles.com.ar/
"""
INSTRUCTIVO = U"""
Forma de uso:
* Examinar: para buscar el archivo a procesar (opcional)
* Cargar: para leer los datos del archivo de facturas a procesar
* Autenticar: para iniciar la sesión en los servidores de AFIP (obligatorio antes de autorizar)
* Marcar Todo: para seleccionar todas las facturas
* Autorizar: para autorizar las facturas seleccionadas, completando el CAE y demás datos
* Autorizar Lote: para autorizar en un solo lote las facturas seleccionadas
* Grabar: para almacenar los datos procesados en el archivo de facturas
* Previsualizar: para ver por pantalla la factura seleccionadas
* Enviar: para envia por correo electrónico las facturas seleccionadas
Para solicitar soporte comercial, escriba a <EMAIL>
"""
class PyRece(gui.Controller):
    def on_load(self, event):
        """Initialise UI state: empty grid, default webservice, cbte types."""
        self.cols = []
        self.items = []
        self.paths = [entrada]
        self.token = self.sign = ""
        self.smtp = None
        self.webservice = None
        # Auto-load the configured input file when it already exists on disk.
        if entrada and os.path.exists(entrada):
            self.cargar()
        self.components.cboWebservice.value = DEFAULT_WEBSERVICE
        self.on_cboWebservice_click(event)
        # AFIP voucher-type code -> human readable description.
        self.tipos = {
            1:u"Factura A",
            2:u"Notas de Débito A",
            3:u"Notas de Crédito A",
            4:u"Recibos A",
            5:u"Notas de Venta al contado A",
            6:u"Facturas B",
            7:u"Notas de Débito B",
            8:u"Notas de Crédito B",
            9:u"Recibos B",
            10:u"Notas de Venta al contado B",
            19:u"Facturas de Exportación",
            20:u"Nota de Débito por Operaciones con el Exterior",
            21:u"Nota de Crédito por Operaciones con el Exterior",
            39:u"Otros comprobantes A que cumplan con la R.G. N° 3419",
            40:u"Otros comprobantes B que cumplan con la R.G. N° 3419",
            60:u"Cuenta de Venta y Líquido producto A",
            61:u"Cuenta de Venta y Líquido producto B",
            63:u"Liquidación A",
            64:u"Liquidación B",
            11:u"Factura C",
            12:u"Nota de Débito C",
            13:u"Nota de Crédito C",
            15:u"Recibo C",
            }
        self.component.bgcolor = "light gray"
        # disable column sorting
        ##self.components.lvwListado.GetColumnSorter = lambda: lambda x,y: 0
    def set_cols(self, cols):
        """Rebuild the ListView columns from the given column names."""
        self.__cols = cols
        lv = self.components.lvwListado
        # remove old columns:
        lv.clear_all()
        # insert new columns
        for col in cols:
            ch = gui.ListColumn(lv, name=col, text=col.replace("_"," ").title(), align="left")
    def get_cols(self):
        """Return the current list of column names."""
        return self.__cols
    # Assigning to .cols refreshes the ListView columns as a side effect.
    cols = property(get_cols, set_cols)
    def set_items(self, items):
        """Store the row dicts and refresh the ListView display."""
        cols = self.cols
        self.__items = items
        # Coerce every cell to a displayable (unicode) string.
        def convert_str(value):
            if value is None:
                return ''
            elif isinstance(value, str):
                return unicode(value,'latin1')
            elif isinstance(value, unicode):
                return value
            else:
                return str(value)
        self.components.lvwListado.items = [[convert_str(item[col]) for col in cols] for item in items]
        wx.SafeYield()
    def get_items(self):
        """Return the current list of row dicts."""
        return self.__items
    # Assigning to .items refreshes the ListView rows as a side effect.
    items = property(get_items, set_items)
    def get_selected_items(self):
        """Yield (index, list_item) pairs for the rows selected in the UI."""
        for it in self.components.lvwListado.get_selected_items():
            yield it.index, it
    def set_selected_items(self, selected):
        """Re-select the given ListView items (after a refresh clears them)."""
        for it in selected:
            it.selected = True
    def set_paths(self, paths):
        """Store the input file paths and echo them in the path textbox."""
        self.__paths = paths
        self.components.txtArchivo.value = ', '.join([fn for fn in paths])
    def get_paths(self):
        """Return the current list of input file paths."""
        return self.__paths
    paths = property(get_paths, set_paths)
    def log(self, msg):
        """Prepend *msg* to the on-screen status box and append it to pyrece.log."""
        if not isinstance(msg, unicode):
            msg = unicode(msg, "latin1","ignore")
        print "LOG", msg
        self.components.txtEstado.value = msg + u"\n" + self.components.txtEstado.value
        wx.SafeYield()
        f = None
        # Best-effort file logging: a logging failure must never crash the UI.
        try:
            f = open("pyrece.log","a")
            f.write("%s: " % (datetime.now(), ))
            f.write(msg.encode("ascii", "ignore"))
            f.write("\n\r")
        except Exception, e:
            print e
        finally:
            if f:
                f.close()
    def progreso(self, value):
        """Update the progress bar to the percentage of processed items."""
        if self.items:
            per = (value+1)/float(len(self.items))*100
            self.components.pbProgreso.value = per
            wx.SafeYield()
    def error(self, code, text):
        """Log the current traceback and show an alert dialog for the error."""
        ex = traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback)
        self.log(''.join(ex))
        gui.alert(text, 'Error %s' % code)
    def verifica_ws(self):
        """Ensure a webservice is selected and an AFIP session exists, else raise."""
        if not self.ws:
            gui.alert("Debe seleccionar el webservice a utilizar!", 'Advertencia')
            raise RuntimeError()
        if not self.token or not self.sign:
            gui.alert("Debe autenticarse con AFIP!", 'Advertencia')
            raise RuntimeError()
        self.ws.Dummy()
    def on_btnMarcarTodo_click(self, event):
        """Select every row in the invoice list."""
        for it in self.components.lvwListado.items:
            it.selected = True
    def on_menu_consultas_dummy_click(self, event):
        """Ping the selected AFIP webservice and show its server status."""
        ##self.verifica_ws()
        try:
            if self.webservice in ("wsfev1", "wsfexv1"):
                self.ws.Dummy()
                msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
                    self.ws.AppServerStatus, self.ws.DbServerStatus, self.ws.AuthServerStatus)
                location = self.ws.client.location
            else:
                msg = "%s no soportado" % self.webservice
                location = ""
            gui.alert(msg, location)
        except Exception, e:
            self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastCBTE_click(self, event):
##self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Último Nro. Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Último Nro. Comprobante", '2')
if not result:
return
ptovta = result
try:
if self.webservice=="wsfev1":
ultcmp = "%s (wsfev1)" % self.ws.CompUltimoAutorizado(tipocbte, ptovta)
elif self.webservice=="wsfexv1":
ultcmp = "%s (wsfexv1)" % self.ws.GetLastCMP(tipocbte, ptovta)
gui.alert(u"Último comprobante: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s" % (ultcmp, self.tipos[tipocbte],
tipocbte, ptovta), u'Consulta Último Nro. Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_getCAE_click(self, event):
self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Comprobante", '2')
if not result:
return
ptovta = result
result = gui.prompt(u"Nº de comprobante",
u"Consulta Comprobante", '2')
if not result:
return
nrocbte = result
try:
if self.webservice=="wsfe":
cae = 'no soportado'
elif self.webservice=="wsfev1":
cae = "%s (wsfev1)" % self.ws.CompConsultar(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
self.log('ImpNeto: %s' % self.ws.ImpNeto)
self.log('ImptoLiq: %s' % self.ws.ImptoLiq)
self.log('EmisionTipo: %s' % self.ws.EmisionTipo)
elif self.webservice=="wsfexv1":
cae = "%s (wsfexv1)" % self.ws.GetCMP(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
gui.alert(u"CAE: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s\nNumero: %s\nFecha: %s" % (
cae, self.tipos[tipocbte],
tipocbte, ptovta, nrocbte, self.ws.FechaCbte),
u'Consulta Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastID_click(self, event):
##self.verifica_ws()
try:
if self.webservice=="wsfexv1":
ultnro = self.ws.GetLastID()
else:
ultnro = None
gui.alert(u"Último ID (máximo): %s" % (ultnro),
u'Consulta Último ID')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
    def on_menu_ayuda_acercade_click(self, event):
        """Show the about dialog."""
        text = ACERCA_DE
        gui.alert(text, u'Acerca de PyRece Versión %s' % __version__)
    def on_menu_ayuda_instructivo_click(self, event):
        """Show the usage instructions dialog."""
        text = INSTRUCTIVO
        gui.alert(text, u'Instructivo de PyRece')
    def on_menu_ayuda_limpiar_click(self, event):
        """Clear the status textbox."""
        self.components.txtEstado.value = ""
    def on_menu_ayuda_mensajesXML_click(self, event):
        """Dump the last SOAP request/response XML into the status box."""
        self.verifica_ws()
        self.components.txtEstado.value = u"XmlRequest:\n%s\n\nXmlResponse:\n%s" % (
            self.ws.xml_request, self.ws.xml_response)
        self.component.size = (592, 517)
    def on_menu_ayuda_estado_click(self, event):
        """Toggle the window between expanded and collapsed heights."""
        if self.component.size[1]<517:
            self.component.size = (592, 517)
        else:
            self.component.size = (592, 265)
    def on_menu_ayuda_configuracion_click(self, event):
        """Show the raw configuration file contents in the status box."""
        self.components.txtEstado.value = open(CONFIG_FILE).read()
        self.component.size = (592, 517)
    def on_cboWebservice_click(self, event):
        """Instantiate the client matching the selected webservice.

        Switching webservice drops any previous AFIP session (token/sign).
        """
        self.webservice = self.components.cboWebservice.value
        self.ws = None
        self.token = None
        self.sign = None
        if self.webservice == "wsfev1":
            self.ws = wsfev1.WSFEv1()
        elif self.webservice == "wsfexv1":
            self.ws = wsfexv1.WSFEXv1()
def on_btnAutenticar_click(self, event):
try:
if self.webservice in ('wsfe', ):
service = "wsfe"
elif self.webservice in ('wsfev1', ):
self.log("Conectando WSFEv1... " + wsfev1_url)
self.ws.Conectar("",wsfev1_url, proxy_dict, timeout=60, cacert=CACERT, wrapper=WRAPPER)
self.ws.Cuit = cuit
service = "wsfe"
elif self.webservice in ('wsfex', 'wsfexv1'):
self.log("Conectando WSFEXv1... " + wsfexv1_url)
self.ws.Conectar("",wsfexv1_url, proxy_dict, cacert=CACERT, wrapper=WRAPPER)
self.ws.Cuit = cuit
service = "wsfex"
else:
gui.alert('Debe seleccionar servicio web!', 'Advertencia')
return
self.log("Creando TRA %s ..." % service)
ws = wsaa.WSAA()
tra = ws.CreateTRA(service)
self.log("Frimando TRA (CMS) con %s %s..." % (str(cert),str(privatekey)))
cms = ws.SignTRA(str(tra),str(cert),str(privatekey))
self.log("Llamando a WSAA... " + wsaa_url)
ws.Conectar("", wsdl=wsaa_url, proxy=proxy_dict, cacert=CACERT, wrapper=WRAPPER)
self.log("Proxy: %s" % proxy_dict)
xml = ws.LoginCMS(str(cms))
self.log("Procesando respuesta...")
if xml:
self.token = ws.Token
self.sign = ws.Sign
if DEBUG:
self.log("Token: %s" % self.token)
self.log("Sign: %s" % self.sign)
elif self.token and self.sign:
self.log("Token: %s... OK" % self.token[:10])
self.log("Sign: %s... OK" % self.sign[:10])
if self.webservice in ("wsfev1", "wsfexv1"):
self.ws.Token = self.token
self.ws.Sign = self.sign
if xml:
gui.alert('Autenticado OK!', 'Advertencia')
else:
gui.alert(u'Respuesta: %s' % ws.XmlResponse, u'No se pudo autenticar: %s' % ws.Excepcion)
except SoapFault,e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
    def examinar(self):
        """Show a file-open dialog and store the chosen path in self.paths."""
        filename = entrada
        wildcard = ["Planillas Excel (*.xlsx)|*.xlsx",
                    "Archivos CSV (*.csv)|*.csv",
                    "Archivos XML (*.xml)|*.xml",
                    "Archivos TXT (*.txt)|*.txt",
                    "Archivos DBF (*.dbf)|*.dbf",
                    "Archivos JSON (*.json)|*.json",
                    ]
        # When the configured input file is XML, list the XML filter first.
        if entrada.endswith("xml"):
            wildcard.sort(reverse=True)
        result = gui.open_file('Abrir', 'datos', filename, '|'.join(wildcard))
        if not result:
            return
        self.paths = [result]
    def on_menu_archivo_abrir_click(self, event):
        """Menu handler: pick an input file, then load it."""
        self.examinar()
        self.cargar()
    def on_menu_archivo_cargar_click(self, event):
        """Menu handler: (re)load the currently selected file."""
        self.cargar()
    def cargar(self):
        """Load invoice rows from self.paths (CSV/XLSX/XML/TXT/DBF/JSON).

        The first row of the flattened data carries the column names; the
        remaining rows are converted to dicts and stored in self.items.
        """
        try:
            items = []
            for fn in self.paths:
                if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
                    filas = formato_csv.leer(fn)
                    items.extend(filas)
                elif fn.lower().endswith(".xml"):
                    regs = formato_xml.leer(fn)
                    items.extend(formato_csv.aplanar(regs))
                elif fn.lower().endswith(".txt"):
                    regs = formato_txt.leer(fn)
                    items.extend(formato_csv.aplanar(regs))
                elif fn.lower().endswith(".dbf"):
                    reg = formato_dbf.leer(conf_dbf, carpeta=os.path.dirname(fn))
                    items.extend(formato_csv.aplanar(reg.values()))
                elif fn.lower().endswith(".json"):
                    regs = formato_json.leer(fn)
                    items.extend(formato_csv.aplanar(regs))
                else:
                    self.error(u'Formato de archivo desconocido: %s', unicode(fn))
            if len(items) < 2:
                gui.alert(u'El archivo no tiene datos válidos', 'Advertencia')
            # extract the column names (ignore empty XLSX cells)
            cols = items and [str(it).strip() for it in items[0] if it] or []
            if DEBUG: print "Cols",cols
            # build one dict per data row
            items = [dict([(col,item[i]) for i, col in enumerate(cols)])
                        for item in items[1:]]
            self.cols = cols
            self.items = items
        except Exception,e:
            self.error(u'Excepción',unicode(e))
            ##raise
    def on_menu_archivo_guardar_click(self, event):
        """Show a save-file dialog and write the grid to the chosen file."""
        filename = entrada
        wildcard = ["Archivos CSV (*.csv)|*.csv", "Archivos XML (*.xml)|*.xml",
                    "Archivos TXT (*.txt)|*.txt", "Archivos DBF (*.dbf)|*.dbf",
                    "Archivos JSON (*.json)|*.json",
                    "Planillas Excel (*.xlsx)|*.xlsx",
                    ]
        # When the configured input file is XML, list the XML filter first.
        if entrada.endswith("xml"):
            wildcard.sort(reverse=True)
        if self.paths:
            path = self.paths[0]
        else:
            path = salida
        result = gui.save_file(title='Guardar', filename=path,
                               wildcard='|'.join(wildcard))
        if not result:
            return
        fn = result[0]
        self.grabar(fn)
    def grabar(self, fn=None):
        """Write the current rows to *fn*, choosing the writer by extension.

        When *fn* is None the configured ``salida`` is used; a leading '-'
        in ``salida`` means "append this suffix to the input file name".
        """
        try:
            if fn is None and salida:
                if salida.startswith("-") and self.paths:
                    fn = os.path.splitext(self.paths[0])[0] + salida
                else:
                    fn = salida
            elif not fn:
                raise RuntimeError("Debe indicar un nombre de archivo para grabar")
            if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
                formato_csv.escribir([self.cols] + [[item[k] for k in self.cols] for item in self.items], fn)
            else:
                # Non-tabular formats need the nested (de-flattened) records.
                regs = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in self.items])
                if fn.endswith(".xml"):
                    formato_xml.escribir(regs, fn)
                elif fn.endswith(".txt"):
                    formato_txt.escribir(regs, fn)
                elif fn.endswith(".dbf"):
                    formato_dbf.escribir(regs, conf_dbf, carpeta=os.path.dirname(fn))
                elif fn.endswith(".json"):
                    formato_json.escribir(regs, fn)
                else:
                    self.error(u'Formato de archivo desconocido', unicode(fn))
            gui.alert(u'Se guardó con éxito el archivo:\n%s' % (unicode(fn),), 'Guardar')
        except Exception, e:
            self.error(u'Excepción',unicode(e))
def on_btnAutorizar_click(self, event):
self.verifica_ws()
try:
ok = procesadas = rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
selected = []
for i, item in self.get_selected_items():
kargs = item.copy()
selected.append(item)
kargs['cbt_desde'] = kargs['cbt_hasta'] = kargs ['cbt_numero']
for key in kargs:
if isinstance(kargs[key], basestring):
kargs[key] = kargs[key].replace(",",".")
if self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
if id:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
for l in range(1,1000):
k = 'opcional_%%s_%s' % l
if (k % 'id') in kargs:
op_id = kargs[k % 'id']
valor = kargs[k % 'valor']
if op_id:
self.ws.AgregarOpcional(op_id, valor)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
elif self.webservice == 'wsfexv1':
kargs['cbte_nro'] = kargs ['cbt_numero']
kargs['permiso_existente'] = kargs['permiso_existente'] or ""
encabezado = {}
for k in ('tipo_cbte', 'punto_vta', 'cbte_nro', 'fecha_cbte',
'imp_total', 'tipo_expo', 'permiso_existente', 'pais_dst_cmp',
'nombre_cliente', 'cuit_pais_cliente', 'domicilio_cliente',
'id_impositivo', 'moneda_id', 'moneda_ctz',
'obs_comerciales', 'obs_generales', 'forma_pago', 'incoterms',
'idioma_cbte', 'incoterms_ds'):
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'codigo%s' % l
if k in kargs:
codigo = kargs['codigo%s' % l]
ds = kargs['descripcion%s' % l]
qty = kargs['cantidad%s' % l]
umed = kargs['umed%s' % l]
precio = kargs['precio%s' % l]
importe = kargs['importe%s' % l]
bonif = kargs.get('bonif%s' % l)
self.ws.AgregarItem(codigo, ds, qty, umed, precio, importe, bonif)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
if id:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.Authorize(kargs['id'])
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
# actualizo la factura
for k in ('cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
if kargs.get(k):
item[k] = kargs[k] if kargs[k] is not None else ""
self.items[i] = item
self.log(u"ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
procesadas += 1
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.progreso(i)
self.items = self.items
self.set_selected_items(selected)
self.progreso(len(self.items) - 1)
gui.alert(u'Proceso finalizado, procesadas %d\n\n'
'Aceptadas: %d\n'
'Rechazadas: %d' % (procesadas, ok, rechazadas),
u'Autorización')
self.grabar()
except SoapFault, e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except KeyError, e:
self.error("Error",u'Campo obligatorio no encontrado: %s' % e)
except Exception, e:
self.error(u'Excepción',unicode(e))
finally:
if DEBUG:
if self.webservice == 'wsfev1' and DEBUG:
print self.ws.XmlRequest
print self.ws.XmlResponse
def on_btnAutorizarLote_click(self, event):
self.verifica_ws()
if not self.items: return
try:
#getcontext().prec = 2
ok = 0
rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
cbt_desde = cbt_hasta = None
datos = {
'tipo_cbte': None,
'punto_vta': None,
'fecha_cbte': None,
'fecha_venc_pago': None,
'fecha_cbte': None,
'fecha_venc_pago': None,
'fecha_serv_desde': None,
'fecha_serv_hasta': None,
'moneda_id': None,
'moneda_ctz': None,
'id': None,
}
importes = {
'imp_total': Decimal(0),
'imp_tot_conc': Decimal(0),
'imp_neto': Decimal(0),
'imp_iva':Decimal(0),
'imp_op_ex': Decimal(0),
'imp_trib': Decimal(0),
}
for l in range(1,5):
k = 'iva_%%s_%s' % l
datos[k % 'id'] = None
importes[k % 'base_imp'] = Decimal(0)
importes[k % 'importe'] = Decimal(0)
for l in range(1,10):
k = 'tributo_%%s_%s' % l
datos[k % 'id'] = None
datos[k % 'desc'] = None
importes[k % 'base_imp'] = Decimal(0)
datos[k % 'alic'] = None
importes[k % 'importe'] = Decimal(0)
for i, item in self.get_selected_items():
if cbt_desde is None or int(item['cbt_numero']) < cbt_desde:
cbt_desde = int(item['cbt_numero'])
if cbt_hasta is None or int(item['cbt_numero']) > cbt_hasta:
cbt_hasta = int(item['cbt_numero'])
for key in item:
if key in datos:
if datos[key] is None:
datos[key] = item[key]
elif datos[key] != item[key]:
raise RuntimeError(u"%s tiene valores distintos en el lote!" % key)
if key in importes and item[key]:
importes[key] = importes[key] + Decimal("%.2f" % float(str(item[key].replace(",","."))))
kargs = {'cbt_desde': cbt_desde, 'cbt_hasta': cbt_hasta}
kargs.update({'tipo_doc': 99, 'nro_doc': '0'})
kargs.update(datos)
kargs.update(importes)
if kargs['fecha_serv_desde'] and kargs['fecha_serv_hasta']:
kargs['presta_serv'] = 1
kargs['concepto'] = 2
else:
kargs['presta_serv'] = 0
kargs['concepto'] = 1
del kargs['fecha_serv_desde']
del kargs['fecha_serv_hasta']
for key, val in importes.items():
importes[key] = val.quantize(Decimal('.01'), rounding=ROUND_DOWN)
if 'id' not in kargs or kargs['id'] == "":
id = long(kargs['cbt_desde'])
id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
kargs['id'] = id
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
if '--test' in sys.argv:
kargs['cbt_desde'] = 777
kargs['fecha_cbte'] = '20110802'
kargs['fecha_venc_pago'] = '20110831'
if gui.confirm("Confirma Lote:\n"
"Tipo: %(tipo_cbte)s Desde: %(cbt_desde)s Hasta %(cbt_hasta)s\n"
"Neto: %(imp_neto)s IVA: %(imp_iva)s Trib.: %(imp_trib)s Total: %(imp_total)s"
% kargs, "Autorizar lote:"):
if self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
for i, item in self.get_selected_items():
for key in ('id', 'cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
item[key] = kargs[key] if kargs[key] is not None else ""
self.items[i] = item
self.log("ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.items = self.items # refrescar, ver de corregir
self.progreso(len(self.items))
gui.alert('Proceso finalizado OK!\n\nAceptadas: %d\nRechazadas: %d' % (ok, rechazadas), 'Autorización')
self.grabar()
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
    def on_btnPrevisualizar_click(self, event):
        """Generate PDFs for the selected rows; display only the first one."""
        try:
            j = 0
            for i, item in self.get_selected_items():
                j += 1
                archivo = self.generar_factura(item, mostrar=(j==1))
        except Exception, e:
            print e
            self.error(u'Excepción', unicode(str(e), 'latin1', 'ignore'))
    def on_btnEnviar_click(self, event):
        """Generate and email the PDF for each selected, authorized row.

        Rows without a CAE or without an email address are skipped and
        counted as not sent.
        """
        try:
            ok = no = 0
            self.progreso(0)
            for i, item in self.get_selected_items():
                if not item['cae'] in ("", "NULL"):
                    archivo = self.generar_factura(item)
                    if item.get('email'):
                        self.enviar_mail(item,archivo)
                        ok += 1
                    else:
                        no += 1
                        self.log("No se envia factura %s por no tener EMAIL" % item['cbt_numero'])
                else:
                    self.log("No se envia factura %s por no tener CAE" % item['cbt_numero'])
                    no += 1
                self.progreso(i)
            self.progreso(len(self.items))
            gui.alert('Proceso finalizado OK!\n\nEnviados: %d\nNo enviados: %d' % (ok, no), 'Envio de Email')
        except Exception, e:
            self.error(u'Excepción',unicode(e))
def generar_factura(self, fila, mostrar=False):
fepdf = FEPDF()
fact = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in [fila]])[0]
fact['cbte_nro'] = fact['cbt_numero']
fact['items'] = fact['detalles']
for d in fact['datos']:
fepdf.AgregarDato(d['campo'], d['valor'], d['pagina'])
# por compatiblidad, completo campos anteriores
if d['campo'] not in fact and d['valor']:
fact[d['campo']] = d['valor']
fepdf.factura = fact
# convertir importe total en texto (palabras):
moneda_ds = {"PES": "PESOS", "DOL": "DOLAR EEUU"}.get(fact.get("moneda_id", ""), "")
fact["en_letras"] = "SON " + moneda_ds + " " + conv_text(float(fact["imp_total"]))
# cargo el formato CSV por defecto (factura.csv)
fepdf.CargarFormato(conf_fact.get("formato", "factura.csv"))
# establezco formatos (cantidad de decimales) según configuración:
fepdf.FmtCantidad = conf_fact.get("fmt_cantidad", "0.2")
fepdf.FmtPrecio = conf_fact.get("fmt_precio", "0.2")
# datos fijos:
fepdf.CUIT = cuit # CUIT del emisor para código de barras
for k, v in conf_pdf.items():
fepdf.AgregarDato(k, v)
fepdf.CrearPlantilla(papel=conf_fact.get("papel", "legal"),
orientacion=conf_fact.get("orientacion", "portrait"))
fepdf.ProcesarPlantilla(num_copias=int(conf_fact.get("copias", 1)),
lineas_max=int(conf_fact.get("lineas_max", 24)),
qty_pos=conf_fact.get("cant_pos") or 'izq')
salida = conf_fact.get("salida", "")
fact = fepdf.factura
if salida:
pass
elif 'pdf' in fact and fact['pdf']:
salida = fact['pdf']
else:
# genero el nombre de archivo según datos de factura
d = conf_fact.get('directorio', ".")
clave_subdir = conf_fact.get('subdirectorio','fecha_cbte')
if clave_subdir:
d = os.path.join(d, item[clave_subdir])
if not os.path.isdir(d):
os.mkdir(d)
fs = conf_fact.get('archivo','numero').split(",")
it = item.copy()
tipo_fact, letra_fact, numero_fact = fact['_fmt_fact']
it['tipo'] = tipo_fact.replace(" ", "_")
it['letra'] = letra_fact
it['numero'] = numero_fact
it['mes'] = item['fecha_cbte'][4:6]
it['año'] = item['fecha_cbte'][0:4]
# remover acentos, ñ del nombre de archivo (vía unicode):
fn = u''.join([unicode(it.get(ff,ff)) for ff in fs])
fn = unicodedata.normalize('NFKD', fn).encode('ASCII', 'ignore')
salida = os.path.join(d, "%s.pdf" % fn)
fepdf.GenerarPDF(archivo=salida)
if mostrar:
fepdf.MostrarPDF(archivo=salida,imprimir='--imprimir' in sys.argv)
return salida
    def enviar_mail(self, item, archivo):
        """Generate the invoice PDF for *item* and e-mail it as an attachment.

        NOTE(review): the *archivo* parameter is immediately overwritten by a
        fresh generar_factura() call — confirm whether the argument is still
        meant to be honored by callers.
        """
        archivo = self.generar_factura(item)
        if item['email']:
            msg = MIMEMultipart()
            # Subject template: the literal token NUMERO is replaced with the
            # invoice number from the item.
            msg['Subject'] = conf_mail['motivo'].replace("NUMERO",str(item['cbt_numero']))
            msg['From'] = conf_mail['remitente']
            msg['Reply-to'] = msg['From']
            msg['To'] = item['email']
            msg.preamble = 'Mensaje de multiples partes.\n'
            if not 'html' in conf_mail:
                # Plain-text body only.
                part = MIMEText(conf_mail['cuerpo'])
                msg.attach(part)
            else:
                # multipart/alternative: plain-text part plus HTML part.
                alt = MIMEMultipart('alternative')
                msg.attach(alt)
                text = MIMEText(conf_mail['cuerpo'])
                alt.attach(text)
                # We reference the image in the IMG SRC attribute by the ID we give it below
                html = MIMEText(conf_mail['html'], 'html')
                alt.attach(html)
            # Attach the generated PDF.
            part = MIMEApplication(open(archivo,"rb").read())
            part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(archivo))
            msg.attach(part)
            try:
                self.log("Enviando email: %s a %s" % (msg['Subject'], msg['To']))
                # Reuse a cached SMTP connection across calls when available.
                if not self.smtp:
                    self.smtp = SMTP(conf_mail['servidor'], conf_mail.get('puerto', 25))
                    if conf_mail['usuario'] and conf_mail['clave']:
                        self.smtp.ehlo()
                        self.smtp.login(conf_mail['usuario'], conf_mail['clave'])
                to = [msg['To']]
                # Optional blind-copy recipient from configuration.
                bcc = conf_mail.get('bcc', None)
                if bcc:
                    to.append(bcc)
                self.smtp.sendmail(msg['From'], to, msg.as_string())
            except Exception,e:
                self.error(u'Excepción',unicode(e))
# Script entry point: load configuration and launch the PyRece GUI.
if __name__ == '__main__':
    # Optional first CLI argument overrides the default config file path.
    if len(sys.argv)>1 and not sys.argv[1].startswith("-"):
        CONFIG_FILE = sys.argv[1]
    config = SafeConfigParser()
    config.read(CONFIG_FILE)
    # Abort with a dialog when the file is missing or unparsable.
    if not len(config.sections()):
        if os.path.exists(CONFIG_FILE):
            gui.alert(u"Error al cargar archivo de configuración: %s" %
                      CONFIG_FILE, "PyRece: Imposible Continuar")
        else:
            gui.alert(u"No se encuentra archivo de configuración: %s" %
                      CONFIG_FILE, "PyRece: Imposible Continuar")
        sys.exit(1)
    # WSAA credentials (certificate / private key) and taxpayer CUIT.
    cert = config.get('WSAA','CERT')
    privatekey = config.get('WSAA','PRIVATEKEY')
    cuit = config.get('WSFEv1','CUIT')
    # Input CSV with invoices to process (fallback: facturas.csv).
    if config.has_option('WSFEv1','ENTRADA'):
        entrada = config.get('WSFEv1','ENTRADA')
    else:
        entrada = ""
    if not os.path.exists(entrada):
        entrada = "facturas.csv"
    # Output CSV with the webservice results.
    if config.has_option('WSFEv1','SALIDA'):
        salida = config.get('WSFEv1','SALIDA')
    else:
        salida = "resultado.csv"
    # Optional sections: invoice layout, fixed PDF data, mail and DBF settings.
    if config.has_section('FACTURA'):
        conf_fact = dict(config.items('FACTURA'))
    else:
        conf_fact = {}
    conf_pdf = dict(config.items('PDF'))
    conf_mail = dict(config.items('MAIL'))
    if config.has_section('DBF'):
        conf_dbf = dict(config.items('DBF'))
    else:
        conf_dbf = {}
    # Webservice URLs: config overrides are ignored in homologation mode (HOMO).
    if config.has_option('WSAA','URL') and not HOMO:
        wsaa_url = config.get('WSAA','URL')
    else:
        wsaa_url = wsaa.WSAAURL
    if config.has_option('WSFEv1','URL') and not HOMO:
        wsfev1_url = config.get('WSFEv1','URL')
    else:
        wsfev1_url = wsfev1.WSDL
    if config.has_option('WSFEXv1','URL') and not HOMO:
        wsfexv1_url = config.get('WSFEXv1','URL')
    else:
        wsfexv1_url = wsfexv1.WSDL
    # Optional CA certificate bundle and HTTP wrapper implementation.
    CACERT = config.has_option('WSAA', 'CACERT') and config.get('WSAA', 'CACERT') or None
    WRAPPER = config.has_option('WSAA', 'WRAPPER') and config.get('WSAA', 'WRAPPER') or None
    DEFAULT_WEBSERVICE = "wsfev1"
    if config.has_section('PYRECE'):
        DEFAULT_WEBSERVICE = config.get('PYRECE','WEBSERVICE')
    # Proxy settings; keys get a proxy_ prefix as expected by the transport.
    if config.has_section('PROXY'):
        proxy_dict = dict(("proxy_%s" % k,v) for k,v in config.items('PROXY'))
        proxy_dict['proxy_port'] = int(proxy_dict['proxy_port'])
    else:
        proxy_dict = {}
    c = PyRece()
    gui.main_loop()
| StarcoderdataPython |
5150655 | <reponame>pitzer42/telerem
import pytest
from unittest.mock import MagicMock
@pytest.fixture
def app():
    """Provide a MagicMock standing in for the application object."""
    mock_app = MagicMock()
    return mock_app
def test_smoke(app):
    """Smoke test: event accessors on the mocked app return a truthy value."""
    result = app.events.on()
    assert result
3461258 | # 49. Group Anagrams
# https://leetcode.com/problems/group-anagrams
import unittest
class Solution(object):
    def groupAnagrams(self, strs):
        """Group words that are anagrams of each other.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        # Anagrams share the same multiset of characters, so the sorted
        # character tuple is a canonical (hashable) key for each group.
        groups = {}
        for word in strs:  # 'word' instead of 'str': don't shadow the builtin
            key = tuple(sorted(word))
            groups.setdefault(key, []).append(word)
        # Dict insertion order is preserved, so groups come out in order of
        # each anagram class's first appearance (same as the original).
        return list(groups.values())
class TestGroupAnagram(unittest.TestCase):
    """Checks that every returned group matches one of the expected groups."""

    def test(self):
        solver = Solution()
        groups = solver.groupAnagrams(['eat', 'tea', 'tan', 'ate', 'nat', 'bat'])
        expected = [
            ['ate', 'eat', 'tea'],
            ['nat', 'tan'],
            ['bat'],
        ]
        # Each group, once sorted, must be present in the expected list.
        self.assertTrue(all(sorted(group) in expected for group in groups))
if __name__ == '__main__':
    # Bug fix: unittest.TestCase() merely instantiated a bare TestCase and
    # never ran any tests; unittest.main() discovers and runs the suite.
    unittest.main()
| StarcoderdataPython |
8083047 | <reponame>hamzamgit/pinax-teams
from django import template
from pinax.invitations.forms import InviteForm
from pinax.invitations.models import InvitationStat
register = template.Library()
@register.inclusion_tag("pinax/invitations/_invites_remaining.html")
def invites_remaining(user):
    """Render the number of invites the given user may still send."""
    try:
        count = user.invitationstat.invites_remaining()
    except InvitationStat.DoesNotExist:
        # Users without an InvitationStat record have no invites left.
        count = 0
    return {"invites_remaining": count}
@register.inclusion_tag("pinax/invitations/_invite_form.html")
def invite_form(user):
    """Render an invitation form bound to the given user."""
    form = InviteForm(user=user)
    return {"form": form, "user": user}
@register.inclusion_tag("pinax/invitations/_invited.html")
def invites_sent(user):
    """Render the list of invitations the given user has sent."""
    sent = user.invites_sent.all()
    return {"invited_list": sent}
@register.filter
def status_class(invite):
    """Map an invitation's status to a CSS class name ('' when unknown)."""
    css_by_status = {
        invite.STATUS_SENT: "sent",
        invite.STATUS_ACCEPTED: "accepted",
        invite.STATUS_JOINED_INDEPENDENTLY: "joined",
    }
    return css_by_status.get(invite.status, "")
| StarcoderdataPython |
6404765 | import pandas as pd
import simpledorff
import json
import re
from lxml import etree
from django.shortcuts import render, redirect, reverse
from django.http import Http404, JsonResponse
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from tag_sentenze.views import assign_doc_to_user, remove_doc_from_user
from .forms import UserRegisterForm, TaskModelForm, AddJudgmentsForm, ParseXMLForm
from .models import Tagging, TaggingTask, Profile
from tag_sentenze.models import Judgment, Schema, Task
from django.db.models import Count
@login_required
def register(request):
    """Create a new user account; restricted to Editors/Admins."""
    # Only privileged users may register new accounts.
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    form = UserRegisterForm()
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # New accounts start in the Annotators group by default.
            annotators = Group.objects.get(name='Annotators')
            annotators.user_set.add(User.objects.get(username=username))
            return redirect('/')
    return render(request, 'users/register.html', context={'form': form})
@login_required
def home_permission(request):
    """List all annotators and judgments for permission management."""
    # Plain annotators are sent to their own task list instead.
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect('tag_sentenze:my-tasks')
    context = {
        'lista_users': User.objects.filter(groups__name='Annotators').all(),
        'sentenze': Judgment.objects.all(),
    }
    return render(request, 'users/home_permission.html', context=context)
@login_required
def user_permission(request, id):
    """Show which judgments the selected user can access.

    Raises Http404 when no user with the given primary key exists.
    """
    try:
        user = User.objects.get(pk=id)
    except User.DoesNotExist:
        raise Http404("The selected user doesn't exist")
    # Judgments the selected user already has access to.
    permission_list = user.profile.taggings.all()
    # Remaining judgments in a single query, instead of chaining one
    # .exclude(pk=...) queryset clone per judgment in a Python loop.
    all_sentenze = Judgment.objects.exclude(
        pk__in=permission_list.values_list('pk', flat=True))
    context = {
        'permission': permission_list,
        'selected_user': user,
        'lista_users': User.objects.filter(groups__name='Annotators').all(),
        'sentenze': all_sentenze,
    }
    return render(request, 'users/permission_page.html', context=context)
@login_required
def add_permission(request, utente):
    """AJAX endpoint: grant one user access to one judgment.

    Reads ``selected_user`` and ``selected_sentenza`` from the POST body and
    adds the judgment to the user's profile taggings (idempotent).
    """
    data = {}
    # NOTE(review): `request.is_ajax` is referenced as an attribute, not
    # called, so this condition is effectively method == POST; kept as-is
    # to preserve existing behavior.
    if request.method == 'POST' and request.is_ajax:
        data['user'] = request.POST.get('selected_user')
        data['sentenza'] = request.POST.get('selected_sentenza')
        # (Removed a dead Judgment.objects.values_list('id', flat=True)
        # query whose result was never used.)
        selected_user = User.objects.get(username=data['user'])
        selected_sentenza = Judgment.objects.get(id=data['sentenza'])
        permission_list = selected_user.profile.taggings.all()
        if selected_sentenza not in permission_list:
            selected_user.profile.taggings.add(selected_sentenza)
        selected_user.save()
    return JsonResponse({'response': data}, status=200)
@login_required
def remove_permission(request, utente):
    """AJAX endpoint: revoke one user's access to one judgment."""
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['user'] = request.POST.get('selected_user')
        data['sentenza'] = request.POST.get('selected_sentenza')
        # Detach the judgment from the user's profile (no-op if absent).
        target_user = User.objects.get(username=data['user'])
        target_judgment = Judgment.objects.get(id=data['sentenza'])
        if target_judgment in target_user.profile.taggings.all():
            target_user.profile.taggings.remove(target_judgment)
        target_user.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def add_permission_list(request, utente):
    """AJAX endpoint: grant one user access to a list of judgments.

    ``selected_sentenze`` is a JSON-encoded list of judgment ids.
    """
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['user'] = request.POST.get('selected_user')
        data['sentenze'] = request.POST.get('selected_sentenze')
        # (Removed a dead Judgment.objects.values_list(...) query whose
        # result was never used.)
        selected_user = User.objects.get(username=data['user'])
        selected_sentenze = json.loads(data['sentenze'])  # list of ids
        permission_list = selected_user.profile.taggings.all()
        for judgment_id in selected_sentenze:
            judgment = Judgment.objects.get(id=judgment_id)
            if judgment not in permission_list:
                selected_user.profile.taggings.add(judgment)
        selected_user.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def remove_permission_list(request, utente):
    """AJAX endpoint: revoke one user's access to a list of judgments.

    ``selected_sentenze`` is a JSON-encoded list of judgment ids.
    """
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['user'] = request.POST.get('selected_user')
        data['sentenze'] = request.POST.get('selected_sentenze')
        # (Removed a dead Judgment.objects.values_list(...) query whose
        # result was never used.)
        selected_user = User.objects.get(username=data['user'])
        selected_sentenze = json.loads(data['sentenze'])  # list of ids
        permission_list = selected_user.profile.taggings.all()
        for judgment_id in selected_sentenze:
            judgment = Judgment.objects.get(id=judgment_id)
            if judgment in permission_list:
                selected_user.profile.taggings.remove(judgment)
        selected_user.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def home_judgment_schema(request):
    """List schemas and judgments for schema assignment; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect('tag_sentenze:my-tasks')
    context = {
        'lista_schemas': Schema.objects.all(),
        'sentenze': Judgment.objects.all(),
    }
    return render(request, 'users/home_sentenza_schema.html', context=context)
@login_required
def join_schema(request, id):
    """Show judgments associated with the selected schema.

    Falls back to the schema with pk=1 when the requested one is missing.
    """
    try:
        schema = Schema.objects.get(pk=id)
    except Schema.DoesNotExist:
        # Bug fix: the original caught User.DoesNotExist, which
        # Schema.objects.get can never raise, so a missing schema
        # propagated instead of falling back to the default schema.
        schema = Schema.objects.get(pk=1)
    # Judgments already using this schema.
    sentenze_list = Judgment.objects.filter(xsd=schema)
    # Remaining judgments in a single query instead of a per-item
    # exclude loop over the full queryset.
    all_judgments = Judgment.objects.exclude(
        pk__in=sentenze_list.values_list('pk', flat=True))
    context = {
        'sentenze_schema': sentenze_list,
        'selected_schema': schema,
        'schema_id': schema.id,
        'lista_schema': Schema.objects.all(),
        'sentenze': all_judgments,
    }
    return render(request, 'users/sentenza_schema.html', context=context)
@login_required
def add_sentenza_schema(request, schema):
    """AJAX endpoint: attach a schema to a single judgment."""
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['schema'] = request.POST.get('selected_schema')
        data['sentenza'] = request.POST.get('selected_sentenza')
        chosen_schema = Schema.objects.get(id=data['schema'])
        judgment = Judgment.objects.get(id=data['sentenza'])
        judgment.xsd = chosen_schema
        judgment.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def remove_sentenza_schema(request, schema):
    """AJAX endpoint: detach the schema from a single judgment."""
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['schema'] = request.POST.get('selected_schema')
        data['sentenza'] = request.POST.get('selected_sentenza')
        # (Removed an unused Schema.objects.get(...) fetch: clearing the
        # xsd field does not need the schema instance.)
        judgment = Judgment.objects.get(id=data['sentenza'])
        judgment.xsd = None
        judgment.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def add_sentenza_schema_list(request, schema):
    """AJAX endpoint: attach a schema to several judgments at once."""
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['schema'] = request.POST.get('selected_schema')
        data['sentenze'] = request.POST.get('selected_sentenze')
        chosen_schema = Schema.objects.get(id=data['schema'])
        # selected_sentenze is a JSON-encoded list of judgment ids.
        for judgment_id in json.loads(data['sentenze']):
            judgment = Judgment.objects.get(id=judgment_id)
            judgment.xsd = chosen_schema
            judgment.save()
        return JsonResponse({'response': data}, status=200)
@login_required
def remove_sentenza_schema_list(request, schema):
    """AJAX endpoint: detach the schema from several judgments at once."""
    data = {}
    if request.method == 'POST' and request.is_ajax:
        data['schema'] = request.POST.get('selected_schema')
        data['sentenze'] = request.POST.get('selected_sentenze')
        # (Removed an unused Schema.objects.get(...) fetch; clearing xsd
        # does not require the schema instance.)
        for judgment_id in json.loads(data['sentenze']):
            judgment = Judgment.objects.get(id=judgment_id)
            judgment.xsd = None
            judgment.save()
        return JsonResponse({'response': data}, status=200)
# Agreement code
def calc_agreement(task_id, judgment_id):
    """Compute Krippendorff's alpha for one judgment within one task.

    Builds a (token, annotator, label) table from every user's token
    manager for this judgment/task pair and returns the alpha score, or
    None when fewer than two users have a non-empty annotation.
    """
    # Creation of a list with all judgment tokens
    judgment = Judgment.objects.get(id=judgment_id)
    judgment_text = judgment.initial_text
    judgment_tokens = judgment_text.split(' ')
    # dictionary with token-value pairs
    tokens_value = {}
    # assign a integer value to every judgment's token
    # NOTE(review): duplicate token strings collapse onto one dict key, so
    # only the last occurrence's index is kept — confirm intended.
    for i in range(len(judgment_tokens)):
        tokens_value[judgment_tokens[i]] = i+1
    # Get the schema associated to the Judgment and create dictionary with all the tags
    print(task_id)  # NOTE(review): leftover debug print
    schema = Task.objects.get(id=task_id).xsd.schema_file
    tree = etree.ElementTree(file=schema)
    tags = tree.xpath("//xsd:element/@name",
                      namespaces={"xsd": "http://www.w3.org/2001/XMLSchema"})
    tags_value = {}
    for i in range(len(tags)):
        tags_value[tags[i]] = i+1
    # Data structure for calculate the agreement
    data = {'unit_id': [],
            'annotator_id': [],
            'annotation': []}
    # flag to see if at least two or more users has a token manager not empty
    flag = 0
    # For every token manager get the list of word-label pair as [[WORD: LABEL], [WORD2: LABEL]]
    for elem in TaggingTask.objects.all():
        if elem.judgment.id == judgment_id and elem.task.id == task_id:
            tm = elem.token_manager
            user = elem.user
            if tm != ' ' and tm != '':
                # increment flag
                flag = flag + 1
                # Double convertion to dict (token manager is doubly JSON-encoded)
                tm = json.loads(json.loads(tm))
                # Get only tokens
                tokens = tm['tokens']
                # list of word-label pair as [{WORD: LABEL}, {WORD2: LABEL}]
                words = []
                while tokens:
                    t = tokens.pop(0)
                    if isinstance(t, str):
                        words.append(tuple([t, None]))
                    elif t['type'] == "token":
                        words.append(tuple([t['text'], None]))
                    else:
                        label = t['label']
                        for child in t['tokens']:
                            # if child is token block, insert again in tokens
                            if child['type'] == 'token-block':
                                tokens.insert(0, child)
                                continue
                            single_token = child['text']
                            # insert directly the corrispondent value for every label/tag and token
                            words.append(
                                tuple([single_token, tags_value[label]]))
                # Insert data into the structure for agreement
                for pair in words:
                    data['unit_id'].append(pair[0])
                    data['annotator_id'].append(str(user))
                    data['annotation'].append(pair[1])
    # return None if flag is < 2 (agreement needs at least two annotators)
    if flag < 2:
        return None
    # Calculate agreement with simpledorff package and pandas
    Data = pd.DataFrame(data)
    try:
        agreement = simpledorff.calculate_krippendorffs_alpha_for_df(Data, experiment_col='unit_id',
                                                                     annotator_col='annotator_id',
                                                                     class_col='annotation')
    except ZeroDivisionError:
        # Degenerate case (e.g. a single category everywhere) is treated
        # as perfect agreement.
        agreement = 1
    return agreement
# Receive the POST request to calculate the agreement score on a specific judgment
@login_required
def agreement_post(request, task_id, jud_id):
    """AJAX endpoint returning the agreement score for one judgment/task."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect('tag_sentenze:my-tasks')
    if request.method == 'POST' and request.is_ajax:
        score = calc_agreement(task_id, jud_id)
        # 'x' marks a judgment without enough annotators for a score.
        score = round(score, 2) if score is not None else 'x'
        return JsonResponse({'response': score}, status=200)
@login_required
def list_tasks_agreement(request):
    """List all tasks with document/user counts; Editors/Admins only."""
    current_user = request.user
    tasks = Task.objects.annotate(n_docs=Count(
        'judgments', distinct=True), n_users=Count('users', distinct=True)).values()
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        context = {
            'tasks': tasks
        }
        # (Removed a leftover `print(context)` debug statement.)
        return render(request, 'users/list_tasks_agreement.html', context=context)
    else:
        # Annotators are redirected to their own task list.
        return redirect('tag_sentenze:my-tasks')
"""
# Create table in template with judgments as rows and users as columns
"""
@login_required
def list_taggings_agreement(request, id):
    """Render a judgments-by-users completion table for one task.

    Each row is [judgment_id, judgment_name, agreement, status...] where a
    status cell is 2 for a completed tagging and 1 otherwise; the agreement
    column currently holds a '-' placeholder (score computation disabled).
    """
    current_user = request.user
    if not current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect('tag_sentenze:my-tasks')
    task = Task.objects.get(id=id)
    judgments = task.judgments.all()
    users = task.users.all()
    rows = []
    # for every row there must be the judgment id, judgment name, agreement, status of judgment-user
    # Example: [JudgmentID, JudgmentName, Agreement, 1, 0] if first user has the judgment associated to, 0 otherwise
    # In Django template the first value (JudgmentID) must be skipped during the insertion loop into the table
    for judgment in judgments:
        single_row = []
        single_row.append(judgment.id)
        single_row.append(judgment.name)
        # Agreement lookup is disabled; emit a '-' placeholder instead.
        single_row.append('-')
        for user in users:
            if TaggingTask.objects.get(user=user, judgment=judgment, task=task).completed:
                single_row.append(2)
            else:
                single_row.append(1)
        rows.append(single_row)
    context = {
        'users': users,
        'judgments': judgments,
        'rows': rows,
        'task_id': id,
    }
    return render(request, 'users/list_taggings_agreement.html', context=context)
@login_required
def manage_users(request):
    """List non-privileged users and handle creation of new accounts."""
    users = User.objects.exclude(groups__name__in=["Admins", "Editors"])
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    form = UserRegisterForm()
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # New accounts start out in the Annotators group.
            annotators = Group.objects.get(name='Annotators')
            annotators.user_set.add(User.objects.get(username=username))
            return redirect('/')
    return render(request, 'users/manage_users.html', context={'form': form, 'users': users})
@login_required
def update_user(request, id):
    """Edit an existing user via the registration form; Editors/Admins only."""
    user = User.objects.get(id=id)
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    form = UserRegisterForm(instance=user)
    if request.method == 'POST':
        form = UserRegisterForm(request.POST, instance=user)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # Keep the (possibly renamed) account in the Annotators group.
            annotators = Group.objects.get(name='Annotators')
            annotators.user_set.add(User.objects.get(username=username))
            return redirect(reverse('users:manage-users'))
    return render(request, 'users/update_user.html', context={'form': form, 'users': user})
@login_required
def delete_user(request, id):
    """Delete a user by id, then return to the user management page."""
    if request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        try:
            target = User.objects.get(id=id)
        except User.DoesNotExist:
            raise Http404()
        target.delete()
        messages.warning(request, ("User deleted"))
    else:
        messages.warning(request, ("You are not authorized"))
    return redirect(reverse('users:manage-users'))
@login_required
def manage_schemas(request):
    """List all schemas; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    return render(request, 'users/manage_schemas.html',
                  context={'schemas': Schema.objects.all()})
@login_required
def add_schemas(request):
    """Upload one or more schema files; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect('tag_sentenze:my-tasks')
    if request.method == 'POST':
        # One Schema record per uploaded file.
        for uploaded in request.FILES.getlist('schemas'):
            new_schema = Schema.objects.create(
                schema_file=uploaded,
            )
            new_schema.save()
        # Back to the schema overview.
        return redirect(reverse('users:manage-schemas'))
    return render(request, 'users/add_schemas.html')
@login_required
def delete_schema(request, id):
    """Delete a schema by id, then return to the schema management page."""
    if request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        try:
            target = Schema.objects.get(id=id)
        except Schema.DoesNotExist:
            raise Http404()
        target.delete()
        messages.warning(request, ("Schema deleted"))
    else:
        messages.warning(request, ("You are not authorized"))
    return redirect(reverse('users:manage-schemas'))
@login_required
def view_schema(request, id):
    """Display the tag list of one schema; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return redirect(reverse('tag_sentenze:my-tasks'))
    try:
        schema = Schema.objects.get(id=id)
    except Schema.DoesNotExist:
        raise Http404()
    return render(request, 'users/view_schema.html',
                  context={'schema_text': schema.tags})
@login_required
def manage_juds(request):
    """List all judgments; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    return render(request, 'users/manage_juds.html',
                  context={'juds': Judgment.objects.all()})
@login_required
def add_judgments(request):
    """Upload judgment files and attach them to a task.

    Each uploaded file becomes a Judgment linked to the task chosen in the
    form, and every new judgment is assigned to every user already on
    that task.
    """
    # check if current user belongs to Editor or Admin Group
    current_user = request.user
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        form = AddJudgmentsForm()
        if request.method == 'POST':
            form = AddJudgmentsForm(request.POST)
            if form.is_valid():
                # Get all the file uploaded
                judgment_files = request.FILES.getlist('judgments')
                task = form.cleaned_data['task']
                for judgment in judgment_files:
                    new_judg = Judgment.objects.create(
                        judgment_file=judgment,
                    )
                    new_judg.save()
                    task.judgments.add(new_judg)
                    # auto assign the new uploaded judgments to all Users of this Task
                    for user in task.users.all():
                        assign_doc_to_user(task.id, new_judg.id, user.id)
                # redirect home
                return redirect(reverse('users:manage-juds'))
        context = {
            'form': form,
        }
        return render(request, 'users/add_judgments.html', context=context)
    else:
        # Annotators are redirected to their own task list.
        return redirect('tag_sentenze:my-tasks')
@login_required
def delete_judgment(request, id):
    """Delete a judgment by id, then return to the judgment management page."""
    if request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        try:
            target = Judgment.objects.get(id=id)
        except Judgment.DoesNotExist:
            raise Http404()
        target.delete()
        messages.warning(request, ("Judgment deleted"))
    else:
        messages.warning(request, ("You are not authorized"))
    return redirect(reverse('users:manage-juds'))
@login_required
def view_judgment(request, id):
    """Display a judgment's original text; Editors/Admins only."""
    current_user = request.user
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        try:
            jud = Judgment.objects.get(id=id)
        except Judgment.DoesNotExist:
            # Bug fix: previously caught Schema.DoesNotExist, which
            # Judgment.objects.get never raises, so a missing judgment
            # surfaced as an unhandled exception instead of a 404.
            raise Http404()
        # <br/> markers stored in initial_text become real line breaks.
        context = {'jud_text': jud.initial_text.replace("<br/>", "\n")}
        return render(request, 'users/view_judgment.html', context=context)
    else:
        return redirect(reverse('tag_sentenze:my-tasks'))
@login_required
def manage_tasks(request):
    """List all tasks; Editors/Admins only."""
    if not request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        return render(request, 'users/no_permission.html')
    return render(request, 'users/manage_tasks.html',
                  context={'tasks': Task.objects.all()})
@login_required
def new_task(request):
    """Create a task and assign every selected judgment to every selected user."""
    # check if current user belongs to Editor or Admin Group
    current_user = request.user
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        # Pre-fill the owner with the requesting user.
        form = TaskModelForm(initial={'owner': current_user})
        if request.method == 'POST':
            form = TaskModelForm(request.POST)
            if form.is_valid():
                task = form.save()
                # assign every Doc in the Task to every User in it
                for user in form.cleaned_data['users']:
                    for judgment in form.cleaned_data['judgments']:
                        assign_doc_to_user(task.id, judgment.id, user.id)
                return redirect(reverse('users:manage-tasks'))
        return render(request, 'users/create_task.html', context={'form': form})
    else:
        return render(request, 'users/no_permission.html')
@login_required
def update_task(request, id):
    """Edit a task; reconcile per-user document assignments with the form.

    Compares the old (user, judgment) cross-product with the new one and
    removes or creates the corresponding tagging assignments so that only
    pairs present in the submitted form remain.
    """
    old_task = Task.objects.get(id=id)
    # check if current user belongs to Editor or Admin Group
    current_user = request.user
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        form = TaskModelForm(instance=old_task)
        if request.method == 'POST':
            form = TaskModelForm(request.POST, instance=old_task)
            if form.is_valid():
                # assignments obtained by analizing old and new pairs (user-doc)
                old_users = list(old_task.users.all())
                old_docs = list(old_task.judgments.all())
                old_pairs = [(user, doc)
                             for user in old_users for doc in old_docs]
                new_users = list(form.cleaned_data['users'])
                new_docs = list(form.cleaned_data['judgments'])
                new_pairs = [(user, doc)
                             for user in new_users for doc in new_docs]
                # Pairs no longer present lose their assignment ...
                pairs_to_remove = [p for p in old_pairs if p not in new_pairs]
                for (user, doc) in pairs_to_remove:
                    remove_doc_from_user(old_task.id, doc.id, user.id)
                # ... and brand-new pairs get one created.
                pairs_to_add = [p for p in new_pairs if p not in old_pairs]
                for (user, doc) in pairs_to_add:
                    assign_doc_to_user(old_task.id, doc.id, user.id)
                form.save()
                return redirect(reverse('users:manage-tasks'))
        return render(request, 'users/update_task.html', context={'form': form, 'tasks': old_task})
    else:
        return render(request, 'users/no_permission.html')
@login_required
def delete_task(request, id):
    """Delete a task by id, then return to the task management page."""
    if request.user.groups.filter(name__in=['Editors', 'Admins']).exists():
        try:
            target = Task.objects.get(id=id)
        except Task.DoesNotExist:
            raise Http404()
        target.delete()
        messages.warning(request, ("Task deleted"))
    else:
        messages.warning(request, ("You are not authorized"))
    return redirect(reverse('users:manage-tasks'))
@login_required
def parse_xml(request):
    """Import an already-annotated XML document into a task.

    The uploaded XML is wrapped in a <sentag> root if needed, stripped of
    tags to recover the original plain text (with <br/> line markers),
    stored as a Judgment, and the raw XML is seeded into every task
    user's tagging.
    """
    # check if current user belongs to Editor or Admin Group
    current_user = request.user
    if current_user.groups.filter(name__in=['Editors', 'Admins']).exists():
        form = ParseXMLForm()
        if request.method == 'POST':
            # Create a form instance and populate it with data from the request
            form = ParseXMLForm(request.POST, request.FILES)
            # Check if the form input is valid:
            if form.is_valid():
                # build xml string from file
                xml_string = b""
                f = request.FILES["xml_file"]
                for chunk in f.chunks():
                    xml_string += chunk
                # get the schema from the task
                task_id = form.data['task']
                task = Task.objects.get(id=task_id)
                print(task)  # NOTE(review): leftover debug print
                schema = task.xsd  # NOTE(review): fetched but unused below
                # DO NOT validate xml-xsd... ACCEPT ALSO INVALID ONES
                # also we need to save the judgment (with original text)
                # add initial tag
                xml_string = xml_string.decode("utf-8").strip()
                if not xml_string.startswith("<sentag>"):
                    xml_string = "<sentag>" + xml_string + "</sentag>"
                # transformations to get the original text
                # remove the tags
                notags = re.sub('<.*?>', '', xml_string)
                # replace the \r\n with <br/>
                notags = " <br/> ".join(notags.splitlines())
                # remove multi space
                notags = " ".join(notags.split())
                # collapse runs of three or more <br/> down to two
                notags = re.sub(r'(<br/> *){3,}', "<br/> <br/> ", notags)
                # remove leading <br/>
                if notags[:6] == "<br/> ":
                    notags = notags[6:]
                # save judgment with the reconstructed original text
                judgment = Judgment.objects.create(
                    judgment_file=request.FILES["xml_file"], initial_text=notags)
                # create tagging objects for the new doc-task (for each user in the task)
                for user in task.users.all():
                    assign_doc_to_user(
                        task.id, judgment.id, user.id, xml_string)  # also save the original xml text in the tagging
                # add the docs to the Task
                task.judgments.add(judgment)
                task.save()
                # TODO and then load the tagging interface
                messages.success(
                    request, ("Annotated document added to the Task"))
                return redirect(reverse("users:parse-xml"))
        # either schema not valid or GET request -> re-display form
        context = {
            'form': form,
        }
        return render(request, 'tag_sentenze/parse_xml.html', context=context)
    else:  # no permission
        return redirect('tag_sentenze:my-tasks')
| StarcoderdataPython |
class Tarea:
    """Container pairing a task's arguments with its results."""

    def __init__(self, args=None, resultados=None):
        # Default to fresh dicts; a mutable default argument would be
        # shared across instances.
        self.args = {} if args is None else args
        self.resultados = {} if resultados is None else resultados
| StarcoderdataPython |
4835810 | <reponame>super-resolution/Impro
"""
"""
from impro.data.image_factory import ImageFactory
from impro.analysis.filter import Filter
from impro.analysis.analysis_facade import *
import os
def setting_1():
    """Load SIM frame + density-filtered dSTORM data for sample 0824a."""
    # SIM channel: scale, clip to 8 bit, crop, then mirror.
    sim = ImageFactory.create_image_file(
        r"D:\asdf\3D Auswertung 22092016\20160919_SIM1\20160919_SIM_0824a_lh_1_Out_Channel Alignment.czi")
    frame = sim.data[:, 8] / 2
    frame = np.clip(frame[0], 0, 255)
    frame = frame.astype("uint8")[1000:2400, 200:1600]
    frame = np.flipud(np.fliplr(frame))
    frame = np.fliplr(frame)
    # dSTORM localizations, kept only where local density is high enough.
    storm = ImageFactory.create_storm_file(
        r"D:\asdf\3D Auswertung 22092016\20160919_SIM1\20190920_3D_sample0824a_SIM0919_SIM1.txt")
    keep = Filter.local_density_filter(storm.stormData, 100.0, 18)
    return frame, storm.stormData[keep]
def setting_2():
    """Load and preprocess the SIM/dSTORM data pairing for sample 2.

    Returns a tuple (image_array, storm_data); same structure as
    ``setting_1`` but with different source files, channel and filter
    parameters.
    """
    # SIM side: channel 1, intensity divided by 6, clipped to 8 bit.
    sim = ImageFactory.create_image_file(r"D:\_3dSarahNeu\!!20170317\20170317_0308c_SIM9_Out_Channel Alignment.czi")
    frame = sim.data[:, 1] / 6
    frame = np.clip(frame[0], 0, 255)
    frame = np.flipud(np.fliplr(frame))
    frame = frame.astype("uint8")[0:1400, 0:1400]
    frame = np.fliplr(frame)
    # dSTORM side: keep only localizations passing the local-density filter.
    storm = ImageFactory.create_storm_file(
        r"D:\_3dSarahNeu\!!20170317\trans_20170317_0308c_SIM9_RG_300_300_Z_coordinates_2016_11_23.txt")
    keep = Filter.local_density_filter(storm.stormData, 100.0, 2)
    return frame, storm.stormData[keep]
def setting_3():
    """Load and preprocess the SIM/dSTORM data pairing for the
    microtubule sample.

    Returns a tuple (image_array, storm_data); same structure as
    ``setting_1``.
    """
    # SIM side: channel 3, intensity divided by 6, clipped to 8 bit.
    sim = ImageFactory.create_image_file(r"D:\Microtuboli\Image2b_Al532_ex488_Structured Illumination.czi")
    frame = sim.data[:, 3] / 6
    frame = np.clip(frame[0], 0, 255)
    frame = np.flipud(frame)
    frame = frame.astype("uint8")[0:1400, 0:1400]
    frame = np.fliplr(frame)
    # dSTORM side: keep only localizations passing the local-density filter.
    storm = ImageFactory.create_storm_file(r"D:\Microtuboli\20151203_sample2_Al532_Tub_1.txt")
    keep = Filter.local_density_filter(storm.stormData, 100.0, 18)
    return frame, storm.stormData[keep]
#i = image(r"D:\Microtuboli\Image2b_Al532_ex488_Structured Illumination.czi")
#y = i.data[:,0]/2
# i = image(r"D:\_3dSarahNeu\!!20170317\20170317_0308c_SIM10_Out_Channel Alignment.czi")
#data = storm(r"D:\Microtuboli\20151203_sample2_Al532_Tub_1.txt")
# todo: photon filter, z filter
# x = Filter.photon_filter(data.stormData,3000,99999)
# Load the SIM image (y) and the density-filtered dSTORM localizations.
y,storm_data = setting_1()
# import matplotlib.pyplot as plt
# from scipy.spatial import Voronoi, voronoi_plot_2d
# vor = Voronoi(storm_data[...,0:2])
# regions, vertices = voronoi_finite_polygons_2d(vor, radius=130)
#
# for region in regions:
#     polygon = vertices[region]
#     plt.fill(*zip(*polygon), alpha=0.4)
#
# #plt.plot(points[:,0], points[:,1], 'ko')
# plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
# plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
#
# plt.show()
# def find_optimal_alpha():
#     bound = [0,5000,41500,43689]
#     data = storm_data[np.where((storm_data[...,0]>bound[0])&(storm_data[...,0]<bound[1])&(storm_data[...,1]>bound[2])&(storm_data[...,1]<bound[3]))]
#     #data = data[np.where(data[...,0]<5000)]
#     im = create_storm(data)
#     cv2.imshow("asdf",im)
#     cv2.waitKey(0)
#
#     points=[]
#     for p in data:
#         points.append(Point_2(np.int32(p[0]),np.int32(p[1])))
#     print("Created_points")
#     alpha = Alpha_shape_2(10,points)
#     print("alpha =",np.sqrt(alpha.find_optimal_alpha(np.int64(1))))
#find_optimal_alpha()
# Render the localizations as an image via their alpha shape (alpha = 130).
im = create_alpha_shape(storm_data, 130)
"""render magenta edge image"""
# im = cv2.cvtColor(im, cv2.COLOR_RGBA2GRAY)
# im = cv2.blur(im, (4, 4))
# canny = cv2.Canny(im, 130, 200)
# canny = np.fliplr(canny)
# canny = cv2.cvtColor(canny, cv2.COLOR_GRAY2RGB)
# canny[...,1] = 0
#
# cv2.imshow("canny", canny[1000:])
# cv2.waitKey(0)
"""end render"""
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\test\storm.jpg",storm_image)
#cv2.imshow("a",y.astype("uint8"))
#cv2.imshow("asdf",im.astype("uint8"))
#cv2.waitKey(0)
#buffer image
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\test\alpha2.jpg",im)
#im = cv2.imread(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\test_image.tif",-1)
#check alpha shape
#todo:resize images
#y =np.flipud(y).astype(np.uint8)
#y = (y).astype("uint8")[0:1700,0:1700]
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\test\SIM2.jpg",y)
#cv2.imshow("microSIM", y)
#cv2.waitKey(0)
# Tile counts for the mapping search: roughly one tile per 200 px.
col = int(im.shape[1]/200)
row = int(im.shape[0]/200)
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\weighting\image.png",im.astype(np.uint8))
#cv2.waitKey(0)
#norm_source = np.linalg.norm(cv2.cvtColor(im,cv2.COLOR_RGBA2GRAY))
#norm_target = np.linalg.norm(y)
#y = (y.astype(np.float32) * (norm_source/norm_target))
#y = np.clip(y, 0, 255)
# Find candidate point correspondences between the SIM image and the
# alpha-shape rendering, reject outliers, and fit an affine transform M.
points1,points2,z,t_list = find_mapping(y.astype(np.uint8), im,n_row=row,n_col=col)
#print(z.max())
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\weighting\not_weighted.png",z.astype(np.uint8))
#cv2.imshow("sdgag", z.astype(np.uint8))
#cv2.imshow("y",y)
#cv2.waitKey(0)
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\Hough_complete_.jpg",z)
p1, p2 = error_management(t_list, points1, points2,n_row = row)
M = transform.estimate_transform("affine",p1,p2)
def test_transformation_accuracy(offset, source_image):
    """Estimate an affine SIM-to-rendering mapping for one window offset.

    Returns (transform, annotated_overlay, pearson_coefficient); each
    accepted target point is marked as a 10x10 red (RGBA) square in the
    overlay. NOTE(review): relies on the module-level SIM image ``y``.
    """
    pts_src, pts_dst, overlay, candidates = find_mapping(
        y.astype(np.uint8), source_image, n_row=5, n_col=5, offset=offset)
    good_src, good_dst = error_management(candidates, pts_src, pts_dst, n_row=5)
    # Paint a 10x10 red square at every accepted target coordinate.
    for coord in good_dst:
        for di in range(10):
            for dj in range(10):
                overlay[int(coord[1] + di), int(coord[0] + dj)] = np.array([1, 0, 0, 1]) * 255
    affine = transform.estimate_transform("affine", good_src, good_dst)
    coeff = pearson_correlation(y, cv2.cvtColor(source_image, cv2.COLOR_RGBA2GRAY), affine)[0]
    return affine, overlay, coeff
def evaluate():
    """Sweep the alpha-shape parameter and measure registration stability.

    For alpha in {20, 30, ..., 210}, the affine mapping is estimated at ten
    window offsets; translation, rotation, shear and the Pearson coefficient
    of each run are written to disk and their spread printed.

    NOTE(review): returns the M/z/im from the *last successful* inner run;
    if every inner run fails, M and z are unbound and this raises NameError.
    Relies on module-level globals ``storm_data`` (and ``y`` indirectly).
    """
    for j in range(20):
        #try:
        alpha = 20+10*j
        im = create_alpha_shape(storm_data, alpha)
        translation = []
        for i in range(10):
            try:
                M,z,coeff = test_transformation_accuracy(i*10, im)
                translation.append(np.array([M.translation[0],M.translation[1],M.rotation,M.shear, coeff]))
            except Exception as error:
                print('Caught this error: ' + repr(error))
        z[z>255] = 0
        results = np.asarray(translation)
        np.savetxt(os.getcwd()+r"\test_files\results_3\alpha_"+str(alpha)+"txt", results)
        # Mean absolute deviation and standard deviation of the fitted
        # translation components across the ten offsets.
        mean_x = np.mean(results[...,0])
        print("error_x",np.mean(results[...,0]),np.mean(np.abs(results[...,0]-mean_x)),"std", np.std(results[...,0]))
        mean_y = np.mean(results[...,1])
        print("error_y",np.mean(results[...,1]), np.mean(np.abs(results[...,1]-mean_y)),"std", np.std(results[...,1]))
        print("error_rot",np.mean(results[...,2]),"std", np.std(results[...,2]))
        print("error_shear",np.mean(results[...,3]),"std", np.std(results[...,3]))
        #except:
        #    print("fail...")
    return M,z,im
#M,z,im = evaluate()
# Overlay the aligned alpha-shape rendering on the SIM image using the
# affine transform M estimated above, and display the results.
pearsonRGB = cv2.cvtColor(y, cv2.COLOR_GRAY2RGB)
pearson_correlation(y, cv2.cvtColor(im, cv2.COLOR_RGBA2GRAY), M)
color_warp = np.zeros_like(pearsonRGB)
color_warp[0:im.shape[0], 0:im.shape[1]] = cv2.cvtColor(im, cv2.COLOR_RGBA2RGB)
dst = transform.warp(color_warp,inverse_map=M.inverse)*255
dst = dst
cv2.imshow("res", dst)
# NOTE(review): values above 255 are zeroed (not clipped) before display.
added = dst.astype(np.uint16) + pearsonRGB.astype(np.uint16)
added[added>255] = 0
cv2.imshow("added", added.astype(np.uint8))
#cv2.imwrite(r"C:\Users\biophys\Desktop\Masterarbeit\src\abb\aligned\raw\microtub.tif",added.astype(np.uint8))
cv2.imshow("sdgag", z.astype(np.uint8))# img.astype("uint8"))
cv2.waitKey(0)
#test data1
#Pearson :0.324,ld100;18;alpha130
#test data2
#slice 0; ld100;5;alpha 100
#test data3
#slice 4; ld100; 18; alpha 130
8196977 | <reponame>ezequielramos/oci-python-sdk<filename>src/oci/certificates_management/models/update_root_ca_by_generating_internally_config_details.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_certificate_authority_config_details import UpdateCertificateAuthorityConfigDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateRootCaByGeneratingInternallyConfigDetails(UpdateCertificateAuthorityConfigDetails):
    """
    The details for updating a private root certificate authority (CA).
    Note: This operation automatically rotates the private key.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateRootCaByGeneratingInternallyConfigDetails object with values from keyword
        arguments. The default value of the
        :py:attr:`~oci.certificates_management.models.UpdateRootCaByGeneratingInternallyConfigDetails.config_type`
        attribute of this class is ``ROOT_CA_GENERATED_INTERNALLY`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param config_type:
            The value to assign to the config_type property of this UpdateRootCaByGeneratingInternallyConfigDetails.
            Allowed values for this property are: "ROOT_CA_GENERATED_INTERNALLY", "SUBORDINATE_CA_ISSUED_BY_INTERNAL_CA"
        :type config_type: str

        :param version_name:
            The value to assign to the version_name property of this UpdateRootCaByGeneratingInternallyConfigDetails.
        :type version_name: str

        :param stage:
            The value to assign to the stage property of this UpdateRootCaByGeneratingInternallyConfigDetails.
            Allowed values for this property are: "CURRENT", "PENDING"
        :type stage: str

        :param validity:
            The value to assign to the validity property of this UpdateRootCaByGeneratingInternallyConfigDetails.
        :type validity: oci.certificates_management.models.Validity
        """
        # Wire-format metadata consumed by the serializer and by the
        # @init_model_state_from_kwargs decorator; keys and values must
        # match the service API contract exactly.
        self.swagger_types = {
            'config_type': 'str',
            'version_name': 'str',
            'stage': 'str',
            'validity': 'Validity'
        }
        self.attribute_map = {
            'config_type': 'configType',
            'version_name': 'versionName',
            'stage': 'stage',
            'validity': 'validity'
        }
        self._config_type = None
        self._version_name = None
        self._stage = None
        self._validity = None
        # This subclass is discriminated by a fixed config type.
        self._config_type = 'ROOT_CA_GENERATED_INTERNALLY'

    @property
    def validity(self):
        """
        Gets the validity of this UpdateRootCaByGeneratingInternallyConfigDetails.

        :return: The validity of this UpdateRootCaByGeneratingInternallyConfigDetails.
        :rtype: oci.certificates_management.models.Validity
        """
        return self._validity

    @validity.setter
    def validity(self, validity):
        """
        Sets the validity of this UpdateRootCaByGeneratingInternallyConfigDetails.

        :param validity: The validity of this UpdateRootCaByGeneratingInternallyConfigDetails.
        :type: oci.certificates_management.models.Validity
        """
        self._validity = validity

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| StarcoderdataPython |
8199947 | <reponame>bcmi220/srl_syn_pruning
import torch
import torch.nn as nn
class HighwayMLP(nn.Module):
    """Highway layer for fully-connected features.

    Computes ``y = H(x) * T(x) + x * (1 - T(x))`` where H is the transform
    branch (``activation_function`` over a linear projection) and T the
    gate branch (``gate_activation`` over a second linear projection,
    applied along dim 0). A negative ``gate_bias`` biases the gate towards
    carrying the input through early in training.
    """

    def __init__(self,
                 input_size,
                 gate_bias=-2,
                 activation_function=nn.functional.relu,
                 gate_activation=nn.functional.softmax):
        super(HighwayMLP, self).__init__()
        self.activation_function = activation_function
        self.gate_activation = gate_activation
        # Transform and gate branches keep the feature width unchanged.
        self.normal_layer = nn.Linear(input_size, input_size)
        self.gate_layer = nn.Linear(input_size, input_size)
        self.gate_layer.bias.data.fill_(gate_bias)

    def forward(self, x):
        transformed = self.activation_function(self.normal_layer(x))
        gate = self.gate_activation(self.gate_layer(x), dim=0)
        gated_transform = torch.mul(transformed, gate)
        carried_input = torch.mul((1 - gate), x)
        return torch.add(gated_transform, carried_input)
class HighwayCNN(nn.Module):
    """Highway layer (CNN-feature variant).

    Same computation as :class:`HighwayMLP` — ``y = H(x)*T(x) +
    x*(1-T(x))`` — but with a milder default gate bias of -1.
    """

    def __init__(self,
                 input_size,
                 gate_bias=-1,
                 activation_function=nn.functional.relu,
                 gate_activation=nn.functional.softmax):
        super(HighwayCNN, self).__init__()
        self.activation_function = activation_function
        self.gate_activation = gate_activation
        # Transform and gate branches keep the feature width unchanged.
        self.normal_layer = nn.Linear(input_size, input_size)
        self.gate_layer = nn.Linear(input_size, input_size)
        self.gate_layer.bias.data.fill_(gate_bias)

    def forward(self, x):
        transformed = self.activation_function(self.normal_layer(x))
        # NOTE(review): unlike HighwayMLP, no explicit ``dim`` is passed, so
        # the softmax axis is inferred (deprecation warning on recent
        # PyTorch) — confirm the intended axis before changing.
        gate = self.gate_activation(self.gate_layer(x))
        gated_transform = torch.mul(transformed, gate)
        carried_input = torch.mul((1 - gate), x)
        return torch.add(gated_transform, carried_input)
12829285 | """
A suite of functions for finding sources in images.
:Authors: <NAME>, <NAME>
:License: :doc:`LICENSE`
"""
import sys
import math
import numpy as np
from scipy import signal, ndimage
import stsci.imagestats as imagestats
from . import cdriz
__all__ = ['gaussian1', 'gausspars', 'gaussian', 'moments', 'errfunc',
'findstars', 'apply_nsigma_separation', 'xy_round',
'precompute_sharp_round', 'sharp_round', 'roundness', 'immoments',
'nmoment', 'centroid', 'cmoment', 'central_moments', 'covmat',
'help', 'getHelpAsString']
#def gaussian(amplitude, xcen, ycen, xsigma, ysigma):
#from numpy import *
FWHM2SIG = 2*np.sqrt(2*np.log(2))
def gaussian1(height, x0, y0, a, b, c):
    """Build an elliptical 2D Gaussian, returned as a callable f(x, y).

    Parameters
    ----------
    height : amplitude of the Gaussian
    x0, y0 : center of the Gaussian
    a, b, c : coefficients of the quadratic form describing the ellipse
    """
    def profile(x, y):
        dx = x - x0
        dy = y - y0
        return height * np.exp(-0.5 * (a * dx**2 + b * dx * dy + c * dy**2))
    return profile
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
    """
    Compute the size and quadratic-form coefficients of an elliptical
    Gaussian kernel.

    Parameters
    ----------
    fwhm : float
        Full width at half maximum of the observation.
    nsigma : float
        Where to cut off the Gaussian, in units of sigma.
    ratio : float
        Ratio of the kernel sigmas (ysigma/xsigma); 0 selects a 1D Gaussian.
    theta : float
        Position angle of the major axis in **degrees**, measured
        counter-clockwise from the x axis.

    Returns
    -------
    nx, ny : int
        Dimensions of the elliptical kernel (always odd).
    a, b, c, f : float
        Ellipse parameters defining the kernel footprint through the
        quadratic form a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2 <= 2*f.

    Raises
    ------
    ValueError
        If ratio == 0 and theta is not 0, 90 or 180 degrees.
    """
    xsigma = fwhm / FWHM2SIG
    ysigma = ratio * xsigma
    f = nsigma**2/2.
    # Keep the angle in degrees for the special-case tests below and use the
    # radian value only for trigonometry.  (BUGFIX: the original converted
    # theta to radians *before* comparing it to 90/180, so those branches
    # were unreachable and e.g. theta=90 always raised ValueError.)
    theta_rad = np.deg2rad(theta)
    cost = np.cos(theta_rad)
    sint = np.sin(theta_rad)
    if ratio == 0:  # 1D Gaussian
        if theta == 0 or theta == 180:
            a = 1/xsigma**2
            b = 0.0
            c = 0.0
        elif theta == 90:
            a = 0.0
            b = 0.0
            c = 1/xsigma**2
        else:
            raise ValueError('Unable to construct 1D Gaussian with these parameters')
        nx = 2 * int(max(2, (xsigma*nsigma*np.abs(cost))))+1
        ny = 2 * int(max(2, (xsigma*nsigma*np.abs(sint))))+1
    else:  # 2D Gaussian
        xsigma2 = xsigma * xsigma
        ysigma2 = ysigma * ysigma
        a = cost**2/xsigma2 + sint**2/ysigma2
        b = 2 * cost * sint * (1.0/xsigma2 - 1.0/ysigma2)
        c = sint**2/xsigma2 + cost**2/ysigma2
        d = b**2 - 4*a*c  # discriminant of the quadratic form
        nx = 2 * int(2*max(1, nsigma*math.sqrt(-c/d)))+1
        ny = 2 * int(2*max(1, nsigma*math.sqrt(-a/d)))+1
    return nx, ny, a, b, c, f
def gaussian(height, center_x, center_y, width_x, width_y):
    """Return an axis-aligned 2D Gaussian function with the given parameters.

    BUGFIX: the original body called the bare name ``exp``, which is not
    defined in this module (numpy is imported as ``np`` and the
    ``from numpy import *`` line is commented out), so every call raised
    NameError.  It now uses ``np.exp``.
    """
    width_x = float(width_x)
    width_y = float(width_y)
    return lambda x, y: height * np.exp(
        -(((center_x - x) / width_x)**2 + ((center_y - y) / width_y)**2) / 2)
def moments(data, cntr):
    """
    Estimate Gaussian parameters (height, x, y, width_x, width_y) of a 2D
    distribution from its image moments.

    Raises ValueError when ``cntr`` falls outside ``data``.
    """
    x, y = cntr
    ix, iy = int(x), int(y)
    if not (0 <= ix < data.shape[1] and 0 <= iy < data.shape[0]):
        raise ValueError
    # Second moments of the central column/row give the per-axis widths.
    col = data[:, ix]
    width_x = np.sqrt(abs(((np.arange(col.size) - y)**2 * col).sum() / col.sum()))
    row = data[iy, :]
    width_y = np.sqrt(abs(((np.arange(row.size) - x)**2 * row).sum() / row.sum()))
    return data.max(), x, y, width_x, width_y
def errfunc(p, *args):
    """Flattened residuals between ``gaussian1(*p)`` evaluated on the grid
    ``args[1:]`` and the data ``args[0]``; suitable for scipy least-squares
    fitters."""
    model = gaussian1(*p)
    return np.ravel(model(*args[1:]) - args[0])
def findstars(jdata, fwhm, threshold, skymode,
              peakmin=None, peakmax=None, fluxmin=None, fluxmax=None,
              nsigma=1.5, ratio=1.0, theta=0.0,
              use_sharp_round=False,mask=None,
              sharplo=0.2,sharphi=1.0,roundlo=-1.0,roundhi=1.0):
    """
    DAOFIND-style source detection.

    Convolves ``jdata`` with an elliptical Gaussian kernel built from
    ``fwhm``/``nsigma``/``ratio``/``theta``, segments the thresholded
    convolution, and derives a refined center for every surviving object.

    Returns (fitind, fluxes): a list of (x, y, sharp, round1, round2)
    tuples and the matching list of fluxes, after removing sources closer
    than fwhm*nsigma/2 to one another.
    """
    # store input image size:
    (img_ny, img_nx) = jdata.shape
    # Define convolution inputs
    nx, ny, a, b, c, f = gausspars(fwhm, nsigma=nsigma, ratio= ratio, theta=theta)
    xc = nx//2
    yc = ny//2
    yin, xin = np.mgrid[0:ny, 0:nx]
    kernel = gaussian1(1.0, xc, yc, a, b, c)(xin,yin)
    # define size of extraction box for each source based on kernel size
    grx = xc
    gry = yc
    # DAOFIND STYLE KERNEL "SHAPE"
    rmat = np.sqrt((xin-xc)**2 + (yin-yc)**2)
    rmatell = a*(xin-xc)**2 + b*(xin-xc)*(yin-yc) + c*(yin-yc)**2
    xyrmask = np.where((rmatell <= 2*f) | (rmat <= 2.001),1,0).astype(np.int16)
    # Previous *style* computation for kernel "shape":
    #xyrmask = np.where(rmat <= max(grx,gry),1,0).astype(np.int16)
    npts = xyrmask.sum()
    rmask = kernel*xyrmask
    denom = (rmask*rmask).sum() - rmask.sum()**2/npts
    nkern = (rmask - (rmask.sum()/npts))/denom # normalize kernel to preserve
                                               # fluxes for thresholds
    nkern *= xyrmask
    # initialize values used for getting source centers
    # NOTE(review): relerr, ninit and ninit2 below are computed but never used.
    relerr = 1./((rmask**2).sum() - (rmask.sum()**2/xyrmask.sum()))
    xsigsq = (fwhm / FWHM2SIG)**2
    ysigsq = (ratio**2) * xsigsq
    # convolve image with gaussian kernel
    convdata = signal.convolve2d(jdata, nkern, boundary='symm', mode='same').astype(np.float32)
    # clip image to create regions around each source for segmentation
    if mask is None:
        tdata=np.where(convdata > threshold, convdata, 0)
    else:
        tdata=np.where((convdata > threshold) & mask, convdata, 0)
    # segment image and find sources
    s = ndimage.morphology.generate_binary_structure(2, 2)
    ldata, nobj = ndimage.label(tdata, structure=s)
    fobjects = ndimage.find_objects(ldata)
    fluxes = []
    fitind = []
    if nobj < 2:
        print('No objects found for this image. Please check value of "threshold".')
        return fitind,fluxes
    # determine center of each source, while removing spurious sources or
    # applying limits defined by the user
    ninit = 0
    ninit2 = 0
    s2m, s4m = precompute_sharp_round(nx, ny, xc, yc)
    satur = False # Default assumption if use_sharp_round=False
    sharp = None
    round1 = None
    round2 = None
    for ss,n in zip(fobjects,range(len(fobjects))):
        ssx = ss[1].stop - ss[1].start
        ssy = ss[0].stop - ss[0].start
        # Skip segments spanning (almost) the whole image.
        if ssx >= tdata.shape[1]-1 or ssy >= tdata.shape[0]-1:
            continue
        yr0 = ss[0].start - gry
        yr1 = ss[0].stop + gry + 1
        if yr0 <= 0 or yr1 >= img_ny: continue # ignore sources within ny//2 of edge
        xr0 = ss[1].start - grx
        xr1 = ss[1].stop + grx + 1
        if xr0 <= 0 or xr1 >= img_nx: continue # ignore sources within nx//2 of edge
        ssnew = (slice(yr0,yr1),slice(xr0,xr1))
        region = tdata[ssnew]
        cntr = centroid(region)
        # Define region centered on max value in object (slice)
        # This region will be bounds-checked to insure that it only accesses
        # a valid section of the image (not off the edge)
        maxpos = (int(cntr[1]+0.5)+ssnew[0].start,int(cntr[0]+0.5)+ssnew[1].start)
        yr0 = maxpos[0] - gry
        yr1 = maxpos[0] + gry + 1
        if yr0 < 0 or yr1 > img_ny:
            continue
        xr0 = maxpos[1] - grx
        xr1 = maxpos[1] + grx + 1
        if xr0 < 0 or xr1 > img_nx:
            continue
        # Simple Centroid on the region from the input image
        jregion = jdata[yr0:yr1,xr0:xr1]
        src_flux = jregion.sum()
        src_peak = jregion.max()
        # Apply user-supplied peak and flux cuts.
        if (peakmax is not None and src_peak >= peakmax):
            continue
        if (peakmin is not None and src_peak <= peakmin):
            continue
        if fluxmin and src_flux <= fluxmin:
            continue
        if fluxmax and src_flux >= fluxmax:
            continue
        datamin = jregion.min()
        datamax = jregion.max()
        if use_sharp_round:
            # Compute sharpness and first estimate of roundness:
            dregion = convdata[yr0:yr1,xr0:xr1]
            satur, round1, sharp = \
                sharp_round(jregion, dregion, xyrmask, xc, yc,
                            s2m, s4m, nx, ny, datamin, datamax)
            # Filter sources:
            if sharp is None or (sharp < sharplo or sharp > sharphi):
                continue
            if round1 is None or (round1 < roundlo or round1 > roundhi):
                continue
        px, py, round2 = xy_round(jregion, grx, gry, skymode,
                                  kernel, xsigsq, ysigsq, datamin, datamax)
        # Filter sources:
        if px is None:
            continue
        if use_sharp_round and not satur and \
           (round2 is None or round2 < roundlo or round2 > roundhi):
            continue
        fitind.append((px + xr0, py + yr0, sharp, round1, round2))
        # compute a source flux value
        fluxes.append(src_flux)
    fitindc, fluxesc = apply_nsigma_separation(fitind, fluxes, fwhm*nsigma / 2)
    return fitindc, fluxesc
def apply_nsigma_separation(fitind, fluxes, separation, niter=10):
    """
    Thin the source list so that no two sources that end up adjacent in
    y-sorted order lie within ``separation`` pixels of each other; the
    first source of each too-close pair is dropped.

    The list is re-sorted by y and re-scanned up to ``niter`` times, and the
    (possibly shortened) lists are returned as (fitind, fluxes).
    """
    for _ in range(niter):
        if not fitind:
            break
        positions = np.array(fitind, np.float32)
        flux_arr = np.array(fluxes, np.float32)
        # Sort all sources by their y coordinate so that near-duplicates
        # become consecutive entries.
        order = np.argsort(positions[:, 1])
        positions = positions[order]
        flux_arr = flux_arr[order]
        fitind = positions.tolist()
        fluxes = flux_arr.tolist()
        # Pairwise distance between consecutive (x, y) positions.
        deltas = positions[1:, :2] - positions[:-1, :2]
        dr = np.sqrt((deltas**2).sum(axis=1))
        close = np.where(dr <= separation)[0]
        if close.shape[0] == 0:
            break
        # Delete from the end so earlier indices stay valid.
        for idx in close[::-1]:
            del fitind[idx]
            del fluxes[idx]
    return fitind, fluxes
def xy_round(data, x0, y0, skymode, ker2d, xsigsq, ysigsq, datamin=None, datamax=None):
    """Refine the center of a source.

    Delegates to the C implementation ``cdriz.arrxyround`` (a port of
    IRAF.noao.digiphot.daofind.apfind ap_xy_round) for speed.

    Returns (x, y, roundness); all three are None when the C routine cannot
    compute a solution.
    """
    if datamin is None:
        datamin = data.min()
    if datamax is None:
        datamax = data.max()
    result = cdriz.arrxyround(data, x0, y0, skymode, ker2d, xsigsq, ysigsq, datamin, datamax)
    if result is None:
        return None, None, None
    return result[0], result[1], result[2]
def precompute_sharp_round(nxk, nyk, xc, yc):
    """
    Build the two mask arrays used by ``sharp_round``: ``s4m`` weights the
    four-fold symmetry sum and ``s2m`` the signed two-fold symmetry sum.
    Both masks zero out the central pixel (xc, yc).
    """
    s4m = np.full((nyk, nxk), 1, dtype=np.int16)
    s4m[yc, xc] = 0
    # Start from the four-fold mask (center already zeroed) and give the
    # lower-left and upper-right regions a negative sign.
    s2m = s4m.copy()
    s2m[yc:, :xc] = -1
    s2m[:yc + 1, xc + 1:] = -1
    return s2m, s4m
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,
                datamin, datamax):
    """
    First estimates of the roundness and sharpness of a detected object.

    Python translation of the AP_SHARP_ROUND routine from IRAF/DAOFIND.
    Returns (satur, round, sharp); round/sharp may individually be None
    when they cannot be computed.
    """
    # Roundness from the two-fold vs four-fold symmetry sums of the
    # convolved density.
    two_fold = np.sum(s2m * density)
    four_fold = np.sum(s4m * np.abs(density))
    if two_fold == 0.0:
        round_est = 0.0
    elif four_fold <= 0.0:
        round_est = None
    else:
        round_est = 2.0 * two_fold / four_fold
    center_data = data[yc, xc]
    center_dens = density[yc, xc]
    # A bad central pixel precludes the sharpness estimate.
    if center_data > datamax:
        return True, round_est, None
    if center_data < datamin:
        return False, round_est, None
    # Saturated if any kernel pixel exceeds datamax.
    satur = np.max(kskip * data) > datamax
    # Valid neighbours: within [datamin, datamax], inside the kernel
    # footprint, excluding the central pixel itself.
    good = np.where((data >= datamin) & (data <= datamax), 1, 0)
    good *= kskip
    good[yc, xc] = 0
    nvalid = np.sum(good)
    if nvalid < 1 or center_dens <= 0.0:
        return satur, round_est, None
    sharp = (center_data - np.sum(good * data) / nvalid) / center_dens
    return satur, round_est, sharp
def roundness(im):
    """
    Circularity measure 4*pi*area/perimeter**2 of the image footprint:
    1.0 for a perfect circle, smaller for elongated shapes.
    """
    ny, nx = im.shape
    perimeter = 2 * (ny + nx) - 4
    return 4 * np.pi * im.size / perimeter**2
def immoments(im, p, q):
    """Raw image moment m_pq = sum over pixels of i**p * j**q * im[i, j],
    where i runs over rows and j over columns."""
    rows = range(im.shape[0])
    cols = range(im.shape[1])
    return np.sum([i**p * j**q * im[i, j] for j in cols for i in rows],
                  dtype=np.float64)
#ss=[i**0*j**0*list(star0[i,j].flatten()) for i in list(x.flatten()) for j in list(y.flatten())]
def nmoment(im, p, q):
    """Raw moment m_pq normalized by the total flux of the image."""
    return immoments(im, p, q) / np.sum(im, dtype=np.float64)
def centroid(im):
    """
    Centroid of an image from its raw moments via the C implementation
    ``cdriz.arrmoments``: returns (m01/m00, m10/m00).

    Note the axis convention used throughout this module: the first moment
    index runs over rows, so m10/m00 is the y (row) center and m01/m00 the
    x (column) center.
    """
    m00 = cdriz.arrmoments(im, 0, 0)
    ycen = cdriz.arrmoments(im, 1, 0) / m00
    xcen = cdriz.arrmoments(im, 0, 1) / m00
    return xcen, ycen
def cmoment(im, p, q):
    """Central moment mu_pq of the image about its centroid.

    NOTE(review): ``xcen`` is subtracted from the row index and ``ycen``
    from the column index — mirrors the convention of ``centroid``/
    ``immoments`` in this module; confirm before changing.
    """
    xcen, ycen = centroid(im)
    rows = range(im.shape[0])
    cols = range(im.shape[1])
    return np.sum([(i - xcen)**p * (j - ycen)**q * im[i, j]
                   for i in rows for j in cols],
                  dtype=np.float64)
def central_moments(im):
    """
    Central moments mu_pq (up to third order) of ``im`` about its centroid,
    returned as a dict keyed 'mu00' ... 'mu03'.

    First-order central moments are zero by construction.
    NOTE(review): ``xcen`` pairs with the first (row) moment index and
    ``ycen`` with the second, following this module's convention — confirm
    before changing.
    """
    xcen,ycen = centroid(im)
    mu00 = cmoment(im,p=0,q=0)
    mu01 = 0.
    mu10 = 0.
    mu11 = immoments(im,1,1) - xcen * immoments(im,0,1)
    mu20 = immoments(im,2,0) - xcen * immoments(im,1,0)
    mu02 = immoments(im,0,2) - ycen*immoments(im,0,1)
    mu21 = immoments(im,2,1) - 2*xcen*immoments(im,1,1) - ycen*immoments(im,2,0) + \
         2*xcen**2*immoments(im,0,1)
    mu12 = immoments(im,1,2) - 2*ycen*immoments(im,1,1) - xcen*immoments(im,0,2) + \
         2*ycen**2*immoments(im,1,0)
    mu30 = immoments(im,3,0) - 3*xcen*immoments(im,2,0) + 2*xcen**2*immoments(im,1,0)
    mu03 = immoments(im,0,3) - 3*ycen*immoments(im,0,2) + 2*ycen**2*immoments(im,0,1)
    cmoments = {'mu00': mu00,
                'mu01': mu01,
                'mu10': mu10,
                'mu11': mu11,
                'mu20': mu20,
                'mu02': mu02,
                'mu21': mu21,
                'mu12': mu12,
                'mu30': mu30,
                'mu03': mu03
                }
    return cmoments
def covmat(im):
    """Covariance matrix of the image intensity distribution, built from
    the normalized second-order central moments."""
    mu = central_moments(im)
    m00 = mu['mu00']
    c20 = mu['mu20'] / m00
    c02 = mu['mu02'] / m00
    c11 = mu['mu11'] / m00
    return np.array([[c20, c11], [c11, c02]])
| StarcoderdataPython |
8077179 | <reponame>Tlili-ahmed/2BiVQA
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy.optimize import curve_fit
import os
from scipy import stats
from scipy.stats import spearmanr
from sklearn.metrics import mean_squared_error
from statistics import mean
import pandas as pd
from scipy.stats import spearmanr,pearsonr
import sklearn
from scipy.stats import gaussian_kde
import argparse
import time
from tqdm import tqdm
import matplotlib.font_manager as fm
import matplotlib
start_time = time.time()
def read_files(path_to_file):
    """Load a two-column CSV of (MOS, predicted) scores.

    Returns two lists: the first and the second column of the file, in row
    order (values share the common dtype promotion of ``DataFrame.values``,
    exactly as the original row-by-row loop did).
    """
    table = pd.read_csv(path_to_file).values
    mos = table[:, 0].tolist()
    predicted = table[:, 1].tolist()
    return mos, predicted
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
    """4-parameter logistic mapping used to fit predicted scores to MOS:
    bayta2 + (bayta1 - bayta2) / (1 + exp(-(X - bayta3)/|bayta4|))."""
    z = np.divide(X - bayta3, np.abs(bayta4))
    denom = 1 + np.exp(np.negative(z))
    return bayta2 + np.divide(bayta1 - bayta2, denom)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mos_pred', type=str, help='path tp mos vs predicted scores file')
    args = parser.parse_args()
    if not os.path.exists('./figures'):
        os.makedirs('./figures')
    predicted_file = args.mos_pred
    # Ground-truth MOS values (y_ss) and model predictions (y_p).
    y_ss, y_p = read_files(predicted_file)
    # Fit the 4-parameter logistic mapping from predictions to MOS.
    beta_init = [np.max(y_ss), np.min(y_ss), np.mean(y_p), 0.5]
    popt, _ = curve_fit(logistic_func, y_p, y_ss, p0=beta_init, maxfev=int(1e8))
    y_pred_logistic = logistic_func(y_p, *popt)
    # Point density for coloring the scatter plot.
    xy = np.vstack([y_ss,y_p])
    z = gaussian_kde(xy)(xy)
    min_mos = min(y_ss)
    max_mos = max(y_ss)
    min_pred = min(y_p)
    max_pred = max(y_p)
    # Axis padding: one fifth of each value range.
    pas = (max_mos - min_mos)/5
    pas1 = (max_pred - min_pred)/5
    m = min(y_p) - pas1
    l = len(y_p)
    u = max(y_p) +pas1
    x = np.linspace(m-0.2,u+0.2,num=l)
    ms = y_ss
    # Residual spread around the fitted curve, used for the +-2 sigma bands.
    kf = ms - y_pred_logistic
    sig = np.std(kf)
    print('SROCC = ',spearmanr(y_ss,y_p).correlation)
    print('======================================================')
    print('PLCC = ', stats.pearsonr(y_ss,y_pred_logistic)[0])
    print('======================================================')
    # NOTE(review): bare except; the fallback covers scipy versions where
    # kendalltau needs the explicit method argument.
    try:
        KRCC = stats.kendalltau(y_ss, y_p)[0]
    except:
        KRCC = stats.kendalltau(y_ss, y_p, method='asymptotic')[0]
    print('KROCC = ' , KRCC)
    print('======================================================')
    print('RMSE = ' , np.sqrt(mean_squared_error(y_ss,y_pred_logistic)))
    print('======================================================')
    # Scatter of MOS vs predictions with the fitted logistic curve.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    font = {'family': 'serif',
            'color': 'black',
            'weight': 'normal',
            'size': 16,
            }
    font2 = {'family': 'serif',
             'color': 'black',
             'weight': 'normal',
             'size': 16,
             }
    fondt = fm.FontProperties(family='serif',
                              weight='normal',
                              style='normal', size=11)
    fondtitle = fm.FontProperties(family='serif',
                                  weight='normal',
                                  style='normal', size=8)
    ax.set_ylim([min_mos-pas,max_mos+pas])
    ax.set_xlim([min_pred-pas1,max_pred+pas1])
    plt.scatter(y_p,y_ss, s=10, marker='o', c=z)
    plt.plot(x, logistic_func(x, *popt), c='red',label=r'fitted $f(x)$',linewidth=1)
    plt.plot(x, logistic_func(x, *popt)+ 2*sig,'--' , c='red',label=r'$f(x) \pm 2 \sigma$',linewidth=1)
    plt.plot(x, logistic_func(x, *popt)- 2*sig,'--' , c='red',linewidth=1)
    plt.xlabel("Predicted Score",fontdict=font)
    plt.ylabel("MOS",fontdict=font)
    plt.legend(prop=fondt)
    plt.title('MOS vs predicted score', fontdict=font2)
    plt.grid(which='both')
    plt.grid(which='minor', alpha=0.2)
    plt.grid(which='major', alpha=0.5)
    plt.savefig('./figures/mos_sroc =' + str(spearmanr(y_ss,y_p).correlation)+'.png')
    plt.show()
| StarcoderdataPython |
# Importing this module registers the Blender host implementation with the
# OpenPype pipeline (side effect at import time).
from openpype.pipeline import install_host
from openpype.hosts.blender import api
install_host(api)
| StarcoderdataPython |
9706052 | #! /usr/bin/env python
import sys, argparse
def print_help():
    """Print the command-line usage text for cluster_te.py.

    NOTE: Python 2 print-statement syntax; this script is Python 2 only.
    """
    print "usage: cluster_te.py [-i INFILE] \
    \n\noptional arguments: \
    \n  -h  help \
    \n  -o OUTFILE  out file [optiional] \
    \n  -w WINDOW  window size [3000] \
    \n  -s STEP  window step size [500] \
    \n  -p PIRNA  minimum numebr of piRNA per window to delcare cluster [30] \
    \n  -r RATIO  5' T ratio [0] \
    \n  -min MIN  minumum piRNA size [24] \
    \n  -max MAX  maximum piRNA size [32] "
def make_windows(min, max, win_size, slide):
    """Generate [start, end] sliding windows covering [min, max].

    Each window is ``win_size`` wide and successive windows advance by
    ``slide``; the last window may extend past ``max``. The windows are
    returned sorted by start position.
    """
    windows = []
    start = min
    while start <= max:
        windows.append([start, start + win_size])
        start += slide
    return sorted(windows)
###MAIN###
#declare parameters
win_size = 3000
slide = 500
min_t_rat = 0
min_pi = 24
max_pi = 32
cluster_min = 30
#read command line
if len(sys.argv) == 1:
sys.exit(print_help())
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store', dest='infile',
help = 'in file')
parser.add_argument('-o', action='store', dest='outfile',
help='out file [optional]')
parser.add_argument('-w', action='store', dest='window',
help='window size [3000]', type=int)
parser.add_argument('-s', action='store', dest='step',
help='window step size [500]', type=int)
parser.add_argument('-p', action='store', dest='pirna',
help="minimum number of piRNA per window to declare cluster [30]", type=int)
parser.add_argument('-r', action='store', dest='ratio',
help="5' T ratio [0]", type=float)
parser.add_argument('-min', action='store', dest='min',
help="minimum piRNA size [24]", type=int)
parser.add_argument('-max', action='store', dest='max',
help="maximum piRNA size [32]", type=int)
results = parser.parse_args()
#update parameters
if results.infile:
f=open(results.infile)
if results.window:
win_size = results.window
if results.step:
slide = results.step
if results.pirna:
cluster_min = results.pirna
if results.ratio:
min_t_rat = results.ratio
if results.min:
min_pi = results.min
if results.max:
max_pi = results.max
data = {} #contigs are hash keys and list of pirnas on the cotig are values
cluster_hash = {} #final hash of clusters
#working program starts
#read SAM into memory
for line in f:
try:
line = line.strip()
fields = line.split()
seq_id, orient, contig, start, seq_len = fields[0], fields[1], fields[2], int(fields[3]), len(fields[9])
first = list(fields[9])[0]
if orient != '4' and seq_len >= min_pi and seq_len <= max_pi:
new_list = [start, first, orient, seq_id]
if contig not in data:
data[contig] = []
data[contig].append(new_list) #stores position, first base, orientation and piRNA id into data hash
else:
data[contig].append(new_list)
except:
pass
f.close()
#parse contigs: scan each contig independently with sliding windows, keep
#windows passing the piRNA-count and 5' T-ratio thresholds, then merge
#nearby passing windows into clusters
for chrm in data:
    data[chrm].sort() #sorts by position on contig: smallest to largest
    min = int(data[chrm][0][0]) #first piRNA on the contig (shadows builtin min)
    max = int(data[chrm][-1][0]) #last piRNA on the contig (shadows builtin max)
    #make windows
    #make_windows is defined earlier in this file; presumably returns
    #[start, end] pairs of width win_size stepped by slide -- confirm
    windows = make_windows(min, max, win_size, slide)
    #intersect windows with piRNAs
    wins = [] #windows passing the cluster test: [win_start, win_end, piRNAs]
    for window in windows:
        win_start, win_end = window[0], window[1]
        pi_count = 0
        first_nucl = [] #5' nucleotides of reads in this window (T-ratio test)
        pis_in_window = []
        rm = []
        for pi in data[chrm]:
            start, first = int(pi[0]), pi[1]
            if start >= win_start and start <= win_end:
                pis_in_window.append(pi)
                pi_count += 1
                first_nucl.append(first)
            elif start > win_end:
                #list is position-sorted, so no later read can be in-window
                break
            elif start < win_start:
                rm.append(pi) #remove piRNA found in a previous window to save time
        for pi in rm:
            data[chrm].remove(pi)
        if len(first_nucl) > 0:
            #fraction of reads whose 5' base is T
            t_ratio = float(first_nucl.count('T')) / len(first_nucl)
            if pi_count >= cluster_min and t_ratio >= min_t_rat:
                pis_in_window.sort()
                wins.append([win_start, win_end , pis_in_window])
    #merge overlapping windows together
    #windows whose starts lie within 1000 bp of each other are chained
    #into one cluster named "<contig>-<running index>"
    count = 0
    merged_clusters = {} #cluster id -> combined piRNA lists (with duplicates)
    index_count = 0
    while count < len(wins):
        try:
            index = chrm + '-' + str(index_count)
            if wins[count][0] >= (wins[count + 1][0] - 1000):
                if index not in merged_clusters:
                    merged_clusters[index] = []
                    merged_clusters[index] += wins[count][2]
                else:
                    merged_clusters[index] += wins[count][2]
                count += 1
            else:
                index_count += 1
                count += 1
        except:
            #IndexError when count + 1 runs past the last window ends the loop
            #NOTE(review): only wins[count] (never wins[count + 1]) is ever
            #added, so the final window of each chain -- and any isolated
            #passing window -- contributes no reads of its own; confirm this
            #is intended
            break
    for key in sorted(merged_clusters):
        pirnas = dict((x[3], x) for x in merged_clusters[key]).values() #remove duplicate pirnas found in overlapping windows
        pirnas.sort(key=lambda x: int(x[0])) #Python 2: values() is a list
        plus = minus = 0
        start = pirnas[0][0] #leftmost read position in the cluster
        stop = pirnas[-1][0] #rightmost read position in the cluster
        cluster_len = stop - start
        number = len(pirnas)
        #NOTE(review): raises ZeroDivisionError if every read shares one
        #position (cluster_len == 0) -- confirm that cannot happen here
        density = float(number) / cluster_len
        for pi in pirnas:
            orient = int(pi[2]) #flag 0 counted as plus strand, anything else as minus
            if orient == 0:
                plus += 1
            else:
                minus += 1
        #summary row: start, stop, length, total reads, plus, minus, density
        cluster_hash[key] = [str(start), str(stop), str(cluster_len), str(number), str(plus), str(minus), str(density)]
if results.outfile:
out = open(results.outfile, 'w')
out.write(str('#id\tstart\tstop\tlength\ttotal\tplus_strand\tminus_strand\tdensity\n'))
for key in sorted(cluster_hash):
out.write(str(key + '\t' + '\t'.join(cluster_hash[key]) + '\n'))
out.close()
else:
print '#id\tstart\tstop\tlength\ttotal\tplus_strand\tminus_strand\tdensity'
for key in sorted(cluster_hash):
print key + '\t' + '\t'.join(cluster_hash[key])
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.